/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/inet.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>

#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

/*
 * Driver version
 */
static char qla4xxx_version_str[40];

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * Module parameter information and variables
 */
static int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
		 " Set to disable exporting boot targets to sysfs.\n"
		 "\t\t 0 - Export boot targets\n"
		 "\t\t 1 - Do not export boot targets (Default)");

int ql4xdontresethba;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
		 " Don't reset the HBA for driver recovery.\n"
		 "\t\t 0 - It will reset HBA (Default)\n"
		 "\t\t 1 - It will NOT reset HBA");

int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
		 " Option to enable extended error logging.\n"
		 "\t\t 0 - no logging (Default)\n"
		 "\t\t 2 - debug logging");

int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
		 "\t\t 0 = enable INTx interrupt mechanism.\n"
		 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
		 "\t\t 2 = enable MSI interrupt mechanism.");

#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
		 " Maximum queue depth to report for target devices.\n"
		 "\t\t Default: 32.");

static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
		" Target Session Recovery Timeout.\n"
		"\t\t Default: 120 sec.");

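/*
 * The parameters above are visible under /sys/module/qla4xxx/parameters/,
 * and the ones registered with S_IWUSR can also be changed at runtime.
 * A typical load-time override looks like (illustrative values only):
 *
 *	modprobe qla4xxx ql4xextended_error_logging=2 ql4xmaxqdepth=64
 */
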
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
 * SCSI host template entry points
 */
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);

/*
 * iSCSI template entry points
 */
static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
				     enum iscsi_param param, char *buf);
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
				  enum iscsi_param param, char *buf);
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
				  enum iscsi_host_param param, char *buf);
static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
				   uint32_t len);
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
				   enum iscsi_param_type param_type,
				   int param, char *buf);
static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
						 struct sockaddr *dst_addr,
						 int non_blocking);
static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
				enum iscsi_param param, char *buf);
static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
			     struct iscsi_cls_conn *cls_conn,
			     uint64_t transport_fd, int is_leading);
static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
		       uint16_t qdepth, uint32_t initial_cmdsn);
static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
static void qla4xxx_task_work(struct work_struct *wdata);
static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
static int qla4xxx_task_xmit(struct iscsi_task *);
static void qla4xxx_task_cleanup(struct iscsi_task *);
static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				   struct iscsi_stats *stats);
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
			     uint32_t iface_type, uint32_t payload_size,
			     uint32_t pid, struct sockaddr *dst_addr);
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
				 uint32_t *num_entries, char *buf);
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);

/*
 * SCSI host template entry points
 */
static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
static umode_t ql4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);

static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
    QLA82XX_LEGACY_INTR_CONFIG;

static struct scsi_host_template qla4xxx_driver_template = {
	.module			= THIS_MODULE,
	.name			= DRIVER_NAME,
	.proc_name		= DRIVER_NAME,
	.queuecommand		= qla4xxx_queuecommand,

	.eh_abort_handler	= qla4xxx_eh_abort,
	.eh_device_reset_handler = qla4xxx_eh_device_reset,
	.eh_target_reset_handler = qla4xxx_eh_target_reset,
	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,

	.slave_configure	= qla4xxx_slave_configure,
	.slave_alloc		= qla4xxx_slave_alloc,
	.slave_destroy		= qla4xxx_slave_destroy,

	.this_id		= -1,
	.cmd_per_lun		= 3,
	.use_clustering		= ENABLE_CLUSTERING,
	.sg_tablesize		= SG_ALL,

	.max_sectors		= 0xFFFF,
	.shost_attrs		= qla4xxx_host_attrs,
	.host_reset		= qla4xxx_host_reset,
	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
};

static struct iscsi_transport qla4xxx_iscsi_transport = {
	.owner			= THIS_MODULE,
	.name			= DRIVER_NAME,
	.caps			= CAP_TEXT_NEGO |
				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
				  CAP_MULTI_R2T,
	.attr_is_visible	= ql4_attr_is_visible,
	.create_session		= qla4xxx_session_create,
	.destroy_session	= qla4xxx_session_destroy,
	.start_conn		= qla4xxx_conn_start,
	.create_conn		= qla4xxx_conn_create,
	.bind_conn		= qla4xxx_conn_bind,
	.stop_conn		= iscsi_conn_stop,
	.destroy_conn		= qla4xxx_conn_destroy,
	.set_param		= iscsi_set_param,
	.get_conn_param		= qla4xxx_conn_get_param,
	.get_session_param	= qla4xxx_session_get_param,
	.get_ep_param		= qla4xxx_get_ep_param,
	.ep_connect		= qla4xxx_ep_connect,
	.ep_poll		= qla4xxx_ep_poll,
	.ep_disconnect		= qla4xxx_ep_disconnect,
	.get_stats		= qla4xxx_conn_get_stats,
	.send_pdu		= iscsi_conn_send_pdu,
	.xmit_task		= qla4xxx_task_xmit,
	.cleanup_task		= qla4xxx_task_cleanup,
	.alloc_pdu		= qla4xxx_alloc_pdu,

	.get_host_param		= qla4xxx_host_get_param,
	.set_iface_param	= qla4xxx_iface_set_param,
	.get_iface_param	= qla4xxx_get_iface_param,
	.bsg_request		= qla4xxx_bsg_request,
	.send_ping		= qla4xxx_send_ping,
	.get_chap		= qla4xxx_get_chap_list,
	.delete_chap		= qla4xxx_delete_chap,
};

static struct scsi_transport_template *qla4xxx_scsi_transport;

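/*
 * The iscsi_transport ops above are driven by the iSCSI transport class
 * (iscsid over the iscsi netlink interface).  For an offload transport
 * such as this one the usual ordering is roughly: ep_connect ->
 * create_session -> create_conn -> bind_conn -> set_param -> start_conn,
 * with the actual TCP connection and iSCSI login performed by the
 * adapter firmware.
 */
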
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
			     uint32_t iface_type, uint32_t payload_size,
			     uint32_t pid, struct sockaddr *dst_addr)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;
	uint32_t options = 0;
	uint8_t ipaddr[IPv6_ADDR_LEN];
	int rval;

	memset(ipaddr, 0, IPv6_ADDR_LEN);
	/* IPv4 to IPv4 */
	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
	    (dst_addr->sa_family == AF_INET)) {
		addr = (struct sockaddr_in *)dst_addr;
		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
				  "dest: %pI4\n", __func__,
				  &ha->ip_config.ip_address, ipaddr));
		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
					 ipaddr);
		if (rval)
			rval = -EINVAL;
	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
		   (dst_addr->sa_family == AF_INET6)) {
		/* IPv6 to IPv6 */
		addr6 = (struct sockaddr_in6 *)dst_addr;
		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);

		options |= PING_IPV6_PROTOCOL_ENABLE;

		/* Ping using LinkLocal address */
		if ((iface_num == 0) || (iface_num == 1)) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
					  "src: %pI6 dest: %pI6\n", __func__,
					  &ha->ip_config.ipv6_link_local_addr,
					  ipaddr));
			options |= PING_IPV6_LINKLOCAL_ADDR;
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
		} else {
			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
				   "not supported\n", __func__, iface_num);
			rval = -ENOSYS;
			goto exit_send_ping;
		}

		/*
		 * If ping using LinkLocal address fails, try ping using
		 * IPv6 address
		 */
		if (rval != QLA_SUCCESS) {
			options &= ~PING_IPV6_LINKLOCAL_ADDR;
			if (iface_num == 0) {
				options |= PING_IPV6_ADDR0;
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
						  "Ping src: %pI6 "
						  "dest: %pI6\n", __func__,
						  &ha->ip_config.ipv6_addr0,
						  ipaddr));
			} else if (iface_num == 1) {
				options |= PING_IPV6_ADDR1;
				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
						  "Ping src: %pI6 "
						  "dest: %pI6\n", __func__,
						  &ha->ip_config.ipv6_addr1,
						  ipaddr));
			}
			rval = qla4xxx_ping_iocb(ha, options, payload_size,
						 pid, ipaddr);
			if (rval)
				rval = -EINVAL;
		}
	} else
		rval = -ENOSYS;
exit_send_ping:
	return rval;
}

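/*
 * qla4xxx_send_ping() maps the firmware ping IOCB onto the transport ping
 * interface: IPv4 pings are sent as-is, IPv6 pings are first tried from
 * the link-local address and then retried from ADDR0/ADDR1 for iface 0/1.
 * QLA_* status is converted to -EINVAL/-ENOSYS for the caller.
 */
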
static umode_t ql4_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
		case ISCSI_HOST_PARAM_PORT_STATE:
		case ISCSI_HOST_PARAM_PORT_SPEED:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_TARGET_ALIAS:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_CHAP_OUT_IDX:
		case ISCSI_PARAM_CHAP_IN_IDX:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_NET_PARAM:
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
		case ISCSI_NET_PARAM_IPV4_SUBNET:
		case ISCSI_NET_PARAM_IPV4_GW:
		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		case ISCSI_NET_PARAM_IFACE_ENABLE:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
		case ISCSI_NET_PARAM_IPV6_ADDR:
		case ISCSI_NET_PARAM_IPV6_ROUTER:
		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
		case ISCSI_NET_PARAM_VLAN_ID:
		case ISCSI_NET_PARAM_VLAN_PRIORITY:
		case ISCSI_NET_PARAM_VLAN_ENABLED:
		case ISCSI_NET_PARAM_MTU:
		case ISCSI_NET_PARAM_PORT:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

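/*
 * ql4_attr_is_visible() controls which transport-class attributes appear
 * in sysfs for this driver: returning S_IRUGO exposes the attribute
 * read-only, returning 0 hides it.
 */
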
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
				 uint32_t *num_entries, char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct ql4_chap_table *chap_table;
	struct iscsi_chap_rec *chap_rec;
	int max_chap_entries = 0;
	int valid_chap_entries = 0;
	int ret = 0, i;

	if (is_qla8022(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
		   __func__, *num_entries, chap_tbl_idx);

	if (!buf) {
		ret = -ENOMEM;
		goto exit_get_chap_list;
	}

	chap_rec = (struct iscsi_chap_rec *) buf;
	mutex_lock(&ha->chap_sem);
	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
		if (chap_table->cookie !=
		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
			continue;

		chap_rec->chap_tbl_idx = i;
		strncpy(chap_rec->username, chap_table->name,
			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
		strncpy(chap_rec->password, chap_table->secret,
			QL4_CHAP_MAX_SECRET_LEN);
		chap_rec->password_length = chap_table->secret_len;

		if (chap_table->flags & BIT_7) /* local */
			chap_rec->chap_type = CHAP_TYPE_OUT;

		if (chap_table->flags & BIT_6) /* peer */
			chap_rec->chap_type = CHAP_TYPE_IN;

		chap_rec++;

		valid_chap_entries++;
		if (valid_chap_entries == *num_entries)
			break;
		else
			continue;
	}
	mutex_unlock(&ha->chap_sem);

exit_get_chap_list:
	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
		   __func__, valid_chap_entries);
	*num_entries = valid_chap_entries;
	return ret;
}

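/*
 * For qla4xxx_get_chap_list() the caller supplies a buffer large enough
 * for *num_entries iscsi_chap_rec records; on return *num_entries is
 * updated to the number of valid flash CHAP entries actually copied,
 * starting at chap_tbl_idx.
 */
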
static int __qla4xxx_is_chap_active(struct device *dev, void *data)
{
	int ret = 0;
	uint16_t *chap_tbl_idx = (uint16_t *) data;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;

	if (!iscsi_is_session_dev(dev))
		goto exit_is_chap_active;

	cls_session = iscsi_dev_to_session(dev);
	sess = cls_session->dd_data;
	ddb_entry = sess->dd_data;

	if (iscsi_session_chkready(cls_session))
		goto exit_is_chap_active;

	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
		ret = 1;

exit_is_chap_active:
	return ret;
}

static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
				  uint16_t chap_tbl_idx)
{
	int ret = 0;

	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
				    __qla4xxx_is_chap_active);

	return ret;
}

static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct ql4_chap_table *chap_table;
	dma_addr_t chap_dma;
	int max_chap_entries = 0;
	uint32_t offset = 0;
	uint32_t chap_size;
	int ret = 0;

	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
	if (chap_table == NULL)
		return -ENOMEM;

	memset(chap_table, 0, sizeof(struct ql4_chap_table));

	if (is_qla8022(ha))
		max_chap_entries = (ha->hw.flt_chap_size / 2) /
				   sizeof(struct ql4_chap_table);
	else
		max_chap_entries = MAX_CHAP_ENTRIES_40XX;

	if (chap_tbl_idx > max_chap_entries) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	/* Check if chap index is in use.
	 * If chap is in use don't delete the chap entry */
	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
	if (ret) {
		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
			   "delete from flash\n", chap_tbl_idx);
		ret = -EBUSY;
		goto exit_delete_chap;
	}

	chap_size = sizeof(struct ql4_chap_table);
	if (is_qla40XX(ha))
		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
	else {
		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
		/* flt_chap_size is CHAP table size for both ports
		 * so divide it by 2 to calculate the offset for second port
		 */
		if (ha->port_num == 1)
			offset += (ha->hw.flt_chap_size / 2);
		offset += (chap_tbl_idx * chap_size);
	}

	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
	if (ret != QLA_SUCCESS) {
		ret = -EINVAL;
		goto exit_delete_chap;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
			  __le16_to_cpu(chap_table->cookie)));

	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
		goto exit_delete_chap;
	}

	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);

	offset = FLASH_CHAP_OFFSET |
			(chap_tbl_idx * sizeof(struct ql4_chap_table));
	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
				FLASH_OPT_RMW_COMMIT);
	if (ret == QLA_SUCCESS && ha->chap_list) {
		mutex_lock(&ha->chap_sem);
		/* Update ha chap_list cache */
		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
		       chap_table, sizeof(struct ql4_chap_table));
		mutex_unlock(&ha->chap_sem);
	}
	if (ret != QLA_SUCCESS)
		ret = -EINVAL;

exit_delete_chap:
	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
	return ret;
}

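/*
 * CHAP flash layout handled above: on 40xx parts the entry offset is
 * simply FLASH_CHAP_OFFSET plus the table index, while on 82xx parts the
 * CHAP region (flt_region_chap) holds both ports back to back, so port 1
 * starts halfway into flt_chap_size.  For a 2-port 82xx this works out to
 * roughly:
 *
 *	offset = FLASH_RAW_ACCESS_ADDR + (flt_region_chap << 2)
 *		 + (port_num ? flt_chap_size / 2 : 0)
 *		 + chap_tbl_idx * sizeof(struct ql4_chap_table);
 */
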
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
				   enum iscsi_param_type param_type,
				   int param, char *buf)
{
	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
	struct scsi_qla_host *ha = to_qla_host(shost);
	int len = -ENOSYS;

	if (param_type != ISCSI_NET_PARAM)
		return -ENOSYS;

	switch (param) {
	case ISCSI_NET_PARAM_IPV4_ADDR:
		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
		break;
	case ISCSI_NET_PARAM_IPV4_SUBNET:
		len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
		break;
	case ISCSI_NET_PARAM_IPV4_GW:
		len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
		break;
	case ISCSI_NET_PARAM_IFACE_ENABLE:
		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
			len = sprintf(buf, "%s\n",
				      (ha->ip_config.ipv4_options &
				       IPOPT_IPV4_PROTOCOL_ENABLE) ?
				      "enabled" : "disabled");
		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
			len = sprintf(buf, "%s\n",
				      (ha->ip_config.ipv6_options &
				       IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
				      "enabled" : "disabled");
		break;
	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		len = sprintf(buf, "%s\n",
			      (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
			      "dhcp" : "static");
		break;
	case ISCSI_NET_PARAM_IPV6_ADDR:
		if (iface->iface_num == 0)
			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
		if (iface->iface_num == 1)
			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
		break;
	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
		len = sprintf(buf, "%pI6\n",
			      &ha->ip_config.ipv6_link_local_addr);
		break;
	case ISCSI_NET_PARAM_IPV6_ROUTER:
		len = sprintf(buf, "%pI6\n",
			      &ha->ip_config.ipv6_default_router_addr);
		break;
	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
		len = sprintf(buf, "%s\n",
			      (ha->ip_config.ipv6_addl_options &
			       IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
			      "nd" : "static");
		break;
	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
		len = sprintf(buf, "%s\n",
			      (ha->ip_config.ipv6_addl_options &
			       IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
			      "auto" : "static");
		break;
	case ISCSI_NET_PARAM_VLAN_ID:
		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
			len = sprintf(buf, "%d\n",
				      (ha->ip_config.ipv4_vlan_tag &
				       ISCSI_MAX_VLAN_ID));
		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
			len = sprintf(buf, "%d\n",
				      (ha->ip_config.ipv6_vlan_tag &
				       ISCSI_MAX_VLAN_ID));
		break;
	case ISCSI_NET_PARAM_VLAN_PRIORITY:
		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
			len = sprintf(buf, "%d\n",
				      ((ha->ip_config.ipv4_vlan_tag >> 13) &
				       ISCSI_MAX_VLAN_PRIORITY));
		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
			len = sprintf(buf, "%d\n",
				      ((ha->ip_config.ipv6_vlan_tag >> 13) &
				       ISCSI_MAX_VLAN_PRIORITY));
		break;
	case ISCSI_NET_PARAM_VLAN_ENABLED:
		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
			len = sprintf(buf, "%s\n",
				      (ha->ip_config.ipv4_options &
				       IPOPT_VLAN_TAGGING_ENABLE) ?
				      "enabled" : "disabled");
		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
			len = sprintf(buf, "%s\n",
				      (ha->ip_config.ipv6_options &
				       IPV6_OPT_VLAN_TAGGING_ENABLE) ?
				      "enabled" : "disabled");
		break;
	case ISCSI_NET_PARAM_MTU:
		len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
		break;
	case ISCSI_NET_PARAM_PORT:
		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
			len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
		break;
	default:
		len = -ENOSYS;
	}

	return len;
}

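/*
 * Each case above prints a single human-readable line for sysfs/iscsiadm.
 * The VLAN fields are unpacked from the 16-bit 802.1Q tag cached in
 * ip_config, i.e. roughly:
 *
 *	vlan_id  = tag & ISCSI_MAX_VLAN_ID;		 (low 12 bits)
 *	priority = (tag >> 13) & ISCSI_MAX_VLAN_PRIORITY;
 */
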
static struct iscsi_endpoint *
qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
		   int non_blocking)
{
	int ret;
	struct iscsi_endpoint *ep;
	struct qla_endpoint *qla_ep;
	struct scsi_qla_host *ha;
	struct sockaddr_in *addr;
	struct sockaddr_in6 *addr6;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	if (!shost) {
		ret = -ENXIO;
		printk(KERN_ERR "%s: shost is NULL\n",
		       __func__);
		return ERR_PTR(ret);
	}

	ha = iscsi_host_priv(shost);

	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
	if (!ep) {
		ret = -ENOMEM;
		return ERR_PTR(ret);
	}

	qla_ep = ep->dd_data;
	memset(qla_ep, 0, sizeof(struct qla_endpoint));
	if (dst_addr->sa_family == AF_INET) {
		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
				  (char *)&addr->sin_addr));
	} else if (dst_addr->sa_family == AF_INET6) {
		memcpy(&qla_ep->dst_addr, dst_addr,
		       sizeof(struct sockaddr_in6));
		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
				  (char *)&addr6->sin6_addr));
	}

	qla_ep->host = shost;

	return ep;
}

static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct qla_endpoint *qla_ep;
	struct scsi_qla_host *ha;
	int ret = 0;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	qla_ep = ep->dd_data;
	ha = to_qla_host(qla_ep->host);

	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
		ret = 1;

	return ret;
}

static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
{
	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	iscsi_destroy_endpoint(ep);
}

static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
				enum iscsi_param param,
				char *buf)
{
	struct qla_endpoint *qla_ep = ep->dd_data;
	struct sockaddr *dst_addr;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		if (!qla_ep)
			return -ENOTCONN;

		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
		if (!dst_addr)
			return -ENOTCONN;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &qla_ep->dst_addr, param, buf);
	default:
		return -ENOSYS;
	}
}

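/*
 * Note that qla4xxx_ep_connect() only validates and records the
 * destination address; no connection is opened here.  The firmware does
 * the actual TCP connect and iSCSI login later, from qla4xxx_conn_start().
 * qla4xxx_ep_poll() therefore just reports whether the adapter is up and
 * DDB list building has finished.
 */
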
static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				   struct iscsi_stats *stats)
{
	struct iscsi_session *sess;
	struct iscsi_cls_session *cls_sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct ql_iscsi_stats *ql_iscsi_stats;
	int stats_size;
	int ret;
	dma_addr_t iscsi_stats_dma;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

	cls_sess = iscsi_conn_to_session(cls_conn);
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
	/* Allocate memory */
	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
					    &iscsi_stats_dma, GFP_KERNEL);
	if (!ql_iscsi_stats) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to allocate memory for iscsi stats\n");
		goto exit_get_stats;
	}

	ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
				    iscsi_stats_dma);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha,
			   "Unable to retrieve iscsi stats\n");
		goto free_stats;
	}

	/* octets */
	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
	/* xmit pdus */
	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
	/* recv pdus */
	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
	stats->logoutrsp_pdus =
			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);

free_stats:
	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
			  iscsi_stats_dma);
exit_get_stats:
	return;
}

static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *session;
	struct iscsi_session *sess;
	unsigned long flags;
	enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;

	session = starget_to_session(scsi_target(sc->device));
	sess = session->dd_data;

	spin_lock_irqsave(&session->lock, flags);
	if (session->state == ISCSI_SESSION_FAILED)
		ret = BLK_EH_RESET_TIMER;
	spin_unlock_irqrestore(&session->lock, flags);

	return ret;
}

static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_cls_host *ihost = shost->shost_data;
	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;

	qla4xxx_get_firmware_state(ha);

	switch (ha->addl_fw_state & 0x0F00) {
	case FW_ADDSTATE_LINK_SPEED_10MBPS:
		speed = ISCSI_PORT_SPEED_10MBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_100MBPS:
		speed = ISCSI_PORT_SPEED_100MBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_1GBPS:
		speed = ISCSI_PORT_SPEED_1GBPS;
		break;
	case FW_ADDSTATE_LINK_SPEED_10GBPS:
		speed = ISCSI_PORT_SPEED_10GBPS;
		break;
	}
	ihost->port_speed = speed;
}

static void qla4xxx_set_port_state(struct Scsi_Host *shost)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	struct iscsi_cls_host *ihost = shost->shost_data;
	uint32_t state = ISCSI_PORT_STATE_DOWN;

	if (test_bit(AF_LINK_UP, &ha->flags))
		state = ISCSI_PORT_STATE_UP;

	ihost->port_state = state;
}

static int qla4xxx_host_get_param(struct Scsi_Host *shost,
				  enum iscsi_host_param param, char *buf)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	int len;

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
		break;
	case ISCSI_HOST_PARAM_INITIATOR_NAME:
		len = sprintf(buf, "%s\n", ha->name_string);
		break;
	case ISCSI_HOST_PARAM_PORT_STATE:
		qla4xxx_set_port_state(shost);
		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
		break;
	case ISCSI_HOST_PARAM_PORT_SPEED:
		qla4xxx_set_port_speed(shost);
		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
		break;
	default:
		return -ENOSYS;
	}

	return len;
}

static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
{
	if (ha->iface_ipv4)
		return;

	/* IPv4 */
	ha->iface_ipv4 = iscsi_create_iface(ha->host,
					    &qla4xxx_iscsi_transport,
					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
	if (!ha->iface_ipv4)
		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
			   "iface0.\n");
}

static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
{
	if (!ha->iface_ipv6_0)
		/* IPv6 iface-0 */
		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
						      &qla4xxx_iscsi_transport,
						      ISCSI_IFACE_TYPE_IPV6, 0,
						      0);
	if (!ha->iface_ipv6_0)
		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
			   "iface0.\n");

	if (!ha->iface_ipv6_1)
		/* IPv6 iface-1 */
		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
						      &qla4xxx_iscsi_transport,
						      ISCSI_IFACE_TYPE_IPV6, 1,
						      0);
	if (!ha->iface_ipv6_1)
		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
			   "iface1.\n");
}

static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
{
	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
		qla4xxx_create_ipv4_iface(ha);

	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
		qla4xxx_create_ipv6_iface(ha);
}

static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
{
	if (ha->iface_ipv4) {
		iscsi_destroy_iface(ha->iface_ipv4);
		ha->iface_ipv4 = NULL;
	}
}

static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
{
	if (ha->iface_ipv6_0) {
		iscsi_destroy_iface(ha->iface_ipv6_0);
		ha->iface_ipv6_0 = NULL;
	}
	if (ha->iface_ipv6_1) {
		iscsi_destroy_iface(ha->iface_ipv6_1);
		ha->iface_ipv6_1 = NULL;
	}
}

static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
{
	qla4xxx_destroy_ipv4_iface(ha);
	qla4xxx_destroy_ipv6_iface(ha);
}

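/*
 * The iscsi_iface objects created above are the sysfs view of the
 * firmware's IP configuration: one IPv4 interface and up to two IPv6
 * interfaces, created only when the corresponding protocol is enabled in
 * ip_config and torn down again when it is disabled.
 */
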
static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
			     struct iscsi_iface_param_info *iface_param,
			     struct addr_ctrl_blk *init_fw_cb)
{
	/*
	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
	 * iface_num 1 is valid only for IPv6 Addr.
	 */
	switch (iface_param->param) {
	case ISCSI_NET_PARAM_IPV6_ADDR:
		if (iface_param->iface_num & 0x1)
			/* IPv6 Addr 1 */
			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
			       sizeof(init_fw_cb->ipv6_addr1));
		else
			/* IPv6 Addr 0 */
			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
			       sizeof(init_fw_cb->ipv6_addr0));
		break;
	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
		if (iface_param->iface_num & 0x1)
			break;
		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
		       sizeof(init_fw_cb->ipv6_if_id));
		break;
	case ISCSI_NET_PARAM_IPV6_ROUTER:
		if (iface_param->iface_num & 0x1)
			break;
		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
		break;
	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
		/* Autocfg applies to even interface */
		if (iface_param->iface_num & 0x1)
			break;

		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
			init_fw_cb->ipv6_addtl_opts &=
				cpu_to_le16(
				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
			init_fw_cb->ipv6_addtl_opts |=
				cpu_to_le16(
				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
		else
			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
				   "IPv6 addr\n");
		break;
	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
		/* Autocfg applies to even interface */
		if (iface_param->iface_num & 0x1)
			break;

		if (iface_param->value[0] ==
		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
		else if (iface_param->value[0] ==
			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
		else
			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
				   "IPv6 linklocal addr\n");
		break;
	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
		/* Autocfg applies to even interface */
		if (iface_param->iface_num & 0x1)
			break;

		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
		break;
	case ISCSI_NET_PARAM_IFACE_ENABLE:
		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
			init_fw_cb->ipv6_opts |=
				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
			qla4xxx_create_ipv6_iface(ha);
		} else {
			init_fw_cb->ipv6_opts &=
				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
					    0xFFFF);
			qla4xxx_destroy_ipv6_iface(ha);
		}
		break;
	case ISCSI_NET_PARAM_VLAN_TAG:
		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
			break;
		init_fw_cb->ipv6_vlan_tag =
				cpu_to_be16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_VLAN_ENABLED:
		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
			init_fw_cb->ipv6_opts |=
				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
		else
			init_fw_cb->ipv6_opts &=
				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
		break;
	case ISCSI_NET_PARAM_MTU:
		init_fw_cb->eth_mtu_size =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_PORT:
		/* Autocfg applies to even interface */
		if (iface_param->iface_num & 0x1)
			break;

		init_fw_cb->ipv6_port =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	default:
		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
			   iface_param->param);
		break;
	}
}

static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
			     struct iscsi_iface_param_info *iface_param,
			     struct addr_ctrl_blk *init_fw_cb)
{
	switch (iface_param->param) {
	case ISCSI_NET_PARAM_IPV4_ADDR:
		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
		       sizeof(init_fw_cb->ipv4_addr));
		break;
	case ISCSI_NET_PARAM_IPV4_SUBNET:
		memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
		       sizeof(init_fw_cb->ipv4_subnet));
		break;
	case ISCSI_NET_PARAM_IPV4_GW:
		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
		       sizeof(init_fw_cb->ipv4_gw_addr));
		break;
	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
			init_fw_cb->ipv4_tcp_opts |=
					cpu_to_le16(TCPOPT_DHCP_ENABLE);
		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
			init_fw_cb->ipv4_tcp_opts &=
					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
		else
			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
		break;
	case ISCSI_NET_PARAM_IFACE_ENABLE:
		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
			init_fw_cb->ipv4_ip_opts |=
				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
			qla4xxx_create_ipv4_iface(ha);
		} else {
			init_fw_cb->ipv4_ip_opts &=
				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
					    0xFFFF);
			qla4xxx_destroy_ipv4_iface(ha);
		}
		break;
	case ISCSI_NET_PARAM_VLAN_TAG:
		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
			break;
		init_fw_cb->ipv4_vlan_tag =
				cpu_to_be16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_VLAN_ENABLED:
		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
			init_fw_cb->ipv4_ip_opts |=
					cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
		else
			init_fw_cb->ipv4_ip_opts &=
					cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
		break;
	case ISCSI_NET_PARAM_MTU:
		init_fw_cb->eth_mtu_size =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	case ISCSI_NET_PARAM_PORT:
		init_fw_cb->ipv4_port =
				cpu_to_le16(*(uint16_t *)iface_param->value);
		break;
	default:
		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
			   iface_param->param);
		break;
	}
}

static void
qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
{
	struct addr_ctrl_blk_def *acb;
	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
	memset(acb->reserved1, 0, sizeof(acb->reserved1));
	memset(acb->reserved2, 0, sizeof(acb->reserved2));
	memset(acb->reserved3, 0, sizeof(acb->reserved3));
	memset(acb->reserved4, 0, sizeof(acb->reserved4));
	memset(acb->reserved5, 0, sizeof(acb->reserved5));
	memset(acb->reserved6, 0, sizeof(acb->reserved6));
	memset(acb->reserved7, 0, sizeof(acb->reserved7));
	memset(acb->reserved8, 0, sizeof(acb->reserved8));
	memset(acb->reserved9, 0, sizeof(acb->reserved9));
	memset(acb->reserved10, 0, sizeof(acb->reserved10));
	memset(acb->reserved11, 0, sizeof(acb->reserved11));
	memset(acb->reserved12, 0, sizeof(acb->reserved12));
	memset(acb->reserved13, 0, sizeof(acb->reserved13));
	memset(acb->reserved14, 0, sizeof(acb->reserved14));
	memset(acb->reserved15, 0, sizeof(acb->reserved15));
}

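/*
 * qla4xxx_initcb_to_acb() clears every reserved field of the init control
 * block so the same buffer can be handed back to the firmware as an
 * address control block (ACB) by qla4xxx_set_acb() below.
 */
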
static int
qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
{
	struct scsi_qla_host *ha = to_qla_host(shost);
	int rval = 0;
	struct iscsi_iface_param_info *iface_param = NULL;
	struct addr_ctrl_blk *init_fw_cb = NULL;
	dma_addr_t init_fw_cb_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	uint32_t rem = len;
	struct nlattr *attr;

	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
					sizeof(struct addr_ctrl_blk),
					&init_fw_cb_dma, GFP_KERNEL);
	if (!init_fw_cb) {
		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
			   __func__);
		return -ENOMEM;
	}

	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	nla_for_each_attr(attr, data, len, rem) {
		iface_param = nla_data(attr);

		if (iface_param->param_type != ISCSI_NET_PARAM)
			continue;

		switch (iface_param->iface_type) {
		case ISCSI_IFACE_TYPE_IPV4:
			switch (iface_param->iface_num) {
			case 0:
				qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
				break;
			default:
				/* Cannot have more than one IPv4 interface */
				ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
					   "number = %d\n",
					   iface_param->iface_num);
				break;
			}
			break;
		case ISCSI_IFACE_TYPE_IPV6:
			switch (iface_param->iface_num) {
			case 0:
			case 1:
				qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
				break;
			default:
				/* Cannot have more than two IPv6 interfaces */
				ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
					   "number = %d\n",
					   iface_param->iface_num);
				break;
			}
			break;
		default:
			ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
			break;
		}
	}

	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);

	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
				 sizeof(struct addr_ctrl_blk),
				 FLASH_OPT_RMW_COMMIT);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
			   __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	rval = qla4xxx_disable_acb(ha);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
			   __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	wait_for_completion_timeout(&ha->disable_acb_comp,
				    DISABLE_ACB_TOV * HZ);

	qla4xxx_initcb_to_acb(init_fw_cb);

	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
			   __func__);
		rval = -EIO;
		goto exit_init_fw_cb;
	}

	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
				  init_fw_cb_dma);

exit_init_fw_cb:
	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
			  init_fw_cb, init_fw_cb_dma);

	return rval;
}

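/*
 * Overall flow of qla4xxx_iface_set_param(): read the current IFCB from
 * the firmware, apply each ISCSI_NET_PARAM netlink attribute to it via
 * qla4xxx_set_ipv4()/qla4xxx_set_ipv6(), commit the block to flash, then
 * disable and re-set the ACB so the firmware picks up the new addressing,
 * and finally refresh the driver's cached copy of the IFCB.
 */
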
static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
				     enum iscsi_param param, char *buf)
{
	struct iscsi_session *sess = cls_sess->dd_data;
	struct ddb_entry *ddb_entry = sess->dd_data;
	struct scsi_qla_host *ha = ddb_entry->ha;
	int rval, len;
	uint16_t idx;

	switch (param) {
	case ISCSI_PARAM_CHAP_IN_IDX:
		rval = qla4xxx_get_chap_index(ha, sess->username_in,
					      sess->password_in, BIDI_CHAP,
					      &idx);
		if (rval)
			return -EINVAL;

		len = sprintf(buf, "%hu\n", idx);
		break;
	case ISCSI_PARAM_CHAP_OUT_IDX:
		rval = qla4xxx_get_chap_index(ha, sess->username,
					      sess->password, LOCAL_CHAP,
					      &idx);
		if (rval)
			return -EINVAL;

		len = sprintf(buf, "%hu\n", idx);
		break;
	default:
		return iscsi_session_get_param(cls_sess, param, buf);
	}

	return len;
}

static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
				  enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn;
	struct qla_conn *qla_conn;
	struct sockaddr *dst_addr;
	int len = 0;

	conn = cls_conn->dd_data;
	qla_conn = conn->dd_data;
	dst_addr = &qla_conn->qla_ep->dst_addr;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 dst_addr, param, buf);
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return len;

}

int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
{
	uint32_t mbx_sts = 0;
	uint16_t tmp_ddb_index;
	int ret;

get_ddb_index:
	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);

	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Free DDB index not available\n"));
		ret = QLA_ERROR;
		goto exit_get_ddb_index;
	}

	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
		goto get_ddb_index;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Found a free DDB index at %d\n", tmp_ddb_index));
	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
	if (ret == QLA_ERROR) {
		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
			ql4_printk(KERN_INFO, ha,
				   "DDB index = %d not available trying next\n",
				   tmp_ddb_index);
			goto get_ddb_index;
		}
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Free FW DDB not available\n"));
	}

	*ddb_index = tmp_ddb_index;

exit_get_ddb_index:
	return ret;
}

static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
				   struct ddb_entry *ddb_entry,
				   char *existing_ipaddr,
				   char *user_ipaddr)
{
	uint8_t dst_ipaddr[IPv6_ADDR_LEN];
	char formatted_ipaddr[DDB_IPADDR_LEN];
	int status = QLA_SUCCESS, ret = 0;

	if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
		ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
			       '\0', NULL);
		if (ret == 0) {
			status = QLA_ERROR;
			goto out_match;
		}
		ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
	} else {
		ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
			       '\0', NULL);
		if (ret == 0) {
			status = QLA_ERROR;
			goto out_match;
		}
		ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
	}

	if (strcmp(existing_ipaddr, formatted_ipaddr))
		status = QLA_ERROR;

out_match:
	return status;
}

static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
				      struct iscsi_cls_conn *cls_conn)
{
	int idx = 0, max_ddbs, rval;
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess, *existing_sess;
	struct iscsi_conn *conn, *existing_conn;
	struct ddb_entry *ddb_entry;

	sess = cls_sess->dd_data;
	conn = cls_conn->dd_data;

	if (sess->targetname == NULL ||
	    conn->persistent_address == NULL ||
	    conn->persistent_port == 0)
		return QLA_ERROR;

	max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
				    MAX_DEV_DB_ENTRIES;

	for (idx = 0; idx < max_ddbs; idx++) {
		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
		if (ddb_entry == NULL)
			continue;

		if (ddb_entry->ddb_type != FLASH_DDB)
			continue;

		existing_sess = ddb_entry->sess->dd_data;
		existing_conn = ddb_entry->conn->dd_data;

		if (existing_sess->targetname == NULL ||
		    existing_conn->persistent_address == NULL ||
		    existing_conn->persistent_port == 0)
			continue;

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "IQN = %s User IQN = %s\n",
				  existing_sess->targetname,
				  sess->targetname));

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "IP = %s User IP = %s\n",
				  existing_conn->persistent_address,
				  conn->persistent_address));

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Port = %d User Port = %d\n",
				  existing_conn->persistent_port,
				  conn->persistent_port));

		if (strcmp(existing_sess->targetname, sess->targetname))
			continue;
		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
					existing_conn->persistent_address,
					conn->persistent_address);
		if (rval == QLA_ERROR)
			continue;
		if (existing_conn->persistent_port != conn->persistent_port)
			continue;
		break;
	}

	if (idx == max_ddbs)
		return QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Match found in fwdb sessions\n"));
	return QLA_SUCCESS;
}

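/*
 * DDB index allocation above: a free slot is taken from ha->ddb_idx_map
 * and then confirmed with the firmware through qla4xxx_req_ddb_entry();
 * if the firmware rejects that index with MBOX_STS_COMMAND_ERROR the next
 * free bit is tried.  The match helpers compare target name, formatted IP
 * address and port so a session that already exists in the flash DDBs is
 * not logged in a second time.
 */
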
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
		       uint16_t cmds_max, uint16_t qdepth,
		       uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_sess;
	struct scsi_qla_host *ha;
	struct qla_endpoint *qla_ep;
	struct ddb_entry *ddb_entry;
	uint16_t ddb_index;
	struct iscsi_session *sess;
	struct sockaddr *dst_addr;
	int ret;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	if (!ep) {
		printk(KERN_ERR "qla4xxx: missing ep.\n");
		return NULL;
	}

	qla_ep = ep->dd_data;
	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
	ha = to_qla_host(qla_ep->host);

	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
	if (ret == QLA_ERROR)
		return NULL;

	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
				       cmds_max, sizeof(struct ddb_entry),
				       sizeof(struct ql4_task_data),
				       initial_cmdsn, ddb_index);
	if (!cls_sess)
		return NULL;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ddb_entry->fw_ddb_index = ddb_index;
	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
	ddb_entry->ha = ha;
	ddb_entry->sess = cls_sess;
	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
	ddb_entry->ddb_change = qla4xxx_ddb_change;
	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
	ha->tot_ddbs++;

	return cls_sess;
}

static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
{
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	unsigned long flags, wtime;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t ddb_state;
	int ret;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		goto destroy_session;
	}

	wtime = jiffies + (HZ * LOGOUT_TOV);
	do {
		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
					      fw_ddb_entry, fw_ddb_entry_dma,
					      NULL, NULL, &ddb_state, NULL,
					      NULL, NULL);
		if (ret == QLA_ERROR)
			goto destroy_session;

		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
		    (ddb_state == DDB_DS_SESSION_FAILED))
			goto destroy_session;

		schedule_timeout_uninterruptible(HZ);
	} while ((time_after(wtime, jiffies)));

destroy_session:
	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_free_ddb(ha, ddb_entry);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	iscsi_session_teardown(cls_sess);

	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
}

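/*
 * qla4xxx_session_destroy() polls the firmware DDB state for up to
 * LOGOUT_TOV seconds and only issues the clear-DDB mailbox command once
 * the device has reached NO_CONNECTION_ACTIVE or SESSION_FAILED (or the
 * wait times out), which avoids clearing a DDB that is still logging out.
 */
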
static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
				    conn_idx);
	if (!cls_conn)
		return NULL;

	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ddb_entry->conn = cls_conn;

	return cls_conn;
}

static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
			     struct iscsi_cls_conn *cls_conn,
			     uint64_t transport_fd, int is_leading)
{
	struct iscsi_conn *conn;
	struct qla_conn *qla_conn;
	struct iscsi_endpoint *ep;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
		return -EINVAL;
	ep = iscsi_lookup_endpoint(transport_fd);
	conn = cls_conn->dd_data;
	qla_conn = conn->dd_data;
	qla_conn->qla_ep = ep->dd_data;
	return 0;
}

static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	struct dev_db_entry *fw_ddb_entry = NULL;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t mbx_sts = 0;
	int ret = 0;
	int status = QLA_SUCCESS;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	/* Check if we have a matching FW DDB; if so, do not log in to this
	 * target, since that could cause the target to log out the previous
	 * connection.
	 */
	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
	if (ret == QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha,
			   "Session already exists in FW.\n");
		ret = -EEXIST;
		goto exit_conn_start;
	}

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (!fw_ddb_entry) {
		ql4_printk(KERN_ERR, ha,
			   "%s: Unable to allocate dma buffer\n", __func__);
		ret = -ENOMEM;
		goto exit_conn_start;
	}

	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
	if (ret) {
		/* If iscsid is stopped and started then no need to do
		 * set param again since ddb state will be already
		 * active and FW does not allow set ddb to an
		 * active session.
		 */
		if (mbx_sts)
			if (ddb_entry->fw_ddb_device_state ==
						DDB_DS_SESSION_ACTIVE) {
				ddb_entry->unblock_sess(ddb_entry->sess);
				goto exit_set_param;
			}

		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
			   __func__, ddb_entry->fw_ddb_index);
		goto exit_conn_start;
	}

	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
	if (status == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
			   sess->targetname);
		ret = -EINVAL;
		goto exit_conn_start;
	}

	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;

	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
		      ddb_entry->fw_ddb_device_state));

exit_set_param:
	ret = 0;

exit_conn_start:
	if (fw_ddb_entry)
		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
				  fw_ddb_entry, fw_ddb_entry_dma);
	return ret;
}

static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
	struct iscsi_session *sess;
	struct scsi_qla_host *ha;
	struct ddb_entry *ddb_entry;
	int options;

	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
	sess = cls_sess->dd_data;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;

	options = LOGOUT_OPTION_CLOSE_SESSION;
	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
}

static void qla4xxx_task_work(struct work_struct *wdata)
{
	struct ql4_task_data *task_data;
	struct scsi_qla_host *ha;
	struct passthru_status *sts;
	struct iscsi_task *task;
	struct iscsi_hdr *hdr;
	uint8_t *data;
	uint32_t data_len;
	struct iscsi_conn *conn;
	int hdr_len;
	itt_t itt;

	task_data = container_of(wdata, struct ql4_task_data, task_work);
	ha = task_data->ha;
	task = task_data->task;
	sts = &task_data->sts;
	hdr_len = sizeof(struct iscsi_hdr);

	DEBUG3(printk(KERN_INFO "Status returned\n"));
	DEBUG3(qla4xxx_dump_buffer(sts, 64));
	DEBUG3(printk(KERN_INFO "Response buffer"));
	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));

	conn = task->conn;

	switch (sts->completionStatus) {
	case PASSTHRU_STATUS_COMPLETE:
		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
		itt = sts->handle;
		hdr->itt = itt;
		data = task_data->resp_buffer + hdr_len;
		data_len = task_data->resp_len - hdr_len;
		iscsi_complete_pdu(conn, hdr, data, data_len);
		break;
	default:
		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
			   sts->completionStatus);
		break;
	}
	return;
}

static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct ql4_task_data *task_data;
	struct iscsi_session *sess;
	struct ddb_entry *ddb_entry;
	struct scsi_qla_host *ha;
	int hdr_len;

	sess = task->conn->session;
	ddb_entry = sess->dd_data;
	ha = ddb_entry->ha;
	task_data = task->dd_data;
	memset(task_data, 0, sizeof(struct ql4_task_data));

	if (task->sc) {
		ql4_printk(KERN_INFO, ha,
			   "%s: SCSI Commands not implemented\n", __func__);
		return -EINVAL;
	}

	hdr_len = sizeof(struct iscsi_hdr);
	task_data->ha = ha;
	task_data->task = task;

	if (task->data_count) {
		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
						     task->data_count,
						     PCI_DMA_TODEVICE);
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
			  __func__, task->conn->max_recv_dlength, hdr_len));

	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
						    task_data->resp_len,
						    &task_data->resp_dma,
						    GFP_ATOMIC);
	if (!task_data->resp_buffer)
		goto exit_alloc_pdu;

	task_data->req_len = task->data_count + hdr_len;
	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
						   task_data->req_len,
						   &task_data->req_dma,
						   GFP_ATOMIC);
	if (!task_data->req_buffer)
		goto exit_alloc_pdu;

	task->hdr = task_data->req_buffer;

	INIT_WORK(&task_data->task_work, qla4xxx_task_work);

	return 0;

exit_alloc_pdu:
	if (task_data->resp_buffer)
		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
				  task_data->resp_buffer, task_data->resp_dma);

	if (task_data->req_buffer)
69ca216e 1850 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
b3a271a9
MR
1851 task_data->req_buffer, task_data->req_dma);
1852 return -ENOMEM;
1853}
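/*
 * Illustration (not part of the original driver): dma_map_single() can fail,
 * and the mapping in qla4xxx_alloc_pdu() above is not checked. A hardened
 * sketch of that step would be:
 *
 *	task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
 *					     task->data_count,
 *					     PCI_DMA_TODEVICE);
 *	if (dma_mapping_error(&ha->pdev->dev, task_data->data_dma))
 *		return -ENOMEM;
 *
 * The original code assumes the mapping always succeeds.
 */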
1854
1855static void qla4xxx_task_cleanup(struct iscsi_task *task)
1856{
1857 struct ql4_task_data *task_data;
1858 struct iscsi_session *sess;
1859 struct ddb_entry *ddb_entry;
1860 struct scsi_qla_host *ha;
1861 int hdr_len;
1862
1863 hdr_len = sizeof(struct iscsi_hdr);
1864 sess = task->conn->session;
1865 ddb_entry = sess->dd_data;
1866 ha = ddb_entry->ha;
1867 task_data = task->dd_data;
1868
1869 if (task->data_count) {
1870 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1871 task->data_count, PCI_DMA_TODEVICE);
1872 }
1873
1874 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
1875 __func__, task->conn->max_recv_dlength, hdr_len));
1876
1877 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1878 task_data->resp_buffer, task_data->resp_dma);
69ca216e 1879 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
b3a271a9
MR
1880 task_data->req_buffer, task_data->req_dma);
1881 return;
1882}
1883
1884static int qla4xxx_task_xmit(struct iscsi_task *task)
1885{
1886 struct scsi_cmnd *sc = task->sc;
1887 struct iscsi_session *sess = task->conn->session;
1888 struct ddb_entry *ddb_entry = sess->dd_data;
1889 struct scsi_qla_host *ha = ddb_entry->ha;
1890
1891 if (!sc)
1892 return qla4xxx_send_passthru0(task);
1893
1894 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1895 __func__);
1896 return -ENOSYS;
1897}
1898
13483730
MC
1899static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
1900 struct dev_db_entry *fw_ddb_entry,
1901 struct iscsi_cls_session *cls_sess,
1902 struct iscsi_cls_conn *cls_conn)
1903{
1904 int buflen = 0;
1905 struct iscsi_session *sess;
376738af 1906 struct ddb_entry *ddb_entry;
13483730
MC
1907 struct iscsi_conn *conn;
1908 char ip_addr[DDB_IPADDR_LEN];
1909 uint16_t options = 0;
1910
1911 sess = cls_sess->dd_data;
376738af 1912 ddb_entry = sess->dd_data;
13483730
MC
1913 conn = cls_conn->dd_data;
1914
376738af
NJ
1915 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
1916
13483730
MC
1917 conn->max_recv_dlength = BYTE_UNITS *
1918 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1919
1920 conn->max_xmit_dlength = BYTE_UNITS *
1921 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1922
1923 sess->initial_r2t_en =
1924 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1925
1926 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1927
1928 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1929
1930 sess->first_burst = BYTE_UNITS *
1931 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1932
1933 sess->max_burst = BYTE_UNITS *
1934 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1935
1936 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1937
1938 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1939
1940 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
1941
1942 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1943
1944 options = le16_to_cpu(fw_ddb_entry->options);
1945 if (options & DDB_OPT_IPV6_DEVICE)
1946 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
1947 else
1948 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
1949
1950 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
1951 (char *)fw_ddb_entry->iscsi_name, buflen);
1952 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
1953 (char *)ha->name_string, buflen);
1954 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
1955 (char *)ip_addr, buflen);
6c1b8789
VC
1956 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
1957 (char *)fw_ddb_entry->iscsi_alias, buflen);
13483730
MC
1958}
1959
1960void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
1961 struct ddb_entry *ddb_entry)
1962{
1963 struct iscsi_cls_session *cls_sess;
1964 struct iscsi_cls_conn *cls_conn;
1965 uint32_t ddb_state;
1966 dma_addr_t fw_ddb_entry_dma;
1967 struct dev_db_entry *fw_ddb_entry;
1968
1969 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1970 &fw_ddb_entry_dma, GFP_KERNEL);
1971 if (!fw_ddb_entry) {
1972 ql4_printk(KERN_ERR, ha,
1973 "%s: Unable to allocate dma buffer\n", __func__);
1974 goto exit_session_conn_fwddb_param;
1975 }
1976
1977 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
1978 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
1979 NULL, NULL, NULL) == QLA_ERROR) {
1980 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1981 "get_ddb_entry for fw_ddb_index %d\n",
1982 ha->host_no, __func__,
1983 ddb_entry->fw_ddb_index));
1984 goto exit_session_conn_fwddb_param;
1985 }
1986
1987 cls_sess = ddb_entry->sess;
1988
1989 cls_conn = ddb_entry->conn;
1990
1991 /* Update params */
1992 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
1993
1994exit_session_conn_fwddb_param:
1995 if (fw_ddb_entry)
1996 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1997 fw_ddb_entry, fw_ddb_entry_dma);
1998}
1999
b3a271a9
MR
2000void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
2001 struct ddb_entry *ddb_entry)
2002{
2003 struct iscsi_cls_session *cls_sess;
2004 struct iscsi_cls_conn *cls_conn;
2005 struct iscsi_session *sess;
2006 struct iscsi_conn *conn;
2007 uint32_t ddb_state;
2008 dma_addr_t fw_ddb_entry_dma;
2009 struct dev_db_entry *fw_ddb_entry;
2010
2011 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2012 &fw_ddb_entry_dma, GFP_KERNEL);
2013 if (!fw_ddb_entry) {
2014 ql4_printk(KERN_ERR, ha,
2015 "%s: Unable to allocate dma buffer\n", __func__);
13483730 2016 goto exit_session_conn_param;
b3a271a9
MR
2017 }
2018
2019 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2020 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2021 NULL, NULL, NULL) == QLA_ERROR) {
2022 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2023 "get_ddb_entry for fw_ddb_index %d\n",
2024 ha->host_no, __func__,
2025 ddb_entry->fw_ddb_index));
13483730 2026 goto exit_session_conn_param;
b3a271a9
MR
2027 }
2028
2029 cls_sess = ddb_entry->sess;
2030 sess = cls_sess->dd_data;
2031
2032 cls_conn = ddb_entry->conn;
2033 conn = cls_conn->dd_data;
2034
13483730
MC
2035 /* Update timers after login */
2036 ddb_entry->default_relogin_timeout =
c28eaaca
NJ
2037 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
2038 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
2039 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
13483730
MC
2040 ddb_entry->default_time2wait =
2041 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2042
b3a271a9 2043 /* Update params */
376738af 2044 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
b3a271a9
MR
2045 conn->max_recv_dlength = BYTE_UNITS *
2046 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2047
2048 conn->max_xmit_dlength = BYTE_UNITS *
2049 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2050
2051 sess->initial_r2t_en =
2052 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2053
2054 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2055
2056 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2057
2058 sess->first_burst = BYTE_UNITS *
2059 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2060
2061 sess->max_burst = BYTE_UNITS *
2062 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2063
2064 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2065
2066 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2067
2068 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2069
2070 memcpy(sess->initiatorname, ha->name_string,
2071 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
13483730 2072
6c1b8789
VC
2073 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2074 (char *)fw_ddb_entry->iscsi_alias, 0);
2075
13483730
MC
2076exit_session_conn_param:
2077 if (fw_ddb_entry)
2078 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2079 fw_ddb_entry, fw_ddb_entry_dma);
b3a271a9
MR
2080}
2081
afaf5a2d
DS
2082/*
2083 * Timer routines
2084 */
2085
2086static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2087 unsigned long interval)
2088{
2089 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2090 __func__, ha->host->host_no));
2091 init_timer(&ha->timer);
2092 ha->timer.expires = jiffies + interval * HZ;
2093 ha->timer.data = (unsigned long)ha;
2094 ha->timer.function = (void (*)(unsigned long))func;
2095 add_timer(&ha->timer);
2096 ha->timer_active = 1;
2097}
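/*
 * Illustration (not part of the original driver): the function-pointer cast
 * above assumes 'func' follows the classic (pre-timer_setup) timer callback
 * convention, roughly:
 *
 *	void example_timer_fn(unsigned long data)
 *	{
 *		struct scsi_qla_host *ha = (struct scsi_qla_host *)data;
 *
 *		do_per_second_work(ha);
 *		mod_timer(&ha->timer, jiffies + HZ);
 *	}
 *
 * 'example_timer_fn' and 'do_per_second_work' are hypothetical names used
 * only for illustration; the driver passes its per-second handler
 * (qla4xxx_timer) here, with the cast reconciling the parameter type.
 */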
2098
2099static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
2100{
2101 del_timer_sync(&ha->timer);
2102 ha->timer_active = 0;
2103}
2104
2105/***
b3a271a9
MR
2106 * qla4xxx_mark_device_missing - blocks the session
2107 * @cls_session: Pointer to the session to be blocked
afaf5a2d
DS
2108 * @ddb_entry: Pointer to device database entry
2109 *
f4f5df23 2110 * This routine marks a device missing and close connection.
afaf5a2d 2111 **/
b3a271a9 2112void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
afaf5a2d 2113{
b3a271a9 2114 iscsi_block_session(cls_session);
afaf5a2d
DS
2115}
2116
f4f5df23
VC
2117/**
2118 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
2119 * @ha: Pointer to host adapter structure.
2120 *
2121 * This routine marks all devices missing by blocking their sessions.
2122 **/
2123void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
2124{
b3a271a9 2125 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
f4f5df23
VC
2126}
2127
afaf5a2d
DS
2128static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2129 struct ddb_entry *ddb_entry,
8f0722ca 2130 struct scsi_cmnd *cmd)
afaf5a2d
DS
2131{
2132 struct srb *srb;
2133
2134 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2135 if (!srb)
2136 return srb;
2137
09a0f719 2138 kref_init(&srb->srb_ref);
afaf5a2d
DS
2139 srb->ha = ha;
2140 srb->ddb = ddb_entry;
2141 srb->cmd = cmd;
2142 srb->flags = 0;
5369887a 2143 CMD_SP(cmd) = (void *)srb;
afaf5a2d
DS
2144
2145 return srb;
2146}
2147
2148static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2149{
2150 struct scsi_cmnd *cmd = srb->cmd;
2151
2152 if (srb->flags & SRB_DMA_VALID) {
5f7186c8 2153 scsi_dma_unmap(cmd);
afaf5a2d
DS
2154 srb->flags &= ~SRB_DMA_VALID;
2155 }
5369887a 2156 CMD_SP(cmd) = NULL;
afaf5a2d
DS
2157}
2158
09a0f719 2159void qla4xxx_srb_compl(struct kref *ref)
afaf5a2d 2160{
09a0f719 2161 struct srb *srb = container_of(ref, struct srb, srb_ref);
afaf5a2d 2162 struct scsi_cmnd *cmd = srb->cmd;
09a0f719 2163 struct scsi_qla_host *ha = srb->ha;
afaf5a2d
DS
2164
2165 qla4xxx_srb_free_dma(ha, srb);
2166
2167 mempool_free(srb, ha->srb_mempool);
2168
2169 cmd->scsi_done(cmd);
2170}
2171
2172/**
2173 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
8f0722ca 2174 * @host: scsi host
afaf5a2d 2175 * @cmd: Pointer to Linux's SCSI command structure
afaf5a2d
DS
2176 *
2177 * Remarks:
2178 * This routine is invoked by Linux to send a SCSI command to the driver.
2179 * The mid-level driver tries to ensure that queuecommand never gets
2180 * invoked concurrently with itself or the interrupt handler (although
2181 * the interrupt handler may call this routine as part of request-
2182 * completion handling). Unfortunately, it sometimes calls the scheduler
2183 * in interrupt context, which is a big NO! NO!
2184 **/
8f0722ca 2185static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
afaf5a2d 2186{
8f0722ca 2187 struct scsi_qla_host *ha = to_qla_host(host);
afaf5a2d 2188 struct ddb_entry *ddb_entry = cmd->device->hostdata;
7fb1921b 2189 struct iscsi_cls_session *sess = ddb_entry->sess;
afaf5a2d
DS
2190 struct srb *srb;
2191 int rval;
2192
2232be0d
LC
2193 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2194 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
2195 cmd->result = DID_NO_CONNECT << 16;
2196 else
2197 cmd->result = DID_REQUEUE << 16;
2198 goto qc_fail_command;
2199 }
2200
7fb1921b
MC
2201 if (!sess) {
2202 cmd->result = DID_IMM_RETRY << 16;
2203 goto qc_fail_command;
2204 }
2205
2206 rval = iscsi_session_chkready(sess);
2207 if (rval) {
2208 cmd->result = rval;
2209 goto qc_fail_command;
2210 }
2211
f4f5df23
VC
2212 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2213 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2214 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2215 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2216 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2217 !test_bit(AF_ONLINE, &ha->flags) ||
b3a271a9 2218 !test_bit(AF_LINK_UP, &ha->flags) ||
f4f5df23 2219 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
477ffb9d
DS
2220 goto qc_host_busy;
2221
8f0722ca 2222 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
afaf5a2d 2223 if (!srb)
8f0722ca 2224 goto qc_host_busy;
afaf5a2d
DS
2225
2226 rval = qla4xxx_send_command_to_isp(ha, srb);
2227 if (rval != QLA_SUCCESS)
2228 goto qc_host_busy_free_sp;
2229
afaf5a2d
DS
2230 return 0;
2231
2232qc_host_busy_free_sp:
2233 qla4xxx_srb_free_dma(ha, srb);
2234 mempool_free(srb, ha->srb_mempool);
2235
afaf5a2d
DS
2236qc_host_busy:
2237 return SCSI_MLQUEUE_HOST_BUSY;
2238
2239qc_fail_command:
8f0722ca 2240 cmd->scsi_done(cmd);
afaf5a2d
DS
2241
2242 return 0;
2243}
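/*
 * Note (not part of the original driver): the return values above follow the
 * standard queuecommand contract: returning 0 means the driver has taken
 * ownership of the command and will complete it through cmd->scsi_done()
 * (immediately in the qc_fail_command paths), while SCSI_MLQUEUE_HOST_BUSY
 * asks the midlayer to requeue the command and retry it later without
 * treating it as an error.
 */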
2244
2245/**
2246 * qla4xxx_mem_free - frees memory allocated to adapter
2247 * @ha: Pointer to host adapter structure.
2248 *
2249 * Frees memory previously allocated by qla4xxx_mem_alloc
2250 **/
2251static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2252{
2253 if (ha->queues)
2254 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2255 ha->queues_dma);
2256
2257 ha->queues_len = 0;
2258 ha->queues = NULL;
2259 ha->queues_dma = 0;
2260 ha->request_ring = NULL;
2261 ha->request_dma = 0;
2262 ha->response_ring = NULL;
2263 ha->response_dma = 0;
2264 ha->shadow_regs = NULL;
2265 ha->shadow_regs_dma = 0;
2266
2267 /* Free srb pool. */
2268 if (ha->srb_mempool)
2269 mempool_destroy(ha->srb_mempool);
2270
2271 ha->srb_mempool = NULL;
2272
b3a271a9
MR
2273 if (ha->chap_dma_pool)
2274 dma_pool_destroy(ha->chap_dma_pool);
2275
4549415a
LC
2276 if (ha->chap_list)
2277 vfree(ha->chap_list);
2278 ha->chap_list = NULL;
2279
13483730
MC
2280 if (ha->fw_ddb_dma_pool)
2281 dma_pool_destroy(ha->fw_ddb_dma_pool);
2282
afaf5a2d 2283 /* release io space registers */
f4f5df23
VC
2284 if (is_qla8022(ha)) {
2285 if (ha->nx_pcibase)
2286 iounmap(
2287 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
f4f5df23 2288 } else if (ha->reg)
afaf5a2d
DS
2289 iounmap(ha->reg);
2290 pci_release_regions(ha->pdev);
2291}
2292
2293/**
2294 * qla4xxx_mem_alloc - allocates memory for use by adapter.
2295 * @ha: Pointer to host adapter structure
2296 *
2297 * Allocates DMA memory for request and response queues. Also allocates memory
2298 * for srbs.
2299 **/
2300static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
2301{
2302 unsigned long align;
2303
2304 /* Allocate contiguous block of DMA memory for queues. */
2305 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2306 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
2307 sizeof(struct shadow_regs) +
2308 MEM_ALIGN_VALUE +
2309 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
2310 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
2311 &ha->queues_dma, GFP_KERNEL);
2312 if (ha->queues == NULL) {
c2660df3
VC
2313 ql4_printk(KERN_WARNING, ha,
2314 "Memory Allocation failed - queues.\n");
afaf5a2d
DS
2315
2316 goto mem_alloc_error_exit;
2317 }
2318 memset(ha->queues, 0, ha->queues_len);
2319
2320 /*
2321 * As per RISC alignment requirements -- the bus-address must be a
2322 * multiple of the request-ring size (in bytes).
2323 */
2324 align = 0;
2325 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
2326 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
2327 (MEM_ALIGN_VALUE - 1));
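	/*
	 * Illustration (not part of the original driver): a worked example of
	 * the fix-up above, assuming a hypothetical MEM_ALIGN_VALUE of 0x1000
	 * and a queues_dma of 0x12345A00:
	 *
	 *	queues_dma & (MEM_ALIGN_VALUE - 1) = 0x0A00
	 *	align = 0x1000 - 0x0A00            = 0x0600
	 *
	 * so request_dma = queues_dma + align = 0x12346000, which sits on the
	 * next aligned boundary. The extra MEM_ALIGN_VALUE bytes reserved in
	 * queues_len above guarantee the allocation is large enough to absorb
	 * this shift.
	 */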
2328
2329 /* Update request and response queue pointers. */
2330 ha->request_dma = ha->queues_dma + align;
2331 ha->request_ring = (struct queue_entry *) (ha->queues + align);
2332 ha->response_dma = ha->queues_dma + align +
2333 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
2334 ha->response_ring = (struct queue_entry *) (ha->queues + align +
2335 (REQUEST_QUEUE_DEPTH *
2336 QUEUE_SIZE));
2337 ha->shadow_regs_dma = ha->queues_dma + align +
2338 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2339 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
2340 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
2341 (REQUEST_QUEUE_DEPTH *
2342 QUEUE_SIZE) +
2343 (RESPONSE_QUEUE_DEPTH *
2344 QUEUE_SIZE));
2345
2346 /* Allocate memory for srb pool. */
2347 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
2348 mempool_free_slab, srb_cachep);
2349 if (ha->srb_mempool == NULL) {
c2660df3
VC
2350 ql4_printk(KERN_WARNING, ha,
2351 "Memory Allocation failed - SRB Pool.\n");
afaf5a2d
DS
2352
2353 goto mem_alloc_error_exit;
2354 }
2355
b3a271a9
MR
2356 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
2357 CHAP_DMA_BLOCK_SIZE, 8, 0);
2358
2359 if (ha->chap_dma_pool == NULL) {
2360 ql4_printk(KERN_WARNING, ha,
2361 "%s: chap_dma_pool allocation failed..\n", __func__);
2362 goto mem_alloc_error_exit;
2363 }
2364
13483730
MC
2365 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
2366 DDB_DMA_BLOCK_SIZE, 8, 0);
2367
2368 if (ha->fw_ddb_dma_pool == NULL) {
2369 ql4_printk(KERN_WARNING, ha,
2370 "%s: fw_ddb_dma_pool allocation failed..\n",
2371 __func__);
2372 goto mem_alloc_error_exit;
2373 }
2374
afaf5a2d
DS
2375 return QLA_SUCCESS;
2376
2377mem_alloc_error_exit:
2378 qla4xxx_mem_free(ha);
2379 return QLA_ERROR;
2380}
2381
4f77083e
MH
2382/**
2383 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
2384 * @ha: adapter block pointer.
2385 *
2386 * Note: The caller should not hold the idc lock.
2387 **/
2388static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2389{
2390 uint32_t temp, temp_state, temp_val;
2391 int status = QLA_SUCCESS;
2392
2393 temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
2394
2395 temp_state = qla82xx_get_temp_state(temp);
2396 temp_val = qla82xx_get_temp_val(temp);
2397
2398 if (temp_state == QLA82XX_TEMP_PANIC) {
2399 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
2400 " exceeds maximum allowed. Hardware has been shut"
2401 " down.\n", temp_val);
2402 status = QLA_ERROR;
2403 } else if (temp_state == QLA82XX_TEMP_WARN) {
2404 if (ha->temperature == QLA82XX_TEMP_NORMAL)
2405 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
2406 " degrees C exceeds operating range."
2407 " Immediate action needed.\n", temp_val);
2408 } else {
2409 if (ha->temperature == QLA82XX_TEMP_WARN)
2410 ql4_printk(KERN_INFO, ha, "Device temperature is"
2411 " now %d degrees C in normal range.\n",
2412 temp_val);
2413 }
2414 ha->temperature = temp_state;
2415 return status;
2416}
2417
f4f5df23
VC
2418/**
2419 * qla4_8xxx_check_fw_alive - Check firmware health
2420 * @ha: Pointer to host adapter structure.
2421 *
2422 * Context: Interrupt
2423 **/
9ee91a38 2424static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
f4f5df23 2425{
9ee91a38
SS
2426 uint32_t fw_heartbeat_counter;
2427 int status = QLA_SUCCESS;
f4f5df23
VC
2428
2429 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2232be0d
LC
2430 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2431 if (fw_heartbeat_counter == 0xffffffff) {
2432 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
2433 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
2434 ha->host_no, __func__));
9ee91a38 2435 return status;
2232be0d 2436 }
f4f5df23
VC
2437
2438 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2439 ha->seconds_since_last_heartbeat++;
2440 /* FW not alive after 2 seconds */
2441 if (ha->seconds_since_last_heartbeat == 2) {
2442 ha->seconds_since_last_heartbeat = 0;
68d92ebf
VC
2443
2444 ql4_printk(KERN_INFO, ha,
2445 "scsi(%ld): %s, Dumping hw/fw registers:\n "
2446 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
2447 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
2448 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
2449 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
9ee91a38
SS
2450 ha->host_no, __func__,
2451 qla4_8xxx_rd_32(ha,
2452 QLA82XX_PEG_HALT_STATUS1),
68d92ebf
VC
2453 qla4_8xxx_rd_32(ha,
2454 QLA82XX_PEG_HALT_STATUS2),
2455 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
2456 0x3c),
2457 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
2458 0x3c),
2459 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
2460 0x3c),
2461 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
2462 0x3c),
2463 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
2464 0x3c));
9ee91a38 2465 status = QLA_ERROR;
f4f5df23 2466 }
99457d75
LC
2467 } else
2468 ha->seconds_since_last_heartbeat = 0;
2469
f4f5df23 2470 ha->fw_heartbeat_counter = fw_heartbeat_counter;
9ee91a38 2471 return status;
f4f5df23
VC
2472}
2473
2474/**
2475 * qla4_8xxx_watchdog - Poll dev state
2476 * @ha: Pointer to host adapter structure.
2477 *
2478 * Context: Interrupt
2479 **/
2480void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2481{
9ee91a38 2482 uint32_t dev_state, halt_status;
f4f5df23
VC
2483
2484 /* don't poll if reset is going on */
d56a1f7b
LC
2485 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2486 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
977f46a4 2487 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
9ee91a38 2488 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
4f77083e
MH
2489
2490 if (qla4_8xxx_check_temp(ha)) {
e6bd0ebd
GM
2491 ql4_printk(KERN_INFO, ha, "disabling pause"
2492 " transmit on port 0 & 1.\n");
2493 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2494 CRB_NIU_XG_PAUSE_CTL_P0 |
2495 CRB_NIU_XG_PAUSE_CTL_P1);
4f77083e
MH
2496 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2497 qla4xxx_wake_dpc(ha);
2498 } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
f4f5df23 2499 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3930b8c1
VC
2500 if (!ql4xdontresethba) {
2501 ql4_printk(KERN_INFO, ha, "%s: HW State: "
2502 "NEED RESET!\n", __func__);
2503 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2504 qla4xxx_wake_dpc(ha);
3930b8c1 2505 }
f4f5df23
VC
2506 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
2507 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3930b8c1
VC
2508 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
2509 __func__);
f4f5df23
VC
2510 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
2511 qla4xxx_wake_dpc(ha);
2512 } else {
2513 /* Check firmware health */
9ee91a38 2514 if (qla4_8xxx_check_fw_alive(ha)) {
e6bd0ebd
GM
2515 ql4_printk(KERN_INFO, ha, "disabling pause"
2516 " transmit on port 0 & 1.\n");
2517 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2518 CRB_NIU_XG_PAUSE_CTL_P0 |
2519 CRB_NIU_XG_PAUSE_CTL_P1);
9ee91a38
SS
2520 halt_status = qla4_8xxx_rd_32(ha,
2521 QLA82XX_PEG_HALT_STATUS1);
2522
46801ba6 2523 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
527c8b2e
NJ
2524 ql4_printk(KERN_ERR, ha, "%s:"
2525 " Firmware aborted with"
2526 " error code 0x00006700."
2527 " Device is being reset\n",
2528 __func__);
2529
9ee91a38
SS
2530 /* Since we cannot change dev_state in interrupt
2531				 * context, set the appropriate DPC flag and then wake up
2532				 * the DPC */
2533 if (halt_status & HALT_STATUS_UNRECOVERABLE)
2534 set_bit(DPC_HA_UNRECOVERABLE,
2535 &ha->dpc_flags);
2536 else {
2537 ql4_printk(KERN_INFO, ha, "%s: detect "
2538 "abort needed!\n", __func__);
2539 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2540 }
2541 qla4xxx_mailbox_premature_completion(ha);
2542 qla4xxx_wake_dpc(ha);
2543 }
f4f5df23
VC
2544 }
2545 }
2546}
2547
4a4bc2e9 2548static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
13483730
MC
2549{
2550 struct iscsi_session *sess;
2551 struct ddb_entry *ddb_entry;
2552 struct scsi_qla_host *ha;
2553
2554 sess = cls_sess->dd_data;
2555 ddb_entry = sess->dd_data;
2556 ha = ddb_entry->ha;
2557
2558 if (!(ddb_entry->ddb_type == FLASH_DDB))
2559 return;
2560
2561 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
2562 !iscsi_is_session_online(cls_sess)) {
2563 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
2564 INVALID_ENTRY) {
2565 if (atomic_read(&ddb_entry->retry_relogin_timer) ==
2566 0) {
2567 atomic_set(&ddb_entry->retry_relogin_timer,
2568 INVALID_ENTRY);
2569 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2570 set_bit(DF_RELOGIN, &ddb_entry->flags);
2571 DEBUG2(ql4_printk(KERN_INFO, ha,
2572 "%s: index [%d] login device\n",
2573 __func__, ddb_entry->fw_ddb_index));
2574 } else
2575 atomic_dec(&ddb_entry->retry_relogin_timer);
2576 }
2577 }
2578
2579 /* Wait for relogin to timeout */
2580 if (atomic_read(&ddb_entry->relogin_timer) &&
2581 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
2582 /*
2583 * If the relogin times out and the device is
2584 * still NOT ONLINE then try and relogin again.
2585 */
2586 if (!iscsi_is_session_online(cls_sess)) {
2587 /* Reset retry relogin timer */
2588 atomic_inc(&ddb_entry->relogin_retry_count);
2589 DEBUG2(ql4_printk(KERN_INFO, ha,
2590 "%s: index[%d] relogin timed out-retrying"
2591 " relogin (%d), retry (%d)\n", __func__,
2592 ddb_entry->fw_ddb_index,
2593 atomic_read(&ddb_entry->relogin_retry_count),
2594 ddb_entry->default_time2wait + 4));
2595 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2596 atomic_set(&ddb_entry->retry_relogin_timer,
2597 ddb_entry->default_time2wait + 4);
2598 }
2599 }
2600}
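/*
 * Note (not part of the original driver): two per-DDB counters are in play
 * above. retry_relogin_timer counts down the delay before the next relogin
 * attempt is scheduled (via DPC_RELOGIN_DEVICE and DF_RELOGIN), while
 * relogin_timer bounds how long a single relogin attempt may run before it
 * is considered timed out and the retry timer is re-armed. Both are
 * decremented once per second, since this routine is driven from
 * qla4xxx_timer() below.
 */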
2601
afaf5a2d
DS
2602/**
2603 * qla4xxx_timer - checks every second for work to do.
2604 * @ha: Pointer to host adapter structure.
2605 **/
2606static void qla4xxx_timer(struct scsi_qla_host *ha)
2607{
afaf5a2d 2608 int start_dpc = 0;
2232be0d
LC
2609 uint16_t w;
2610
13483730
MC
2611 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
2612
2232be0d
LC
2613 /* If we are in the middle of AER/EEH processing
2614 * skip any processing and reschedule the timer
2615 */
2616 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2617 mod_timer(&ha->timer, jiffies + HZ);
2618 return;
2619 }
2620
2621 /* Hardware read to trigger an EEH error during mailbox waits. */
2622 if (!pci_channel_offline(ha->pdev))
2623 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
afaf5a2d 2624
f4f5df23
VC
2625 if (is_qla8022(ha)) {
2626 qla4_8xxx_watchdog(ha);
2627 }
2628
f4f5df23
VC
2629 if (!is_qla8022(ha)) {
2630 /* Check for heartbeat interval. */
2631 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
2632 ha->heartbeat_interval != 0) {
2633 ha->seconds_since_last_heartbeat++;
2634 if (ha->seconds_since_last_heartbeat >
2635 ha->heartbeat_interval + 2)
2636 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2637 }
afaf5a2d
DS
2638 }
2639
ff884430
VC
2640 /* Process any deferred work. */
2641 if (!list_empty(&ha->work_list))
2642 start_dpc++;
2643
afaf5a2d 2644 /* Wakeup the dpc routine for this adapter, if needed. */
1b46807e 2645 if (start_dpc ||
afaf5a2d
DS
2646 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2647 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
2648 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
f4f5df23 2649 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
afaf5a2d
DS
2650 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2651 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
065aa1b4 2652 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
f4f5df23
VC
2653 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2654 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1b46807e 2655 test_bit(DPC_AEN, &ha->dpc_flags)) {
afaf5a2d
DS
2656 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
2657 " - dpc flags = 0x%lx\n",
2658 ha->host_no, __func__, ha->dpc_flags));
f4f5df23 2659 qla4xxx_wake_dpc(ha);
afaf5a2d
DS
2660 }
2661
2662 /* Reschedule timer thread to call us back in one second */
2663 mod_timer(&ha->timer, jiffies + HZ);
2664
2665 DEBUG2(ha->seconds_since_last_intr++);
2666}
2667
2668/**
2669 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
2670 * @ha: Pointer to host adapter structure.
2671 *
2672 * This routine stalls the driver until all outstanding commands are returned.
2673 * Caller must release the Hardware Lock prior to calling this routine.
2674 **/
2675static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
2676{
2677 uint32_t index = 0;
afaf5a2d
DS
2678 unsigned long flags;
2679 struct scsi_cmnd *cmd;
afaf5a2d 2680
f4f5df23
VC
2681 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
2682
2683 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
2684 "complete\n", WAIT_CMD_TOV));
2685
2686 while (!time_after_eq(jiffies, wtime)) {
afaf5a2d
DS
2687 spin_lock_irqsave(&ha->hardware_lock, flags);
2688 /* Find a command that hasn't completed. */
2689 for (index = 0; index < ha->host->can_queue; index++) {
2690 cmd = scsi_host_find_tag(ha->host, index);
a1e0063d
MC
2691 /*
2692 * We cannot just check if the index is valid,
2693			 * because if we are run from the scsi eh, then
2694 * the scsi/block layer is going to prevent
2695 * the tag from being released.
2696 */
2697 if (cmd != NULL && CMD_SP(cmd))
afaf5a2d
DS
2698 break;
2699 }
2700 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2701
2702 /* If No Commands are pending, wait is complete */
f4f5df23
VC
2703 if (index == ha->host->can_queue)
2704 return QLA_SUCCESS;
afaf5a2d 2705
f4f5df23
VC
2706 msleep(1000);
2707 }
2708	/* If we timed out waiting for commands to come back,
2709	 * return ERROR. */
2710 return QLA_ERROR;
afaf5a2d
DS
2711}
2712
f4f5df23 2713int qla4xxx_hw_reset(struct scsi_qla_host *ha)
afaf5a2d 2714{
afaf5a2d 2715 uint32_t ctrl_status;
477ffb9d
DS
2716 unsigned long flags = 0;
2717
2718 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
afaf5a2d 2719
f4f5df23
VC
2720 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
2721 return QLA_ERROR;
2722
afaf5a2d
DS
2723 spin_lock_irqsave(&ha->hardware_lock, flags);
2724
2725 /*
2726 * If the SCSI Reset Interrupt bit is set, clear it.
2727 * Otherwise, the Soft Reset won't work.
2728 */
2729 ctrl_status = readw(&ha->reg->ctrl_status);
2730 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
2731 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2732
2733 /* Issue Soft Reset */
2734 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
2735 readl(&ha->reg->ctrl_status);
2736
2737 spin_unlock_irqrestore(&ha->hardware_lock, flags);
f4f5df23 2738 return QLA_SUCCESS;
477ffb9d
DS
2739}
2740
2741/**
2742 * qla4xxx_soft_reset - performs soft reset.
2743 * @ha: Pointer to host adapter structure.
2744 **/
2745int qla4xxx_soft_reset(struct scsi_qla_host *ha)
2746{
2747 uint32_t max_wait_time;
2748 unsigned long flags = 0;
f931c534 2749 int status;
477ffb9d
DS
2750 uint32_t ctrl_status;
2751
f931c534
VC
2752 status = qla4xxx_hw_reset(ha);
2753 if (status != QLA_SUCCESS)
2754 return status;
afaf5a2d 2755
f931c534 2756 status = QLA_ERROR;
afaf5a2d
DS
2757 /* Wait until the Network Reset Intr bit is cleared */
2758 max_wait_time = RESET_INTR_TOV;
2759 do {
2760 spin_lock_irqsave(&ha->hardware_lock, flags);
2761 ctrl_status = readw(&ha->reg->ctrl_status);
2762 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2763
2764 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
2765 break;
2766
2767 msleep(1000);
2768 } while ((--max_wait_time));
2769
2770 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
2771 DEBUG2(printk(KERN_WARNING
2772 "scsi%ld: Network Reset Intr not cleared by "
2773 "Network function, clearing it now!\n",
2774 ha->host_no));
2775 spin_lock_irqsave(&ha->hardware_lock, flags);
2776 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
2777 readl(&ha->reg->ctrl_status);
2778 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2779 }
2780
2781 /* Wait until the firmware tells us the Soft Reset is done */
2782 max_wait_time = SOFT_RESET_TOV;
2783 do {
2784 spin_lock_irqsave(&ha->hardware_lock, flags);
2785 ctrl_status = readw(&ha->reg->ctrl_status);
2786 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2787
2788 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
2789 status = QLA_SUCCESS;
2790 break;
2791 }
2792
2793 msleep(1000);
2794 } while ((--max_wait_time));
2795
2796 /*
2797 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
2798 * after the soft reset has taken place.
2799 */
2800 spin_lock_irqsave(&ha->hardware_lock, flags);
2801 ctrl_status = readw(&ha->reg->ctrl_status);
2802 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
2803 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2804 readl(&ha->reg->ctrl_status);
2805 }
2806 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2807
2808	/* If the soft reset fails, then most probably the BIOS on the other
2809	 * function is also enabled.
2810	 * Since initialization is sequential, the other fn
2811	 * won't be able to acknowledge the soft reset.
2812	 * Issue a force soft reset to work around this scenario.
2813 */
2814 if (max_wait_time == 0) {
2815 /* Issue Force Soft Reset */
2816 spin_lock_irqsave(&ha->hardware_lock, flags);
2817 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2818 readl(&ha->reg->ctrl_status);
2819 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2820 /* Wait until the firmware tells us the Soft Reset is done */
2821 max_wait_time = SOFT_RESET_TOV;
2822 do {
2823 spin_lock_irqsave(&ha->hardware_lock, flags);
2824 ctrl_status = readw(&ha->reg->ctrl_status);
2825 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2826
2827 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2828 status = QLA_SUCCESS;
2829 break;
2830 }
2831
2832 msleep(1000);
2833 } while ((--max_wait_time));
2834 }
2835
2836 return status;
2837}
2838
afaf5a2d 2839/**
f4f5df23 2840 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
afaf5a2d 2841 * @ha: Pointer to host adapter structure.
f4f5df23 2842 * @res: returned scsi status
afaf5a2d
DS
2843 *
2844 * This routine is called just prior to a HARD RESET to return all
2845 * outstanding commands back to the Operating System.
2847 * The caller should make sure that the following locks are released
2848 * before calling this routine: hardware lock and io_request_lock.
2848 **/
f4f5df23 2849static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
afaf5a2d
DS
2850{
2851 struct srb *srb;
2852 int i;
2853 unsigned long flags;
2854
2855 spin_lock_irqsave(&ha->hardware_lock, flags);
2856 for (i = 0; i < ha->host->can_queue; i++) {
2857 srb = qla4xxx_del_from_active_array(ha, i);
2858 if (srb != NULL) {
f4f5df23 2859 srb->cmd->result = res;
09a0f719 2860 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
afaf5a2d
DS
2861 }
2862 }
2863 spin_unlock_irqrestore(&ha->hardware_lock, flags);
afaf5a2d
DS
2864}
2865
f4f5df23
VC
2866void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2867{
2868 clear_bit(AF_ONLINE, &ha->flags);
2869
2870 /* Disable the board */
2871 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
f4f5df23
VC
2872
2873 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2874 qla4xxx_mark_all_devices_missing(ha);
2875 clear_bit(AF_INIT_DONE, &ha->flags);
2876}
2877
b3a271a9
MR
2878static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2879{
2880 struct iscsi_session *sess;
2881 struct ddb_entry *ddb_entry;
2882
2883 sess = cls_session->dd_data;
2884 ddb_entry = sess->dd_data;
2885 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
13483730
MC
2886
2887 if (ddb_entry->ddb_type == FLASH_DDB)
2888 iscsi_block_session(ddb_entry->sess);
2889 else
2890 iscsi_session_failure(cls_session->dd_data,
2891 ISCSI_ERR_CONN_FAILED);
b3a271a9
MR
2892}
2893
afaf5a2d
DS
2894/**
2895 * qla4xxx_recover_adapter - recovers adapter after a fatal error
2896 * @ha: Pointer to host adapter structure.
afaf5a2d 2897 **/
f4f5df23 2898static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
afaf5a2d 2899{
f4f5df23
VC
2900 int status = QLA_ERROR;
2901 uint8_t reset_chip = 0;
8e0f3a66 2902 uint32_t dev_state;
9ee91a38 2903 unsigned long wait;
afaf5a2d
DS
2904
2905 /* Stall incoming I/O until we are done */
f4f5df23 2906 scsi_block_requests(ha->host);
afaf5a2d 2907 clear_bit(AF_ONLINE, &ha->flags);
b3a271a9 2908 clear_bit(AF_LINK_UP, &ha->flags);
50a29aec 2909
f4f5df23 2910 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
afaf5a2d 2911
f4f5df23 2912 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
afaf5a2d 2913
b3a271a9
MR
2914 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2915
f4f5df23
VC
2916 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2917 reset_chip = 1;
afaf5a2d 2918
f4f5df23
VC
2919 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2920 * do not reset adapter, jump to initialize_adapter */
2921 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2922 status = QLA_SUCCESS;
2923 goto recover_ha_init_adapter;
2924 }
afaf5a2d 2925
f4f5df23
VC
2926 /* For the ISP-82xx adapter, issue a stop_firmware if invoked
2927 * from eh_host_reset or ioctl module */
2928 if (is_qla8022(ha) && !reset_chip &&
2929 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2930
2931 DEBUG2(ql4_printk(KERN_INFO, ha,
2932 "scsi%ld: %s - Performing stop_firmware...\n",
2933 ha->host_no, __func__));
2934 status = ha->isp_ops->reset_firmware(ha);
2935 if (status == QLA_SUCCESS) {
2bd1e2be
NJ
2936 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2937 qla4xxx_cmd_wait(ha);
f4f5df23
VC
2938 ha->isp_ops->disable_intrs(ha);
2939 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2940 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2941 } else {
2942 /* If the stop_firmware fails then
2943 * reset the entire chip */
2944 reset_chip = 1;
2945 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2946 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2947 }
2948 }
dca05c4c 2949
f4f5df23
VC
2950 /* Issue full chip reset if recovering from a catastrophic error,
2951 * or if stop_firmware fails for ISP-82xx.
2952 * This is the default case for ISP-4xxx */
2953 if (!is_qla8022(ha) || reset_chip) {
9ee91a38
SS
2954 if (!is_qla8022(ha))
2955 goto chip_reset;
2956
2957 /* Check if 82XX firmware is alive or not
2958 * We may have arrived here from NEED_RESET
2959 * detection only */
2960 if (test_bit(AF_FW_RECOVERY, &ha->flags))
2961 goto chip_reset;
2962
2963 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
2964 while (time_before(jiffies, wait)) {
2965 if (qla4_8xxx_check_fw_alive(ha)) {
2966 qla4xxx_mailbox_premature_completion(ha);
2967 break;
2968 }
2969
2970 set_current_state(TASK_UNINTERRUPTIBLE);
2971 schedule_timeout(HZ);
2972 }
2973
2bd1e2be
NJ
2974 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2975 qla4xxx_cmd_wait(ha);
9ee91a38 2976chip_reset:
f4f5df23
VC
2977 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2978 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2979 DEBUG2(ql4_printk(KERN_INFO, ha,
2980 "scsi%ld: %s - Performing chip reset..\n",
2981 ha->host_no, __func__));
2982 status = ha->isp_ops->reset_chip(ha);
2983 }
afaf5a2d
DS
2984
2985 /* Flush any pending ddb changed AENs */
2986 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2987
f4f5df23
VC
2988recover_ha_init_adapter:
2989 /* Upon successful firmware/chip reset, re-initialize the adapter */
afaf5a2d 2990 if (status == QLA_SUCCESS) {
f4f5df23
VC
2991 /* For ISP-4xxx, force function 1 to always initialize
2992		 * before function 3 to prevent both functions from
2993		 * stepping on top of each other */
2994 if (!is_qla8022(ha) && (ha->mac_index == 3))
2995 ssleep(6);
2996
2997 /* NOTE: AF_ONLINE flag set upon successful completion of
2998 * qla4xxx_initialize_adapter */
13483730 2999 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
afaf5a2d
DS
3000 }
3001
f4f5df23
VC
3002 /* Retry failed adapter initialization, if necessary
3003 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
3004 * case to prevent ping-pong resets between functions */
3005 if (!test_bit(AF_ONLINE, &ha->flags) &&
3006 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 3007 /* Adapter initialization failed, see if we can retry
f4f5df23
VC
3008 * resetting the ha.
3009 * Since we don't want to block the DPC for too long
3010 * with multiple resets in the same thread,
3011 * utilize DPC to retry */
8e0f3a66
SR
3012 if (is_qla8022(ha)) {
3013 qla4_8xxx_idc_lock(ha);
3014 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3015 qla4_8xxx_idc_unlock(ha);
3016 if (dev_state == QLA82XX_DEV_FAILED) {
3017 ql4_printk(KERN_INFO, ha, "%s: don't retry "
3018 "recover adapter. H/W is in Failed "
3019 "state\n", __func__);
3020 qla4xxx_dead_adapter_cleanup(ha);
3021 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3022 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3023 clear_bit(DPC_RESET_HA_FW_CONTEXT,
3024 &ha->dpc_flags);
3025 status = QLA_ERROR;
3026
3027 goto exit_recover;
3028 }
3029 }
3030
afaf5a2d
DS
3031 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
3032 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
3033 DEBUG2(printk("scsi%ld: recover adapter - retrying "
3034 "(%d) more times\n", ha->host_no,
3035 ha->retry_reset_ha_cnt));
3036 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3037 status = QLA_ERROR;
3038 } else {
3039 if (ha->retry_reset_ha_cnt > 0) {
3040 /* Schedule another Reset HA--DPC will retry */
3041 ha->retry_reset_ha_cnt--;
3042 DEBUG2(printk("scsi%ld: recover adapter - "
3043 "retry remaining %d\n",
3044 ha->host_no,
3045 ha->retry_reset_ha_cnt));
3046 status = QLA_ERROR;
3047 }
3048
3049 if (ha->retry_reset_ha_cnt == 0) {
3050 /* Recover adapter retries have been exhausted.
3051 * Adapter DEAD */
3052 DEBUG2(printk("scsi%ld: recover adapter "
3053 "failed - board disabled\n",
3054 ha->host_no));
f4f5df23 3055 qla4xxx_dead_adapter_cleanup(ha);
afaf5a2d
DS
3056 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3057 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 3058 clear_bit(DPC_RESET_HA_FW_CONTEXT,
afaf5a2d
DS
3059 &ha->dpc_flags);
3060 status = QLA_ERROR;
3061 }
3062 }
3063 } else {
3064 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 3065 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
afaf5a2d
DS
3066 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3067 }
3068
8e0f3a66 3069exit_recover:
afaf5a2d
DS
3070 ha->adapter_error_count++;
3071
f4f5df23
VC
3072 if (test_bit(AF_ONLINE, &ha->flags))
3073 ha->isp_ops->enable_intrs(ha);
3074
3075 scsi_unblock_requests(ha->host);
3076
3077 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3078 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
25985edc 3079 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
afaf5a2d 3080
afaf5a2d
DS
3081 return status;
3082}
3083
b3a271a9 3084static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
2d7924e6 3085{
b3a271a9
MR
3086 struct iscsi_session *sess;
3087 struct ddb_entry *ddb_entry;
3088 struct scsi_qla_host *ha;
2d7924e6 3089
b3a271a9
MR
3090 sess = cls_session->dd_data;
3091 ddb_entry = sess->dd_data;
3092 ha = ddb_entry->ha;
3093 if (!iscsi_is_session_online(cls_session)) {
3094 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3095 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3096 " unblock session\n", ha->host_no, __func__,
3097 ddb_entry->fw_ddb_index);
3098 iscsi_unblock_session(ddb_entry->sess);
3099 } else {
3100 /* Trigger relogin */
13483730
MC
3101 if (ddb_entry->ddb_type == FLASH_DDB) {
3102 if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
3103 qla4xxx_arm_relogin_timer(ddb_entry);
3104 } else
3105 iscsi_session_failure(cls_session->dd_data,
3106 ISCSI_ERR_CONN_FAILED);
2d7924e6
VC
3107 }
3108 }
3109}
3110
13483730
MC
3111int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
3112{
3113 struct iscsi_session *sess;
3114 struct ddb_entry *ddb_entry;
3115 struct scsi_qla_host *ha;
3116
3117 sess = cls_session->dd_data;
3118 ddb_entry = sess->dd_data;
3119 ha = ddb_entry->ha;
3120 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3121 " unblock session\n", ha->host_no, __func__,
3122 ddb_entry->fw_ddb_index);
3123
3124 iscsi_unblock_session(ddb_entry->sess);
3125
3126 /* Start scan target */
3127 if (test_bit(AF_ONLINE, &ha->flags)) {
3128 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3129 " start scan\n", ha->host_no, __func__,
3130 ddb_entry->fw_ddb_index);
3131 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
3132 }
3133 return QLA_SUCCESS;
3134}
3135
3136int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3137{
3138 struct iscsi_session *sess;
3139 struct ddb_entry *ddb_entry;
3140 struct scsi_qla_host *ha;
3141
3142 sess = cls_session->dd_data;
3143 ddb_entry = sess->dd_data;
3144 ha = ddb_entry->ha;
3145 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3146 " unblock user space session\n", ha->host_no, __func__,
3147 ddb_entry->fw_ddb_index);
3148 iscsi_conn_start(ddb_entry->conn);
3149 iscsi_conn_login_event(ddb_entry->conn,
3150 ISCSI_CONN_STATE_LOGGED_IN);
3151
3152 return QLA_SUCCESS;
3153}
3154
b3a271a9
MR
3155static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
3156{
3157 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
3158}
3159
13483730
MC
3160static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3161{
3162 uint16_t relogin_timer;
3163 struct iscsi_session *sess;
3164 struct ddb_entry *ddb_entry;
3165 struct scsi_qla_host *ha;
3166
3167 sess = cls_sess->dd_data;
3168 ddb_entry = sess->dd_data;
3169 ha = ddb_entry->ha;
3170
3171 relogin_timer = max(ddb_entry->default_relogin_timeout,
3172 (uint16_t)RELOGIN_TOV);
3173 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3174
3175 DEBUG2(ql4_printk(KERN_INFO, ha,
3176 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3177 ddb_entry->fw_ddb_index, relogin_timer));
3178
3179 qla4xxx_login_flash_ddb(cls_sess);
3180}
3181
3182static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3183{
3184 struct iscsi_session *sess;
3185 struct ddb_entry *ddb_entry;
3186 struct scsi_qla_host *ha;
3187
3188 sess = cls_sess->dd_data;
3189 ddb_entry = sess->dd_data;
3190 ha = ddb_entry->ha;
3191
3192 if (!(ddb_entry->ddb_type == FLASH_DDB))
3193 return;
3194
3195 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3196 !iscsi_is_session_online(cls_sess)) {
3197 DEBUG2(ql4_printk(KERN_INFO, ha,
3198 "relogin issued\n"));
3199 qla4xxx_relogin_flash_ddb(cls_sess);
3200 }
3201}
3202
f4f5df23
VC
3203void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
3204{
1b46807e 3205 if (ha->dpc_thread)
f4f5df23 3206 queue_work(ha->dpc_thread, &ha->dpc_work);
f4f5df23
VC
3207}
3208
ff884430
VC
3209static struct qla4_work_evt *
3210qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3211 enum qla4_work_type type)
3212{
3213 struct qla4_work_evt *e;
3214 uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3215
3216 e = kzalloc(size, GFP_ATOMIC);
3217 if (!e)
3218 return NULL;
3219
3220 INIT_LIST_HEAD(&e->list);
3221 e->type = type;
3222 return e;
3223}
3224
3225static void qla4xxx_post_work(struct scsi_qla_host *ha,
3226 struct qla4_work_evt *e)
3227{
3228 unsigned long flags;
3229
3230 spin_lock_irqsave(&ha->work_lock, flags);
3231 list_add_tail(&e->list, &ha->work_list);
3232 spin_unlock_irqrestore(&ha->work_lock, flags);
3233 qla4xxx_wake_dpc(ha);
3234}
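/*
 * Illustration (not part of the original driver): a typical producer pairs
 * the two helpers above, as qla4xxx_post_aen_work() below does:
 *
 *	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
 *	if (!e)
 *		return QLA_ERROR;
 *	e->u.aen.code = aen_code;
 *	e->u.aen.data_size = data_size;
 *	memcpy(e->u.aen.data, data, data_size);
 *	qla4xxx_post_work(ha, e);
 *
 * The queued event is later consumed in process context by
 * qla4xxx_do_work(), which runs from the DPC thread.
 */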
3235
3236int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3237 enum iscsi_host_event_code aen_code,
3238 uint32_t data_size, uint8_t *data)
3239{
3240 struct qla4_work_evt *e;
3241
3242 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3243 if (!e)
3244 return QLA_ERROR;
3245
3246 e->u.aen.code = aen_code;
3247 e->u.aen.data_size = data_size;
3248 memcpy(e->u.aen.data, data, data_size);
3249
3250 qla4xxx_post_work(ha, e);
3251
3252 return QLA_SUCCESS;
3253}
3254
c0b9d3f7
VC
3255int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3256 uint32_t status, uint32_t pid,
3257 uint32_t data_size, uint8_t *data)
3258{
3259 struct qla4_work_evt *e;
3260
3261 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3262 if (!e)
3263 return QLA_ERROR;
3264
3265 e->u.ping.status = status;
3266 e->u.ping.pid = pid;
3267 e->u.ping.data_size = data_size;
3268 memcpy(e->u.ping.data, data, data_size);
3269
3270 qla4xxx_post_work(ha, e);
3271
3272 return QLA_SUCCESS;
3273}
3274
a7380a65 3275static void qla4xxx_do_work(struct scsi_qla_host *ha)
ff884430
VC
3276{
3277 struct qla4_work_evt *e, *tmp;
3278 unsigned long flags;
3279 LIST_HEAD(work);
3280
3281 spin_lock_irqsave(&ha->work_lock, flags);
3282 list_splice_init(&ha->work_list, &work);
3283 spin_unlock_irqrestore(&ha->work_lock, flags);
3284
3285 list_for_each_entry_safe(e, tmp, &work, list) {
3286 list_del_init(&e->list);
3287
3288 switch (e->type) {
3289 case QLA4_EVENT_AEN:
3290 iscsi_post_host_event(ha->host_no,
3291 &qla4xxx_iscsi_transport,
3292 e->u.aen.code,
3293 e->u.aen.data_size,
3294 e->u.aen.data);
3295 break;
c0b9d3f7
VC
3296 case QLA4_EVENT_PING_STATUS:
3297 iscsi_ping_comp_event(ha->host_no,
3298 &qla4xxx_iscsi_transport,
3299 e->u.ping.status,
3300 e->u.ping.pid,
3301 e->u.ping.data_size,
3302 e->u.ping.data);
3303 break;
ff884430
VC
3304 default:
3305 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
3306 "supported", e->type);
3307 }
3308 kfree(e);
3309 }
3310}
3311
afaf5a2d
DS
3312/**
3313 * qla4xxx_do_dpc - dpc routine
3314 * @work: pointer to the dpc work_struct embedded in the adapter structure
3315 *
3316 * This routine is a task that is scheduled by the interrupt handler
3317 * to perform the background processing for interrupts. We put it
3318 * on a task queue that is consumed whenever the scheduler runs; that's
3319 * so you can do anything (e.g. put the process to sleep etc). In fact,
3320 * the mid-level tries to sleep when it reaches the driver threshold
3321 * "host->can_queue". This could cause a panic if we were in our interrupt code.
3322 **/
c4028958 3323static void qla4xxx_do_dpc(struct work_struct *work)
afaf5a2d 3324{
c4028958
DH
3325 struct scsi_qla_host *ha =
3326 container_of(work, struct scsi_qla_host, dpc_work);
477ffb9d 3327 int status = QLA_ERROR;
afaf5a2d 3328
f26b9044 3329 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
f4f5df23
VC
3330 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
3331 ha->host_no, __func__, ha->flags, ha->dpc_flags))
afaf5a2d
DS
3332
3333 /* Initialization not yet finished. Don't do anything yet. */
3334 if (!test_bit(AF_INIT_DONE, &ha->flags))
1b46807e 3335 return;
afaf5a2d 3336
2232be0d
LC
3337 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3338 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
3339 ha->host_no, __func__, ha->flags));
1b46807e 3340 return;
2232be0d
LC
3341 }
3342
ff884430
VC
3343 /* post events to application */
3344 qla4xxx_do_work(ha);
3345
f4f5df23
VC
3346 if (is_qla8022(ha)) {
3347 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3348 qla4_8xxx_idc_lock(ha);
3349 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3350 QLA82XX_DEV_FAILED);
3351 qla4_8xxx_idc_unlock(ha);
3352 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3353 qla4_8xxx_device_state_handler(ha);
3354 }
3355 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3356 qla4_8xxx_need_qsnt_handler(ha);
3357 }
3358 }
3359
3360 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
3361 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
afaf5a2d 3362 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
f4f5df23
VC
3363 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3364 if (ql4xdontresethba) {
3365 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3366 ha->host_no, __func__));
3367 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3368 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3369 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3370 goto dpc_post_reset_ha;
3371 }
3372 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3373 test_bit(DPC_RESET_HA, &ha->dpc_flags))
3374 qla4xxx_recover_adapter(ha);
afaf5a2d 3375
477ffb9d 3376 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 3377 uint8_t wait_time = RESET_INTR_TOV;
afaf5a2d 3378
afaf5a2d
DS
3379 while ((readw(&ha->reg->ctrl_status) &
3380 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
3381 if (--wait_time == 0)
3382 break;
afaf5a2d 3383 msleep(1000);
afaf5a2d 3384 }
afaf5a2d
DS
3385 if (wait_time == 0)
3386 DEBUG2(printk("scsi%ld: %s: SR|FSR "
3387 "bit not cleared-- resetting\n",
3388 ha->host_no, __func__));
f4f5df23 3389 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
477ffb9d
DS
3390 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
3391 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
f4f5df23 3392 status = qla4xxx_recover_adapter(ha);
477ffb9d
DS
3393 }
3394 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3395 if (status == QLA_SUCCESS)
f4f5df23 3396 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
3397 }
3398 }
3399
f4f5df23 3400dpc_post_reset_ha:
afaf5a2d
DS
3401 /* ---- process AEN? --- */
3402 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
3403 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
3404
3405 /* ---- Get DHCP IP Address? --- */
3406 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
3407 qla4xxx_get_dhcp_ip_address(ha);
3408
13483730
MC
3409 /* ---- relogin device? --- */
3410 if (adapter_up(ha) &&
3411 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
3412 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
3413 }
3414
065aa1b4
VC
3415 /* ---- link change? --- */
3416 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
3417 if (!test_bit(AF_LINK_UP, &ha->flags)) {
3418 /* ---- link down? --- */
2d7924e6 3419 qla4xxx_mark_all_devices_missing(ha);
065aa1b4
VC
3420 } else {
3421 /* ---- link up? --- *
3422 * F/W will auto login to all devices ONLY ONCE after
3423 * link up during driver initialization and runtime
3424 * fatal error recovery. Therefore, the driver must
3425 * manually relogin to devices when recovering from
3426 * connection failures, logouts, expired KATO, etc. */
13483730
MC
3427 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
3428 qla4xxx_build_ddb_list(ha, ha->is_reset);
3429 iscsi_host_for_each_session(ha->host,
3430 qla4xxx_login_flash_ddb);
3431 } else
3432 qla4xxx_relogin_all_devices(ha);
065aa1b4
VC
3433 }
3434 }
afaf5a2d
DS
3435}
3436
3437/**
3438 * qla4xxx_free_adapter - release the adapter
3439 * @ha: pointer to adapter structure
3440 **/
3441static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3442{
8a288960 3443 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
afaf5a2d
DS
3444
3445 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
3446 /* Turn-off interrupts on the card. */
f4f5df23 3447 ha->isp_ops->disable_intrs(ha);
afaf5a2d
DS
3448 }
3449
f4f5df23
VC
3450 /* Remove timer thread, if present */
3451 if (ha->timer_active)
3452 qla4xxx_stop_timer(ha);
3453
afaf5a2d
DS
3454 /* Kill the kernel thread for this host */
3455 if (ha->dpc_thread)
3456 destroy_workqueue(ha->dpc_thread);
3457
b3a271a9
MR
3458 /* Kill the task work queue for this host */
3459 if (ha->task_wq)
3460 destroy_workqueue(ha->task_wq);
3461
f4f5df23
VC
3462 /* Put firmware in known state */
3463 ha->isp_ops->reset_firmware(ha);
afaf5a2d 3464
f4f5df23
VC
3465 if (is_qla8022(ha)) {
3466 qla4_8xxx_idc_lock(ha);
3467 qla4_8xxx_clear_drv_active(ha);
3468 qla4_8xxx_idc_unlock(ha);
3469 }
afaf5a2d 3470
afaf5a2d
DS
3471 /* Detach interrupts */
3472 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
f4f5df23 3473 qla4xxx_free_irqs(ha);
afaf5a2d 3474
bee4fe8e
DS
3475 /* free extra memory */
3476 qla4xxx_mem_free(ha);
f4f5df23
VC
3477}
3478
3479int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3480{
3481 int status = 0;
f4f5df23
VC
3482 unsigned long mem_base, mem_len, db_base, db_len;
3483 struct pci_dev *pdev = ha->pdev;
3484
3485 status = pci_request_regions(pdev, DRIVER_NAME);
3486 if (status) {
3487 printk(KERN_WARNING
3488 "scsi(%ld) Failed to reserve PIO regions (%s) "
3489 "status=%d\n", ha->host_no, pci_name(pdev), status);
3490 goto iospace_error_exit;
3491 }
3492
f4f5df23 3493 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
7d7311c4
SS
3494 __func__, pdev->revision));
3495 ha->revision_id = pdev->revision;
bee4fe8e 3496
f4f5df23
VC
3497 /* remap phys address */
3498 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
3499 mem_len = pci_resource_len(pdev, 0);
3500 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
3501 __func__, mem_base, mem_len));
afaf5a2d 3502
f4f5df23
VC
3503 /* mapping of pcibase pointer */
3504 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
3505 if (!ha->nx_pcibase) {
3506 printk(KERN_ERR
3507 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
3508 pci_release_regions(ha->pdev);
3509 goto iospace_error_exit;
3510 }
3511
3512 /* Mapping of IO base pointer, door bell read and write pointer */
3513
3514 /* mapping of IO base pointer */
3515 ha->qla4_8xxx_reg =
3516 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
3517 0xbc000 + (ha->pdev->devfn << 11));
3518
3519 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
3520 db_len = pci_resource_len(pdev, 4);
3521
2657c800
SS
3522 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3523 QLA82XX_CAM_RAM_DB2);
f4f5df23 3524
2657c800 3525 return 0;
f4f5df23
VC
3526iospace_error_exit:
3527 return -ENOMEM;
afaf5a2d
DS
3528}
3529
3530/***
3531 * qla4xxx_iospace_config - maps registers
3532 * @ha: pointer to adapter structure
3533 *
3534 * This routine maps the HBA's registers from the PCI address space
3535 * into the kernel virtual address space for memory-mapped I/O.
3536 **/
f4f5df23 3537int qla4xxx_iospace_config(struct scsi_qla_host *ha)
afaf5a2d
DS
3538{
3539 unsigned long pio, pio_len, pio_flags;
3540 unsigned long mmio, mmio_len, mmio_flags;
3541
3542 pio = pci_resource_start(ha->pdev, 0);
3543 pio_len = pci_resource_len(ha->pdev, 0);
3544 pio_flags = pci_resource_flags(ha->pdev, 0);
3545 if (pio_flags & IORESOURCE_IO) {
3546 if (pio_len < MIN_IOBASE_LEN) {
c2660df3 3547 ql4_printk(KERN_WARNING, ha,
afaf5a2d
DS
3548 "Invalid PCI I/O region size\n");
3549 pio = 0;
3550 }
3551 } else {
c2660df3 3552 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
afaf5a2d
DS
3553 pio = 0;
3554 }
3555
3556 /* Use MMIO operations for all accesses. */
3557 mmio = pci_resource_start(ha->pdev, 1);
3558 mmio_len = pci_resource_len(ha->pdev, 1);
3559 mmio_flags = pci_resource_flags(ha->pdev, 1);
3560
3561 if (!(mmio_flags & IORESOURCE_MEM)) {
c2660df3
VC
3562 ql4_printk(KERN_ERR, ha,
3563 "region #1 not an MMIO resource, aborting\n");
afaf5a2d
DS
3564
3565 goto iospace_error_exit;
3566 }
c2660df3 3567
afaf5a2d 3568 if (mmio_len < MIN_IOBASE_LEN) {
c2660df3
VC
3569 ql4_printk(KERN_ERR, ha,
3570 "Invalid PCI mem region size, aborting\n");
afaf5a2d
DS
3571 goto iospace_error_exit;
3572 }
3573
3574 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
c2660df3
VC
3575 ql4_printk(KERN_WARNING, ha,
3576 "Failed to reserve PIO/MMIO regions\n");
afaf5a2d
DS
3577
3578 goto iospace_error_exit;
3579 }
3580
3581 ha->pio_address = pio;
3582 ha->pio_length = pio_len;
3583 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
3584 if (!ha->reg) {
c2660df3
VC
3585 ql4_printk(KERN_ERR, ha,
3586 "cannot remap MMIO, aborting\n");
afaf5a2d
DS
3587
3588 goto iospace_error_exit;
3589 }
3590
3591 return 0;
3592
3593iospace_error_exit:
3594 return -ENOMEM;
3595}
3596
f4f5df23
VC
3597static struct isp_operations qla4xxx_isp_ops = {
3598 .iospace_config = qla4xxx_iospace_config,
3599 .pci_config = qla4xxx_pci_config,
3600 .disable_intrs = qla4xxx_disable_intrs,
3601 .enable_intrs = qla4xxx_enable_intrs,
3602 .start_firmware = qla4xxx_start_firmware,
3603 .intr_handler = qla4xxx_intr_handler,
3604 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
3605 .reset_chip = qla4xxx_soft_reset,
3606 .reset_firmware = qla4xxx_hw_reset,
3607 .queue_iocb = qla4xxx_queue_iocb,
3608 .complete_iocb = qla4xxx_complete_iocb,
3609 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
3610 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
3611 .get_sys_info = qla4xxx_get_sys_info,
3612};
3613
3614static struct isp_operations qla4_8xxx_isp_ops = {
3615 .iospace_config = qla4_8xxx_iospace_config,
3616 .pci_config = qla4_8xxx_pci_config,
3617 .disable_intrs = qla4_8xxx_disable_intrs,
3618 .enable_intrs = qla4_8xxx_enable_intrs,
3619 .start_firmware = qla4_8xxx_load_risc,
3620 .intr_handler = qla4_8xxx_intr_handler,
3621 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
3622 .reset_chip = qla4_8xxx_isp_reset,
3623 .reset_firmware = qla4_8xxx_stop_firmware,
3624 .queue_iocb = qla4_8xxx_queue_iocb,
3625 .complete_iocb = qla4_8xxx_complete_iocb,
3626 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
3627 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
3628 .get_sys_info = qla4_8xxx_get_sys_info,
3629};
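The two ops tables above are how chip-independent code reaches chip-specific
routines; a minimal sketch of the selection-and-dispatch pattern (mirroring what
qla4xxx_probe_adapter() does later in this file; the sketch function name is
illustrative only, not part of the driver):

static int qla4xxx_isp_ops_dispatch_sketch(struct scsi_qla_host *ha)
{
	/* pick the table once, at probe time */
	ha->isp_ops = is_qla8022(ha) ? &qla4_8xxx_isp_ops : &qla4xxx_isp_ops;

	/* thereafter callers indirect through it instead of testing the chip */
	if (ha->isp_ops->iospace_config(ha))
		return -ENOMEM;
	ha->isp_ops->enable_intrs(ha);
	return 0;
}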
3630
3631uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3632{
3633 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
3634}
3635
3636uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3637{
3638 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
3639}
3640
3641uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3642{
3643 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
3644}
3645
3646uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3647{
3648 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
3649}
3650
2a991c21
MR
3651static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
3652{
3653 struct scsi_qla_host *ha = data;
3654 char *str = buf;
3655 int rc;
3656
3657 switch (type) {
3658 case ISCSI_BOOT_ETH_FLAGS:
3659 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3660 break;
3661 case ISCSI_BOOT_ETH_INDEX:
3662 rc = sprintf(str, "0\n");
3663 break;
3664 case ISCSI_BOOT_ETH_MAC:
3665 rc = sysfs_format_mac(str, ha->my_mac,
3666 MAC_ADDR_LEN);
3667 break;
3668 default:
3669 rc = -ENOSYS;
3670 break;
3671 }
3672 return rc;
3673}
3674
587a1f16 3675static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
2a991c21
MR
3676{
3677 int rc;
3678
3679 switch (type) {
3680 case ISCSI_BOOT_ETH_FLAGS:
3681 case ISCSI_BOOT_ETH_MAC:
3682 case ISCSI_BOOT_ETH_INDEX:
3683 rc = S_IRUGO;
3684 break;
3685 default:
3686 rc = 0;
3687 break;
3688 }
3689 return rc;
3690}
3691
3692static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
3693{
3694 struct scsi_qla_host *ha = data;
3695 char *str = buf;
3696 int rc;
3697
3698 switch (type) {
3699 case ISCSI_BOOT_INI_INITIATOR_NAME:
3700 rc = sprintf(str, "%s\n", ha->name_string);
3701 break;
3702 default:
3703 rc = -ENOSYS;
3704 break;
3705 }
3706 return rc;
3707}
3708
587a1f16 3709static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
2a991c21
MR
3710{
3711 int rc;
3712
3713 switch (type) {
3714 case ISCSI_BOOT_INI_INITIATOR_NAME:
3715 rc = S_IRUGO;
3716 break;
3717 default:
3718 rc = 0;
3719 break;
3720 }
3721 return rc;
3722}
3723
3724static ssize_t
3725qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
3726 char *buf)
3727{
3728 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
3729 char *str = buf;
3730 int rc;
3731
3732 switch (type) {
3733 case ISCSI_BOOT_TGT_NAME:
3734 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
3735 break;
3736 case ISCSI_BOOT_TGT_IP_ADDR:
3737 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
3738 rc = sprintf(buf, "%pI4\n",
3739 &boot_conn->dest_ipaddr.ip_address);
3740 else
3741 rc = sprintf(str, "%pI6\n",
3742 &boot_conn->dest_ipaddr.ip_address);
3743 break;
3744 case ISCSI_BOOT_TGT_PORT:
3745 rc = sprintf(str, "%d\n", boot_conn->dest_port);
3746 break;
3747 case ISCSI_BOOT_TGT_CHAP_NAME:
3748 rc = sprintf(str, "%.*s\n",
3749 boot_conn->chap.target_chap_name_length,
3750 (char *)&boot_conn->chap.target_chap_name);
3751 break;
3752 case ISCSI_BOOT_TGT_CHAP_SECRET:
3753 rc = sprintf(str, "%.*s\n",
3754 boot_conn->chap.target_secret_length,
3755 (char *)&boot_conn->chap.target_secret);
3756 break;
3757 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3758 rc = sprintf(str, "%.*s\n",
3759 boot_conn->chap.intr_chap_name_length,
3760 (char *)&boot_conn->chap.intr_chap_name);
3761 break;
3762 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3763 rc = sprintf(str, "%.*s\n",
3764 boot_conn->chap.intr_secret_length,
3765 (char *)&boot_conn->chap.intr_secret);
3766 break;
3767 case ISCSI_BOOT_TGT_FLAGS:
3768 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3769 break;
3770 case ISCSI_BOOT_TGT_NIC_ASSOC:
3771 rc = sprintf(str, "0\n");
3772 break;
3773 default:
3774 rc = -ENOSYS;
3775 break;
3776 }
3777 return rc;
3778}
3779
3780static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
3781{
3782 struct scsi_qla_host *ha = data;
3783 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
3784
3785 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
3786}
3787
3788static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
3789{
3790 struct scsi_qla_host *ha = data;
3791 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
3792
3793 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
3794}
3795
587a1f16 3796static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
2a991c21
MR
3797{
3798 int rc;
3799
3800 switch (type) {
3801 case ISCSI_BOOT_TGT_NAME:
3802 case ISCSI_BOOT_TGT_IP_ADDR:
3803 case ISCSI_BOOT_TGT_PORT:
3804 case ISCSI_BOOT_TGT_CHAP_NAME:
3805 case ISCSI_BOOT_TGT_CHAP_SECRET:
3806 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3807 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3808 case ISCSI_BOOT_TGT_NIC_ASSOC:
3809 case ISCSI_BOOT_TGT_FLAGS:
3810 rc = S_IRUGO;
3811 break;
3812 default:
3813 rc = 0;
3814 break;
3815 }
3816 return rc;
3817}
3818
3819static void qla4xxx_boot_release(void *data)
3820{
3821 struct scsi_qla_host *ha = data;
3822
3823 scsi_host_put(ha->host);
3824}
3825
3826static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
3827{
3828 dma_addr_t buf_dma;
3829 uint32_t addr, pri_addr, sec_addr;
3830 uint32_t offset;
3831 uint16_t func_num;
3832 uint8_t val;
3833 uint8_t *buf = NULL;
3834 size_t size = 13 * sizeof(uint8_t);
3835 int ret = QLA_SUCCESS;
3836
3837 func_num = PCI_FUNC(ha->pdev->devfn);
3838
0d5b36b8
MR
3839 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
3840 __func__, ha->pdev->device, func_num);
2a991c21 3841
0d5b36b8 3842 if (is_qla40XX(ha)) {
2a991c21
MR
3843 if (func_num == 1) {
3844 addr = NVRAM_PORT0_BOOT_MODE;
3845 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
3846 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
3847 } else if (func_num == 3) {
3848 addr = NVRAM_PORT1_BOOT_MODE;
3849 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
3850 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
3851 } else {
3852 ret = QLA_ERROR;
3853 goto exit_boot_info;
3854 }
3855
3856 /* Check Boot Mode */
3857 val = rd_nvram_byte(ha, addr);
3858 if (!(val & 0x07)) {
e8fb00e0
MR
3859 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
3860 "options : 0x%x\n", __func__, val));
2a991c21
MR
3861 ret = QLA_ERROR;
3862 goto exit_boot_info;
3863 }
3864
3865 /* get primary valid target index */
3866 val = rd_nvram_byte(ha, pri_addr);
3867 if (val & BIT_7)
3868 ddb_index[0] = (val & 0x7f);
2a991c21
MR
3869
3870 /* get secondary valid target index */
3871 val = rd_nvram_byte(ha, sec_addr);
3872 if (val & BIT_7)
3873 ddb_index[1] = (val & 0x7f);
2a991c21
MR
3874
3875 } else if (is_qla8022(ha)) {
3876 buf = dma_alloc_coherent(&ha->pdev->dev, size,
3877 &buf_dma, GFP_KERNEL);
3878 if (!buf) {
3879 DEBUG2(ql4_printk(KERN_ERR, ha,
3880 "%s: Unable to allocate dma buffer\n",
3881 __func__));
3882 ret = QLA_ERROR;
3883 goto exit_boot_info;
3884 }
3885
3886 if (ha->port_num == 0)
3887 offset = BOOT_PARAM_OFFSET_PORT0;
3888 else if (ha->port_num == 1)
3889 offset = BOOT_PARAM_OFFSET_PORT1;
3890 else {
3891 ret = QLA_ERROR;
3892 goto exit_boot_info_free;
3893 }
3894 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
3895 offset;
3896 if (qla4xxx_get_flash(ha, buf_dma, addr,
3897 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
3898 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
0bd7f842 3899 " failed\n", ha->host_no, __func__));
2a991c21
MR
3900 ret = QLA_ERROR;
3901 goto exit_boot_info_free;
3902 }
3903 /* Check Boot Mode */
3904 if (!(buf[1] & 0x07)) {
e8fb00e0
MR
3905 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
3906 " : 0x%x\n", buf[1]));
2a991c21
MR
3907 ret = QLA_ERROR;
3908 goto exit_boot_info_free;
3909 }
3910
3911 /* get primary valid target index */
3912 if (buf[2] & BIT_7)
3913 ddb_index[0] = buf[2] & 0x7f;
2a991c21
MR
3914
3915 /* get secondary valid target index */
3916 if (buf[11] & BIT_7)
3917 ddb_index[1] = buf[11] & 0x7f;
2a991c21
MR
3918 } else {
3919 ret = QLA_ERROR;
3920 goto exit_boot_info;
3921 }
3922
3923 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
3924 " target ID %d\n", __func__, ddb_index[0],
3925 ddb_index[1]));
3926
3927exit_boot_info_free:
3928 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
3929exit_boot_info:
20e835b4
LC
3930 ha->pri_ddb_idx = ddb_index[0];
3931 ha->sec_ddb_idx = ddb_index[1];
2a991c21
MR
3932 return ret;
3933}
3934
28deb45c
LC
3935/**
3936 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
3937 * @ha: pointer to adapter structure
3938 * @username: CHAP username to be returned
3939 * @password: CHAP password to be returned
3940 *
3941 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
3942 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
3943 * So, from the CHAP cache, find the first BIDI CHAP entry and set it
3944 * in the boot record in sysfs.
3945 **/
3946static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
3947 char *password)
3948{
3949 int i, ret = -EINVAL;
3950 int max_chap_entries = 0;
3951 struct ql4_chap_table *chap_table;
3952
3953 if (is_qla8022(ha))
3954 max_chap_entries = (ha->hw.flt_chap_size / 2) /
3955 sizeof(struct ql4_chap_table);
3956 else
3957 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
3958
3959 if (!ha->chap_list) {
3960 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
3961 return ret;
3962 }
3963
3964 mutex_lock(&ha->chap_sem);
3965 for (i = 0; i < max_chap_entries; i++) {
3966 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
3967 if (chap_table->cookie !=
3968 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
3969 continue;
3970 }
3971
3972 if (chap_table->flags & BIT_7) /* local */
3973 continue;
3974
3975 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
3976 continue;
3977
3978 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
3979 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
3980 ret = 0;
3981 break;
3982 }
3983 mutex_unlock(&ha->chap_sem);
3984
3985 return ret;
3986}
3987
3988
2a991c21
MR
3989static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
3990 struct ql4_boot_session_info *boot_sess,
3991 uint16_t ddb_index)
3992{
3993 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
3994 struct dev_db_entry *fw_ddb_entry;
3995 dma_addr_t fw_ddb_entry_dma;
3996 uint16_t idx;
3997 uint16_t options;
3998 int ret = QLA_SUCCESS;
3999
4000 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4001 &fw_ddb_entry_dma, GFP_KERNEL);
4002 if (!fw_ddb_entry) {
4003 DEBUG2(ql4_printk(KERN_ERR, ha,
4004 "%s: Unable to allocate dma buffer.\n",
4005 __func__));
4006 ret = QLA_ERROR;
4007 return ret;
4008 }
4009
4010 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
4011 fw_ddb_entry_dma, ddb_index)) {
e8fb00e0
MR
4012 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
4013 "index [%d]\n", __func__, ddb_index));
2a991c21
MR
4014 ret = QLA_ERROR;
4015 goto exit_boot_target;
4016 }
4017
4018 /* Update target name and IP from DDB */
4019 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
4020 min(sizeof(boot_sess->target_name),
4021 sizeof(fw_ddb_entry->iscsi_name)));
4022
4023 options = le16_to_cpu(fw_ddb_entry->options);
4024 if (options & DDB_OPT_IPV6_DEVICE) {
4025 memcpy(&boot_conn->dest_ipaddr.ip_address,
4026 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
4027 } else {
4028 boot_conn->dest_ipaddr.ip_type = 0x1;
4029 memcpy(&boot_conn->dest_ipaddr.ip_address,
4030 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
4031 }
4032
4033 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
4034
4035 /* update chap information */
4036 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
4037
4038 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4039
4040 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
4041
4042 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
4043 target_chap_name,
4044 (char *)&boot_conn->chap.target_secret,
4045 idx);
4046 if (ret) {
4047 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
4048 ret = QLA_ERROR;
4049 goto exit_boot_target;
4050 }
4051
4052 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4053 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4054 }
4055
4056 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4057
4058 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
4059
28deb45c
LC
4060 ret = qla4xxx_get_bidi_chap(ha,
4061 (char *)&boot_conn->chap.intr_chap_name,
4062 (char *)&boot_conn->chap.intr_secret);
4063
2a991c21
MR
4064 if (ret) {
4065 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
4066 ret = QLA_ERROR;
4067 goto exit_boot_target;
4068 }
4069
4070 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4071 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4072 }
4073
4074exit_boot_target:
4075 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4076 fw_ddb_entry, fw_ddb_entry_dma);
4077 return ret;
4078}
4079
4080static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4081{
4082 uint16_t ddb_index[2];
8de5b958
LC
4083 int ret = QLA_ERROR;
4084 int rval;
2a991c21
MR
4085
4086 memset(ddb_index, 0, sizeof(ddb_index));
8de5b958
LC
4087 ddb_index[0] = 0xffff;
4088 ddb_index[1] = 0xffff;
2a991c21
MR
4089 ret = get_fw_boot_info(ha, ddb_index);
4090 if (ret != QLA_SUCCESS) {
e8fb00e0
MR
4091 DEBUG2(ql4_printk(KERN_INFO, ha,
4092 "%s: No boot target configured.\n", __func__));
2a991c21
MR
4093 return ret;
4094 }
4095
13483730
MC
4096 if (ql4xdisablesysfsboot)
4097 return QLA_SUCCESS;
4098
8de5b958
LC
4099 if (ddb_index[0] == 0xffff)
4100 goto sec_target;
4101
4102 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
2a991c21 4103 ddb_index[0]);
8de5b958 4104 if (rval != QLA_SUCCESS) {
e8fb00e0
MR
4105 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4106 "configured\n", __func__));
8de5b958
LC
4107 } else
4108 ret = QLA_SUCCESS;
2a991c21 4109
8de5b958
LC
4110sec_target:
4111 if (ddb_index[1] == 0xffff)
4112 goto exit_get_boot_info;
4113
4114 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
2a991c21 4115 ddb_index[1]);
8de5b958 4116 if (rval != QLA_SUCCESS) {
e8fb00e0
MR
4117 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4118 " configured\n", __func__));
8de5b958
LC
4119 } else
4120 ret = QLA_SUCCESS;
4121
4122exit_get_boot_info:
2a991c21
MR
4123 return ret;
4124}
4125
4126static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
4127{
4128 struct iscsi_boot_kobj *boot_kobj;
4129
4130 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
13483730
MC
4131 return QLA_ERROR;
4132
4133 if (ql4xdisablesysfsboot) {
4134 ql4_printk(KERN_INFO, ha,
0bd7f842 4135 "%s: sysfsboot disabled - driver will trigger login "
13483730
MC
4136 "and publish session for discovery.\n", __func__);
4137 return QLA_SUCCESS;
4138 }
4139
2a991c21
MR
4140
4141 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
4142 if (!ha->boot_kset)
4143 goto kset_free;
4144
4145 if (!scsi_host_get(ha->host))
4146 goto kset_free;
4147 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
4148 qla4xxx_show_boot_tgt_pri_info,
4149 qla4xxx_tgt_get_attr_visibility,
4150 qla4xxx_boot_release);
4151 if (!boot_kobj)
4152 goto put_host;
4153
4154 if (!scsi_host_get(ha->host))
4155 goto kset_free;
4156 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
4157 qla4xxx_show_boot_tgt_sec_info,
4158 qla4xxx_tgt_get_attr_visibility,
4159 qla4xxx_boot_release);
4160 if (!boot_kobj)
4161 goto put_host;
4162
4163 if (!scsi_host_get(ha->host))
4164 goto kset_free;
4165 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
4166 qla4xxx_show_boot_ini_info,
4167 qla4xxx_ini_get_attr_visibility,
4168 qla4xxx_boot_release);
4169 if (!boot_kobj)
4170 goto put_host;
4171
4172 if (!scsi_host_get(ha->host))
4173 goto kset_free;
4174 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
4175 qla4xxx_show_boot_eth_info,
4176 qla4xxx_eth_get_attr_visibility,
4177 qla4xxx_boot_release);
4178 if (!boot_kobj)
4179 goto put_host;
4180
13483730 4181 return QLA_SUCCESS;
2a991c21
MR
4182
4183put_host:
4184 scsi_host_put(ha->host);
4185kset_free:
4186 iscsi_boot_destroy_kset(ha->boot_kset);
4187 return -ENOMEM;
4188}
4189
4549415a
LC
4190
4191/**
4192 * qla4xxx_create_chap_list - Create CHAP list from FLASH
4193 * @ha: pointer to adapter structure
4194 *
4195 * Read flash and make a list of CHAP entries. During login, when a CHAP entry
4196 * is received it is checked against this list. If the entry exists, its CHAP
4197 * index is set in the DDB. If the entry does not exist in this list, a new
4198 * entry is added to the CHAP table in FLASH and the index obtained is
4199 * used in the DDB.
4200 **/
4201static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
4202{
4203 int rval = 0;
4204 uint8_t *chap_flash_data = NULL;
4205 uint32_t offset;
4206 dma_addr_t chap_dma;
4207 uint32_t chap_size = 0;
4208
4209 if (is_qla40XX(ha))
4210 chap_size = MAX_CHAP_ENTRIES_40XX *
4211 sizeof(struct ql4_chap_table);
4212 else /* Single region contains CHAP info for both
4213 * ports which is divided into half for each port.
4214 */
4215 chap_size = ha->hw.flt_chap_size / 2;
4216
4217 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
4218 &chap_dma, GFP_KERNEL);
4219 if (!chap_flash_data) {
4220 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
4221 return;
4222 }
4223 if (is_qla40XX(ha))
4224 offset = FLASH_CHAP_OFFSET;
4225 else {
4226 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
4227 if (ha->port_num == 1)
4228 offset += chap_size;
4229 }
4230
4231 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
4232 if (rval != QLA_SUCCESS)
4233 goto exit_chap_list;
4234
4235 if (ha->chap_list == NULL)
4236 ha->chap_list = vmalloc(chap_size);
4237 if (ha->chap_list == NULL) {
4238 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
4239 goto exit_chap_list;
4240 }
4241
4242 memcpy(ha->chap_list, chap_flash_data, chap_size);
4243
4244exit_chap_list:
4245 dma_free_coherent(&ha->pdev->dev, chap_size,
4246 chap_flash_data, chap_dma);
4549415a
LC
4247}
4248
13483730
MC
4249static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
4250 struct ql4_tuple_ddb *tddb)
4251{
4252 struct scsi_qla_host *ha;
4253 struct iscsi_cls_session *cls_sess;
4254 struct iscsi_cls_conn *cls_conn;
4255 struct iscsi_session *sess;
4256 struct iscsi_conn *conn;
4257
4258 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
4259 ha = ddb_entry->ha;
4260 cls_sess = ddb_entry->sess;
4261 sess = cls_sess->dd_data;
4262 cls_conn = ddb_entry->conn;
4263 conn = cls_conn->dd_data;
4264
4265 tddb->tpgt = sess->tpgt;
4266 tddb->port = conn->persistent_port;
4267 strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
4268 strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
4269}
4270
4271static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
4272 struct ql4_tuple_ddb *tddb)
4273{
4274 uint16_t options = 0;
4275
4276 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
4277 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
4278 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
4279
4280 options = le16_to_cpu(fw_ddb_entry->options);
4281 if (options & DDB_OPT_IPV6_DEVICE)
4282 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
4283 else
4284 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
4285
4286 tddb->port = le16_to_cpu(fw_ddb_entry->port);
173269ef 4287 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], sizeof(tddb->isid));
13483730
MC
4288}
4289
4290static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
4291 struct ql4_tuple_ddb *old_tddb,
173269ef
MR
4292 struct ql4_tuple_ddb *new_tddb,
4293 uint8_t is_isid_compare)
13483730
MC
4294{
4295 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4296 return QLA_ERROR;
4297
4298 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
4299 return QLA_ERROR;
4300
4301 if (old_tddb->port != new_tddb->port)
4302 return QLA_ERROR;
4303
173269ef
MR
4304 /* For multiple sessions the driver generates the ISID, so do not compare
4305 * ISIDs in the reset path since that would be a comparison between a
4306 * driver-generated ISID and a firmware-generated ISID. This could
4307 * lead to adding duplicate DDBs to the list, as the driver-generated
4308 * ISID would not match the firmware-generated ISID.
4309 */
4310 if (is_isid_compare) {
4311 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
4312 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
4313 __func__, old_tddb->isid[5], old_tddb->isid[4],
4314 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
4315 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
4316 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
4317 new_tddb->isid[0]));
4318
4319 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4320 sizeof(old_tddb->isid)))
4321 return QLA_ERROR;
4322 }
4323
13483730
MC
4324 DEBUG2(ql4_printk(KERN_INFO, ha,
4325 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
4326 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
4327 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
4328 new_tddb->ip_addr, new_tddb->iscsi_name));
4329
4330 return QLA_SUCCESS;
4331}
4332
4333static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
4334 struct dev_db_entry *fw_ddb_entry)
4335{
4336 struct ddb_entry *ddb_entry;
4337 struct ql4_tuple_ddb *fw_tddb = NULL;
4338 struct ql4_tuple_ddb *tmp_tddb = NULL;
4339 int idx;
4340 int ret = QLA_ERROR;
4341
4342 fw_tddb = vzalloc(sizeof(*fw_tddb));
4343 if (!fw_tddb) {
4344 DEBUG2(ql4_printk(KERN_WARNING, ha,
4345 "Memory Allocation failed.\n"));
4346 ret = QLA_SUCCESS;
4347 goto exit_check;
4348 }
4349
4350 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4351 if (!tmp_tddb) {
4352 DEBUG2(ql4_printk(KERN_WARNING, ha,
4353 "Memory Allocation failed.\n"));
4354 ret = QLA_SUCCESS;
4355 goto exit_check;
4356 }
4357
4358 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
4359
4360 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4361 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
4362 if (ddb_entry == NULL)
4363 continue;
4364
4365 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
173269ef 4366 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
13483730
MC
4367 ret = QLA_SUCCESS; /* found */
4368 goto exit_check;
4369 }
4370 }
4371
4372exit_check:
4373 if (fw_tddb)
4374 vfree(fw_tddb);
4375 if (tmp_tddb)
4376 vfree(tmp_tddb);
4377 return ret;
4378}
4379
4380static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
4381 struct list_head *list_nt,
4382 struct dev_db_entry *fw_ddb_entry)
4383{
4384 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4385 struct ql4_tuple_ddb *fw_tddb = NULL;
4386 struct ql4_tuple_ddb *tmp_tddb = NULL;
4387 int ret = QLA_ERROR;
4388
4389 fw_tddb = vzalloc(sizeof(*fw_tddb));
4390 if (!fw_tddb) {
4391 DEBUG2(ql4_printk(KERN_WARNING, ha,
4392 "Memory Allocation failed.\n"));
4393 ret = QLA_SUCCESS;
4394 goto exit_check;
4395 }
4396
4397 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4398 if (!tmp_tddb) {
4399 DEBUG2(ql4_printk(KERN_WARNING, ha,
4400 "Memory Allocation failed.\n"));
4401 ret = QLA_SUCCESS;
4402 goto exit_check;
4403 }
4404
4405 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
4406
4407 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4408 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
173269ef 4409 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true)) {
13483730
MC
4410 ret = QLA_SUCCESS; /* found */
4411 goto exit_check;
4412 }
4413 }
4414
4415exit_check:
4416 if (fw_tddb)
4417 vfree(fw_tddb);
4418 if (tmp_tddb)
4419 vfree(tmp_tddb);
4420 return ret;
4421}
4422
4a4bc2e9 4423static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
13483730 4424{
4a4bc2e9 4425 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
13483730 4426
4a4bc2e9
LC
4427 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4428 list_del_init(&ddb_idx->list);
4429 vfree(ddb_idx);
13483730 4430 }
13483730
MC
4431}
4432
4433static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
4434 struct dev_db_entry *fw_ddb_entry)
4435{
4436 struct iscsi_endpoint *ep;
4437 struct sockaddr_in *addr;
4438 struct sockaddr_in6 *addr6;
4439 struct sockaddr *dst_addr;
4440 char *ip;
4441
4442 /* TODO: need to destroy the iscsi_endpoint on unload */
4443 dst_addr = vmalloc(sizeof(*dst_addr));
4444 if (!dst_addr)
4445 return NULL;
4446
4447 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
4448 dst_addr->sa_family = AF_INET6;
4449 addr6 = (struct sockaddr_in6 *)dst_addr;
4450 ip = (char *)&addr6->sin6_addr;
4451 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
4452 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
4453
4454 } else {
4455 dst_addr->sa_family = AF_INET;
4456 addr = (struct sockaddr_in *)dst_addr;
4457 ip = (char *)&addr->sin_addr;
4458 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
4459 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
4460 }
4461
4462 ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
4463 vfree(dst_addr);
4464 return ep;
4465}
4466
4467static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
4468{
4469 if (ql4xdisablesysfsboot)
4470 return QLA_SUCCESS;
4471 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
4472 return QLA_ERROR;
4473 return QLA_SUCCESS;
4474}
4475
4476static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
4477 struct ddb_entry *ddb_entry)
4478{
c28eaaca
NJ
4479 uint16_t def_timeout;
4480
13483730
MC
4481 ddb_entry->ddb_type = FLASH_DDB;
4482 ddb_entry->fw_ddb_index = INVALID_ENTRY;
4483 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
4484 ddb_entry->ha = ha;
4485 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
4486 ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
4487
4488 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
4489 atomic_set(&ddb_entry->relogin_timer, 0);
4490 atomic_set(&ddb_entry->relogin_retry_count, 0);
c28eaaca 4491 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
13483730 4492 ddb_entry->default_relogin_timeout =
c28eaaca
NJ
4493 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
4494 def_timeout : LOGIN_TOV;
13483730
MC
4495 ddb_entry->default_time2wait =
4496 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
4497}
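The relogin timeout chosen above is a simple sanity check on the per-DDB value;
a minimal helper expressing the same policy (helper name is illustrative, not
part of the driver):

static inline uint16_t ql4_sane_relogin_tov(uint16_t def_timeout)
{
	/* accept the DDB's own timeout only when it lies in a sane window,
	 * otherwise fall back to the driver default LOGIN_TOV */
	return (def_timeout > LOGIN_TOV && def_timeout < LOGIN_TOV * 10) ?
	       def_timeout : LOGIN_TOV;
}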
4498
4499static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
4500{
4501 uint32_t idx = 0;
4502 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
4503 uint32_t sts[MBOX_REG_COUNT];
4504 uint32_t ip_state;
4505 unsigned long wtime;
4506 int ret;
4507
4508 wtime = jiffies + (HZ * IP_CONFIG_TOV);
4509 do {
4510 for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
4511 if (ip_idx[idx] == -1)
4512 continue;
4513
4514 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
4515
4516 if (ret == QLA_ERROR) {
4517 ip_idx[idx] = -1;
4518 continue;
4519 }
4520
4521 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
4522
4523 DEBUG2(ql4_printk(KERN_INFO, ha,
4524 "Waiting for IP state for idx = %d, state = 0x%x\n",
4525 ip_idx[idx], ip_state));
4526 if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
4527 ip_state == IP_ADDRSTATE_INVALID ||
4528 ip_state == IP_ADDRSTATE_PREFERRED ||
4529 ip_state == IP_ADDRSTATE_DEPRICATED ||
4530 ip_state == IP_ADDRSTATE_DISABLING)
4531 ip_idx[idx] = -1;
13483730
MC
4532 }
4533
4534 /* Break if all IP states checked */
4535 if ((ip_idx[0] == -1) &&
4536 (ip_idx[1] == -1) &&
4537 (ip_idx[2] == -1) &&
4538 (ip_idx[3] == -1))
4539 break;
4540 schedule_timeout_uninterruptible(HZ);
4541 } while (time_after(wtime, jiffies));
4542}
4543
4a4bc2e9
LC
4544static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
4545 struct list_head *list_st)
13483730 4546{
4a4bc2e9 4547 struct qla_ddb_index *st_ddb_idx;
13483730 4548 int max_ddbs;
4a4bc2e9
LC
4549 int fw_idx_size;
4550 struct dev_db_entry *fw_ddb_entry;
4551 dma_addr_t fw_ddb_dma;
13483730
MC
4552 int ret;
4553 uint32_t idx = 0, next_idx = 0;
4554 uint32_t state = 0, conn_err = 0;
4a4bc2e9 4555 uint16_t conn_id = 0;
13483730
MC
4556
4557 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4558 &fw_ddb_dma);
4559 if (fw_ddb_entry == NULL) {
4560 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4a4bc2e9 4561 goto exit_st_list;
13483730
MC
4562 }
4563
4a4bc2e9
LC
4564 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4565 MAX_DEV_DB_ENTRIES;
13483730
MC
4566 fw_idx_size = sizeof(struct qla_ddb_index);
4567
4568 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4a4bc2e9
LC
4569 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4570 NULL, &next_idx, &state,
4571 &conn_err, NULL, &conn_id);
13483730
MC
4572 if (ret == QLA_ERROR)
4573 break;
4574
981c982c
LC
4575 /* Ignore DDB if invalid state (unassigned) */
4576 if (state == DDB_DS_UNASSIGNED)
4577 goto continue_next_st;
4578
13483730
MC
4579 /* Check if ST, add to the list_st */
4580 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
4581 goto continue_next_st;
4582
4583 st_ddb_idx = vzalloc(fw_idx_size);
4584 if (!st_ddb_idx)
4585 break;
4586
4587 st_ddb_idx->fw_ddb_idx = idx;
4588
4a4bc2e9 4589 list_add_tail(&st_ddb_idx->list, list_st);
13483730
MC
4590continue_next_st:
4591 if (next_idx == 0)
4592 break;
4593 }
4594
4a4bc2e9
LC
4595exit_st_list:
4596 if (fw_ddb_entry)
4597 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4598}
4599
4600/**
4601 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
4602 * @ha: pointer to adapter structure
4603 * @list_ddb: List from which failed ddb to be removed
4604 *
4605 * Iterate over the list of DDBs and remove those that are in either the
4606 * no-connection-active state or the failed state
4607 **/
4608static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
4609 struct list_head *list_ddb)
4610{
4611 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
4612 uint32_t next_idx = 0;
4613 uint32_t state = 0, conn_err = 0;
4614 int ret;
4615
4616 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4617 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
4618 NULL, 0, NULL, &next_idx, &state,
4619 &conn_err, NULL, NULL);
4620 if (ret == QLA_ERROR)
4621 continue;
4622
4623 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
4624 state == DDB_DS_SESSION_FAILED) {
4625 list_del_init(&ddb_idx->list);
4626 vfree(ddb_idx);
4627 }
4628 }
4629}
4630
4631static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
4632 struct dev_db_entry *fw_ddb_entry,
4633 int is_reset)
4634{
4635 struct iscsi_cls_session *cls_sess;
4636 struct iscsi_session *sess;
4637 struct iscsi_cls_conn *cls_conn;
4638 struct iscsi_endpoint *ep;
4639 uint16_t cmds_max = 32;
4640 uint16_t conn_id = 0;
4641 uint32_t initial_cmdsn = 0;
4642 int ret = QLA_SUCCESS;
4643
4644 struct ddb_entry *ddb_entry = NULL;
4645
4646 /* Create session object, with INVALID_ENTRY,
4647 * the target_id will get set when we issue the login
13483730 4648 */
4a4bc2e9
LC
4649 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
4650 cmds_max, sizeof(struct ddb_entry),
4651 sizeof(struct ql4_task_data),
4652 initial_cmdsn, INVALID_ENTRY);
4653 if (!cls_sess) {
4654 ret = QLA_ERROR;
4655 goto exit_setup;
4656 }
13483730 4657
4a4bc2e9
LC
4658 /*
4659 * iscsi_session_setup() took a reference on this module; call
4660 * module_put() to drop it so the driver can still be unloaded.
4661 **/
4662 module_put(qla4xxx_iscsi_transport.owner);
4663 sess = cls_sess->dd_data;
4664 ddb_entry = sess->dd_data;
4665 ddb_entry->sess = cls_sess;
4666
4667 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
4668 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4669 sizeof(struct dev_db_entry));
4670
4671 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
4672
4673 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
4674
4675 if (!cls_conn) {
4676 ret = QLA_ERROR;
4677 goto exit_setup;
13483730
MC
4678 }
4679
4a4bc2e9 4680 ddb_entry->conn = cls_conn;
13483730 4681
4a4bc2e9
LC
4682 /* Setup ep, for displaying attributes in sysfs */
4683 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4684 if (ep) {
4685 ep->conn = cls_conn;
4686 cls_conn->ep = ep;
4687 } else {
4688 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
4689 ret = QLA_ERROR;
4690 goto exit_setup;
4691 }
13483730 4692
4a4bc2e9
LC
4693 /* Update sess/conn params */
4694 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
13483730 4695
4a4bc2e9
LC
4696 if (is_reset == RESET_ADAPTER) {
4697 iscsi_block_session(cls_sess);
4698 /* Use the relogin path to discover new devices
4699 * by short-circuiting the logic of setting
4700 * timer to relogin - instead set the flags
4701 * to initiate login right away.
4702 */
4703 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4704 set_bit(DF_RELOGIN, &ddb_entry->flags);
4705 }
4706
4707exit_setup:
4708 return ret;
4709}
4710
4711static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
4712 struct list_head *list_nt, int is_reset)
4713{
4714 struct dev_db_entry *fw_ddb_entry;
4715 dma_addr_t fw_ddb_dma;
4716 int max_ddbs;
4717 int fw_idx_size;
4718 int ret;
4719 uint32_t idx = 0, next_idx = 0;
4720 uint32_t state = 0, conn_err = 0;
4721 uint16_t conn_id = 0;
4722 struct qla_ddb_index *nt_ddb_idx;
4723
4724 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4725 &fw_ddb_dma);
4726 if (fw_ddb_entry == NULL) {
4727 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4728 goto exit_nt_list;
13483730 4729 }
4a4bc2e9
LC
4730 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4731 MAX_DEV_DB_ENTRIES;
4732 fw_idx_size = sizeof(struct qla_ddb_index);
13483730
MC
4733
4734 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4a4bc2e9
LC
4735 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4736 NULL, &next_idx, &state,
4737 &conn_err, NULL, &conn_id);
13483730
MC
4738 if (ret == QLA_ERROR)
4739 break;
4740
4741 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
4742 goto continue_next_nt;
4743
4744 /* Check if NT, then add it to the list */
4745 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
4746 goto continue_next_nt;
4747
4a4bc2e9
LC
4748 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
4749 state == DDB_DS_SESSION_FAILED))
4750 goto continue_next_nt;
13483730 4751
4a4bc2e9
LC
4752 DEBUG2(ql4_printk(KERN_INFO, ha,
4753 "Adding DDB to session = 0x%x\n", idx));
4754 if (is_reset == INIT_ADAPTER) {
4755 nt_ddb_idx = vmalloc(fw_idx_size);
4756 if (!nt_ddb_idx)
4757 break;
13483730 4758
4a4bc2e9 4759 nt_ddb_idx->fw_ddb_idx = idx;
13483730 4760
4a4bc2e9 4761 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
13483730
MC
4762 sizeof(struct dev_db_entry));
4763
4a4bc2e9
LC
4764 if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
4765 fw_ddb_entry) == QLA_SUCCESS) {
4766 vfree(nt_ddb_idx);
4767 goto continue_next_nt;
13483730 4768 }
4a4bc2e9
LC
4769 list_add_tail(&nt_ddb_idx->list, list_nt);
4770 } else if (is_reset == RESET_ADAPTER) {
4771 if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
4772 QLA_SUCCESS)
4773 goto continue_next_nt;
13483730 4774 }
4a4bc2e9
LC
4775
4776 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
4777 if (ret == QLA_ERROR)
4778 goto exit_nt_list;
4779
13483730
MC
4780continue_next_nt:
4781 if (next_idx == 0)
4782 break;
4783 }
4a4bc2e9
LC
4784
4785exit_nt_list:
13483730
MC
4786 if (fw_ddb_entry)
4787 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4a4bc2e9
LC
4788}
4789
4790/**
4791 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
4792 * @ha: pointer to adapter structure
4793 * @is_reset: Is this init path or reset path
4794 *
4795 * Create a list of sendtargets (st) from firmware DDBs, issue the send targets
4796 * using connection open, then create the list of normal targets (nt)
4797 * from firmware DDBs. Based on the nt list, set up session and connection
4798 * objects.
4799 **/
4800void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4801{
4802 uint16_t tmo = 0;
4803 struct list_head list_st, list_nt;
4804 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
4805 unsigned long wtime;
4806
4807 if (!test_bit(AF_LINK_UP, &ha->flags)) {
4808 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
4809 ha->is_reset = is_reset;
4810 return;
4811 }
4812
4813 INIT_LIST_HEAD(&list_st);
4814 INIT_LIST_HEAD(&list_nt);
4815
4816 qla4xxx_build_st_list(ha, &list_st);
4817
4818 /* Before issuing the conn open mbox, ensure all IP states are configured.
4819 * Note: conn open fails if IPs are not configured
4820 */
4821 qla4xxx_wait_for_ip_configuration(ha);
4822
4823 /* Go through the STs and fire the sendtargets by issuing conn open mbx */
4824 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
4825 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
4826 }
4827
4828 /* Wait for all sendtargets to complete (minimum 12 sec wait) */
c28eaaca
NJ
4829 tmo = ((ha->def_timeout > LOGIN_TOV) &&
4830 (ha->def_timeout < LOGIN_TOV * 10) ?
4831 ha->def_timeout : LOGIN_TOV);
4832
4a4bc2e9
LC
4833 DEBUG2(ql4_printk(KERN_INFO, ha,
4834 "Default time to wait for build ddb %d\n", tmo));
4835
4836 wtime = jiffies + (HZ * tmo);
4837 do {
f1f2e60e
NJ
4838 if (list_empty(&list_st))
4839 break;
4840
4a4bc2e9
LC
4841 qla4xxx_remove_failed_ddb(ha, &list_st);
4842 schedule_timeout_uninterruptible(HZ / 10);
4843 } while (time_after(wtime, jiffies));
4844
4845 /* Free up the sendtargets list */
4846 qla4xxx_free_ddb_list(&list_st);
4847
4848 qla4xxx_build_nt_list(ha, &list_nt, is_reset);
4849
4850 qla4xxx_free_ddb_list(&list_nt);
13483730
MC
4851
4852 qla4xxx_free_ddb_index(ha);
4853}
4854
afaf5a2d
DS
4855/**
4856 * qla4xxx_probe_adapter - callback function to probe HBA
4857 * @pdev: pointer to pci_dev structure
4858 * @pci_device_id: pointer to pci_device entry
4859 *
4860 * This routine will probe for QLogic 4xxx iSCSI host adapters.
4861 * It returns zero if successful. It also initializes all data necessary for
4862 * the driver.
4863 **/
4864static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
4865 const struct pci_device_id *ent)
4866{
4867 int ret = -ENODEV, status;
4868 struct Scsi_Host *host;
4869 struct scsi_qla_host *ha;
afaf5a2d
DS
4870 uint8_t init_retry_count = 0;
4871 char buf[34];
f4f5df23 4872 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
f9880e76 4873 uint32_t dev_state;
afaf5a2d
DS
4874
4875 if (pci_enable_device(pdev))
4876 return -1;
4877
b3a271a9 4878 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
afaf5a2d
DS
4879 if (host == NULL) {
4880 printk(KERN_WARNING
4881 "qla4xxx: Couldn't allocate host from scsi layer!\n");
4882 goto probe_disable_device;
4883 }
4884
4885 /* Clear our data area */
b3a271a9 4886 ha = to_qla_host(host);
afaf5a2d
DS
4887 memset(ha, 0, sizeof(*ha));
4888
4889 /* Save the information from PCI BIOS. */
4890 ha->pdev = pdev;
4891 ha->host = host;
4892 ha->host_no = host->host_no;
4893
2232be0d
LC
4894 pci_enable_pcie_error_reporting(pdev);
4895
f4f5df23
VC
4896 /* Setup Runtime configurable options */
4897 if (is_qla8022(ha)) {
4898 ha->isp_ops = &qla4_8xxx_isp_ops;
4899 rwlock_init(&ha->hw_lock);
4900 ha->qdr_sn_window = -1;
4901 ha->ddr_mn_window = -1;
4902 ha->curr_window = 255;
4903 ha->func_num = PCI_FUNC(ha->pdev->devfn);
4904 nx_legacy_intr = &legacy_intr[ha->func_num];
4905 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
4906 ha->nx_legacy_intr.tgt_status_reg =
4907 nx_legacy_intr->tgt_status_reg;
4908 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
4909 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
4910 } else {
4911 ha->isp_ops = &qla4xxx_isp_ops;
4912 }
4913
2232be0d
LC
4914 /* Set EEH reset type to fundamental if required by hba */
4915 if (is_qla8022(ha))
4916 pdev->needs_freset = 1;
4917
afaf5a2d 4918 /* Configure PCI I/O space. */
f4f5df23 4919 ret = ha->isp_ops->iospace_config(ha);
afaf5a2d 4920 if (ret)
f4f5df23 4921 goto probe_failed_ioconfig;
afaf5a2d 4922
c2660df3 4923 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
afaf5a2d
DS
4924 pdev->device, pdev->irq, ha->reg);
4925
4926 qla4xxx_config_dma_addressing(ha);
4927
4928 /* Initialize lists and spinlocks. */
afaf5a2d
DS
4929 INIT_LIST_HEAD(&ha->free_srb_q);
4930
4931 mutex_init(&ha->mbox_sem);
4549415a 4932 mutex_init(&ha->chap_sem);
f4f5df23 4933 init_completion(&ha->mbx_intr_comp);
95d31262 4934 init_completion(&ha->disable_acb_comp);
afaf5a2d
DS
4935
4936 spin_lock_init(&ha->hardware_lock);
afaf5a2d 4937
ff884430
VC
4938 /* Initialize work list */
4939 INIT_LIST_HEAD(&ha->work_list);
4940
afaf5a2d
DS
4941 /* Allocate dma buffers */
4942 if (qla4xxx_mem_alloc(ha)) {
c2660df3
VC
4943 ql4_printk(KERN_WARNING, ha,
4944 "[ERROR] Failed to allocate memory for adapter\n");
afaf5a2d
DS
4945
4946 ret = -ENOMEM;
4947 goto probe_failed;
4948 }
4949
b3a271a9
MR
4950 host->cmd_per_lun = 3;
4951 host->max_channel = 0;
4952 host->max_lun = MAX_LUNS - 1;
4953 host->max_id = MAX_TARGETS;
4954 host->max_cmd_len = IOCB_MAX_CDB_LEN;
4955 host->can_queue = MAX_SRBS ;
4956 host->transportt = qla4xxx_scsi_transport;
4957
4958 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
4959 if (ret) {
4960 ql4_printk(KERN_WARNING, ha,
4961 "%s: scsi_init_shared_tag_map failed\n", __func__);
4962 goto probe_failed;
4963 }
4964
4965 pci_set_drvdata(pdev, ha);
4966
4967 ret = scsi_add_host(host, &pdev->dev);
4968 if (ret)
4969 goto probe_failed;
4970
f4f5df23
VC
4971 if (is_qla8022(ha))
4972 (void) qla4_8xxx_get_flash_info(ha);
4973
afaf5a2d
DS
4974 /*
4975 * Initialize the Host adapter request/response queues and
4976 * firmware
4977 * NOTE: interrupts enabled upon successful completion
4978 */
13483730 4979 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
f4f5df23
VC
4980 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
4981 init_retry_count++ < MAX_INIT_RETRIES) {
f9880e76
PM
4982
4983 if (is_qla8022(ha)) {
4984 qla4_8xxx_idc_lock(ha);
4985 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
4986 qla4_8xxx_idc_unlock(ha);
4987 if (dev_state == QLA82XX_DEV_FAILED) {
4988 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
4989 "initialize adapter. H/W is in failed state\n",
4990 __func__);
4991 break;
4992 }
4993 }
afaf5a2d
DS
4994 DEBUG2(printk("scsi: %s: retrying adapter initialization "
4995 "(%d)\n", __func__, init_retry_count));
f4f5df23
VC
4996
4997 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
4998 continue;
4999
13483730 5000 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
afaf5a2d 5001 }
f4f5df23
VC
5002
5003 if (!test_bit(AF_ONLINE, &ha->flags)) {
c2660df3 5004 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
afaf5a2d 5005
fe998527
LC
5006 if (is_qla8022(ha) && ql4xdontresethba) {
5007 /* Put the device in failed state. */
5008 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
5009 qla4_8xxx_idc_lock(ha);
5010 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
5011 QLA82XX_DEV_FAILED);
5012 qla4_8xxx_idc_unlock(ha);
5013 }
afaf5a2d 5014 ret = -ENODEV;
b3a271a9 5015 goto remove_host;
afaf5a2d
DS
5016 }
5017
afaf5a2d
DS
5018 /* Startup the kernel thread for this host adapter. */
5019 DEBUG2(printk("scsi: %s: Starting kernel thread for "
5020 "qla4xxx_dpc\n", __func__));
5021 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
5022 ha->dpc_thread = create_singlethread_workqueue(buf);
5023 if (!ha->dpc_thread) {
c2660df3 5024 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
afaf5a2d 5025 ret = -ENODEV;
b3a271a9 5026 goto remove_host;
afaf5a2d 5027 }
c4028958 5028 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
afaf5a2d 5029
b3a271a9
MR
5030 sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
5031 ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
5032 if (!ha->task_wq) {
5033 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
5034 ret = -ENODEV;
5035 goto remove_host;
5036 }
5037
f4f5df23
VC
5038 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
5039 * (which is called indirectly by qla4xxx_initialize_adapter),
5040 * so that irqs will be registered after crbinit but before
5041 * mbx_intr_enable.
5042 */
5043 if (!is_qla8022(ha)) {
5044 ret = qla4xxx_request_irqs(ha);
5045 if (ret) {
5046 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
5047 "interrupt %d already in use.\n", pdev->irq);
b3a271a9 5048 goto remove_host;
f4f5df23 5049 }
afaf5a2d 5050 }
afaf5a2d 5051
2232be0d 5052 pci_save_state(ha->pdev);
f4f5df23 5053 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
5054
5055 /* Start timer thread. */
5056 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
5057
5058 set_bit(AF_INIT_DONE, &ha->flags);
5059
afaf5a2d
DS
5060 printk(KERN_INFO
5061 " QLogic iSCSI HBA Driver version: %s\n"
5062 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
5063 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
5064 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
5065 ha->patch_number, ha->build_number);
ed1086e0 5066
2a991c21 5067 if (qla4xxx_setup_boot_info(ha))
3573bfb2
VC
5068 ql4_printk(KERN_ERR, ha,
5069 "%s: No iSCSI boot target configured\n", __func__);
2a991c21 5070
13483730
MC
5071 /* Perform the build ddb list and login to each */
5072 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
5073 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
5074
5075 qla4xxx_create_chap_list(ha);
5076
ed1086e0 5077 qla4xxx_create_ifaces(ha);
afaf5a2d
DS
5078 return 0;
5079
b3a271a9
MR
5080remove_host:
5081 scsi_remove_host(ha->host);
5082
afaf5a2d
DS
5083probe_failed:
5084 qla4xxx_free_adapter(ha);
f4f5df23
VC
5085
5086probe_failed_ioconfig:
2232be0d 5087 pci_disable_pcie_error_reporting(pdev);
afaf5a2d
DS
5088 scsi_host_put(ha->host);
5089
5090probe_disable_device:
5091 pci_disable_device(pdev);
5092
5093 return ret;
5094}
5095
7eece5a0
KH
5096/**
5097 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
5098 * @ha: pointer to adapter structure
5099 *
5100 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
5101 * so that the other port will not re-initialize while in the process of
5102 * removing the ha due to driver unload or hba hotplug.
5103 **/
5104static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
5105{
5106 struct scsi_qla_host *other_ha = NULL;
5107 struct pci_dev *other_pdev = NULL;
5108 int fn = ISP4XXX_PCI_FN_2;
5109
5110 /* iSCSI function numbers for ISP4xxx are 1 and 3 */
5111 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
5112 fn = ISP4XXX_PCI_FN_1;
5113
5114 other_pdev =
5115 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
5116 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
5117 fn));
5118
5119 /* Get other_ha if other_pdev is valid and its state is enabled */
5120 if (other_pdev) {
5121 if (atomic_read(&other_pdev->enable_cnt)) {
5122 other_ha = pci_get_drvdata(other_pdev);
5123 if (other_ha) {
5124 set_bit(AF_HA_REMOVAL, &other_ha->flags);
5125 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
5126 "Prevent %s reinit\n", __func__,
5127 dev_name(&other_ha->pdev->dev)));
5128 }
5129 }
5130 pci_dev_put(other_pdev);
5131 }
5132}
5133
13483730
MC
5134static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
5135{
5136 struct ddb_entry *ddb_entry;
5137 int options;
5138 int idx;
5139
5140 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
5141
5142 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
5143 if ((ddb_entry != NULL) &&
5144 (ddb_entry->ddb_type == FLASH_DDB)) {
5145
5146 options = LOGOUT_OPTION_CLOSE_SESSION;
5147 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
5148 == QLA_ERROR)
5149 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
5150 __func__);
5151
5152 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
5153 /*
5154 * We decremented the driver's reference count when we set up the
5155 * session so that driver unload would be seamless without actually
5156 * destroying the session; take the reference back before tearing
5157 * the session down.
5158 **/
5159 try_module_get(qla4xxx_iscsi_transport.owner);
5160 iscsi_destroy_endpoint(ddb_entry->conn->ep);
5161 qla4xxx_free_ddb(ha, ddb_entry);
5162 iscsi_session_teardown(ddb_entry->sess);
5163 }
5164 }
5165}
afaf5a2d
DS
5166/**
5167 * qla4xxx_remove_adapter - callback function to remove adapter.
5168 * @pci_dev: PCI device pointer
5169 **/
5170static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
5171{
5172 struct scsi_qla_host *ha;
5173
5174 ha = pci_get_drvdata(pdev);
5175
7eece5a0
KH
5176 if (!is_qla8022(ha))
5177 qla4xxx_prevent_other_port_reinit(ha);
bee4fe8e 5178
ed1086e0
VC
5179 /* destroy iface from sysfs */
5180 qla4xxx_destroy_ifaces(ha);
5181
13483730 5182 if ((!ql4xdisablesysfsboot) && ha->boot_kset)
2a991c21
MR
5183 iscsi_boot_destroy_kset(ha->boot_kset);
5184
13483730
MC
5185 qla4xxx_destroy_fw_ddb_session(ha);
5186
afaf5a2d
DS
5187 scsi_remove_host(ha->host);
5188
5189 qla4xxx_free_adapter(ha);
5190
5191 scsi_host_put(ha->host);
5192
2232be0d 5193 pci_disable_pcie_error_reporting(pdev);
f4f5df23 5194 pci_disable_device(pdev);
afaf5a2d
DS
5195 pci_set_drvdata(pdev, NULL);
5196}
5197
5198/**
5199 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
5200 * @ha: HA context
5201 *
5202 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
5203 * supported addressing method.
5204 */
47975477 5205static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
afaf5a2d
DS
5206{
5207 int retval;
5208
5209 /* Update our PCI device dma_mask for full 64 bit mask */
6a35528a
YH
5210 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
5211 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
afaf5a2d
DS
5212 dev_dbg(&ha->pdev->dev,
5213 "Failed to set 64 bit PCI consistent mask; "
5214 "using 32 bit.\n");
5215 retval = pci_set_consistent_dma_mask(ha->pdev,
284901a9 5216 DMA_BIT_MASK(32));
afaf5a2d
DS
5217 }
5218 } else
284901a9 5219 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
afaf5a2d
DS
5220}
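The same 64-bit-first, 32-bit-fallback policy can also be written against the
consolidated DMA API; a minimal sketch, assuming a kernel that provides
dma_set_mask_and_coherent() (the sketch function name is illustrative only):

#include <linux/dma-mapping.h>

static void qla4xxx_config_dma_addressing_sketch(struct scsi_qla_host *ha)
{
	struct device *dev = &ha->pdev->dev;

	/* try full 64-bit streaming and coherent masks, else fall back */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}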
5221
5222static int qla4xxx_slave_alloc(struct scsi_device *sdev)
5223{
b3a271a9
MR
5224 struct iscsi_cls_session *cls_sess;
5225 struct iscsi_session *sess;
5226 struct ddb_entry *ddb;
8bb4033d 5227 int queue_depth = QL4_DEF_QDEPTH;
afaf5a2d 5228
b3a271a9
MR
5229 cls_sess = starget_to_session(sdev->sdev_target);
5230 sess = cls_sess->dd_data;
5231 ddb = sess->dd_data;
5232
afaf5a2d
DS
5233 sdev->hostdata = ddb;
5234 sdev->tagged_supported = 1;
8bb4033d
VC
5235
5236 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
5237 queue_depth = ql4xmaxqdepth;
5238
5239 scsi_activate_tcq(sdev, queue_depth);
afaf5a2d
DS
5240 return 0;
5241}
5242
5243static int qla4xxx_slave_configure(struct scsi_device *sdev)
5244{
5245 sdev->tagged_supported = 1;
5246 return 0;
5247}
5248
5249static void qla4xxx_slave_destroy(struct scsi_device *sdev)
5250{
5251 scsi_deactivate_tcq(sdev, 1);
5252}
5253
5254/**
5255 * qla4xxx_del_from_active_array - returns an active srb
5256 * @ha: Pointer to host adapter structure.
fd589a8f 5257 * @index: index into the active_array
afaf5a2d
DS
5258 *
5259 * This routine removes and returns the srb at the specified index
5260 **/
f4f5df23
VC
5261struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
5262 uint32_t index)
afaf5a2d
DS
5263{
5264 struct srb *srb = NULL;
5369887a 5265 struct scsi_cmnd *cmd = NULL;
afaf5a2d 5266
5369887a
VC
5267 cmd = scsi_host_find_tag(ha->host, index);
5268 if (!cmd)
afaf5a2d
DS
5269 return srb;
5270
5369887a
VC
5271 srb = (struct srb *)CMD_SP(cmd);
5272 if (!srb)
afaf5a2d
DS
5273 return srb;
5274
5275 /* update counters */
5276 if (srb->flags & SRB_DMA_VALID) {
5277 ha->req_q_count += srb->iocb_cnt;
5278 ha->iocb_cnt -= srb->iocb_cnt;
5279 if (srb->cmd)
5369887a
VC
5280 srb->cmd->host_scribble =
5281 (unsigned char *)(unsigned long) MAX_SRBS;
afaf5a2d
DS
5282 }
5283 return srb;
5284}
5285
afaf5a2d
DS
5286/**
5287 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
09a0f719 5288 * @ha: Pointer to host adapter structure.
afaf5a2d
DS
5289 * @cmd: Scsi Command to wait on.
5290 *
5291 * This routine waits, up to a maximum time, for the command to be
5292 * returned by the firmware.
5293 **/
5294static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
5295 struct scsi_cmnd *cmd)
5296{
5297 int done = 0;
5298 struct srb *rp;
5299 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
2232be0d
LC
5300 int ret = SUCCESS;
5301
5302 /* Don't wait on the command if a PCI error is being handled
5303 * by the PCI AER driver
5304 */
5305 if (unlikely(pci_channel_offline(ha->pdev)) ||
5306 (test_bit(AF_EEH_BUSY, &ha->flags))) {
5307 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
5308 ha->host_no, __func__);
5309 return ret;
5310 }
afaf5a2d
DS
5311
5312 do {
5313 /* Check to see if the command has been returned to the OS */
5369887a 5314 rp = (struct srb *) CMD_SP(cmd);
afaf5a2d
DS
5315 if (rp == NULL) {
5316 done++;
5317 break;
5318 }
5319
5320 msleep(2000);
5321 } while (max_wait_time--);
5322
5323 return done;
5324}
5325
5326/**
5327 * qla4xxx_wait_for_hba_online - waits for HBA to come online
5328 * @ha: Pointer to host adapter structure
5329 **/
5330static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
5331{
5332 unsigned long wait_online;
5333
f581a3f7 5334 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
afaf5a2d
DS
5335 while (time_before(jiffies, wait_online)) {
5336
5337 if (adapter_up(ha))
5338 return QLA_SUCCESS;
afaf5a2d
DS
5339
5340 msleep(2000);
5341 }
5342
5343 return QLA_ERROR;
5344}
5345
5346/**
ce545039 5347 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
fd589a8f 5348 * @ha: pointer to HBA
afaf5a2d
DS
5349 * @stgt: Pointer to SCSI target
5350 * @sdev: Pointer to SCSI device; NULL means all devices on the target
5351 *
5352 * This function waits for all outstanding commands on the target or device
5353 * to complete. It returns 0 if all pending commands are returned, 1 otherwise.
5354 **/
ce545039
MC
5355static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
5356 struct scsi_target *stgt,
5357 struct scsi_device *sdev)
afaf5a2d
DS
5358{
5359 int cnt;
5360 int status = 0;
5361 struct scsi_cmnd *cmd;
5362
5363 /*
ce545039
MC
5364 * Waiting for all commands for the designated target or dev
5365 * in the active array
afaf5a2d
DS
5366 */
5367 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
5368 cmd = scsi_host_find_tag(ha->host, cnt);
ce545039
MC
5369 if (cmd && stgt == scsi_target(cmd->device) &&
5370 (!sdev || sdev == cmd->device)) {
afaf5a2d
DS
5371 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5372 status++;
5373 break;
5374 }
5375 }
5376 }
5377 return status;
5378}
5379
09a0f719
VC
5380/**
5381 * qla4xxx_eh_abort - callback for abort task.
5382 * @cmd: Pointer to Linux's SCSI command structure
5383 *
5384 * This routine is called by the Linux OS to abort the specified
5385 * command.
5386 **/
5387static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
5388{
5389 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5390 unsigned int id = cmd->device->id;
5391 unsigned int lun = cmd->device->lun;
92b3e5bb 5392 unsigned long flags;
09a0f719
VC
5393 struct srb *srb = NULL;
5394 int ret = SUCCESS;
5395 int wait = 0;
5396
c2660df3 5397 ql4_printk(KERN_INFO, ha,
5cd049a5
CH
5398 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
5399 ha->host_no, id, lun, cmd);
09a0f719 5400
92b3e5bb 5401 spin_lock_irqsave(&ha->hardware_lock, flags);
09a0f719 5402 srb = (struct srb *) CMD_SP(cmd);
92b3e5bb
MC
5403 if (!srb) {
5404 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09a0f719 5405 return SUCCESS;
92b3e5bb 5406 }
09a0f719 5407 kref_get(&srb->srb_ref);
92b3e5bb 5408 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09a0f719
VC
5409
5410 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
5411 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
5412 ha->host_no, id, lun));
5413 ret = FAILED;
5414 } else {
5415 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
5416 ha->host_no, id, lun));
5417 wait = 1;
5418 }
5419
5420 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
5421
5422 /* Wait for command to complete */
5423 if (wait) {
5424 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5425 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
5426 ha->host_no, id, lun));
5427 ret = FAILED;
5428 }
5429 }
5430
c2660df3 5431 ql4_printk(KERN_INFO, ha,
09a0f719 5432 "scsi%ld:%d:%d: Abort command - %s\n",
25985edc 5433 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
09a0f719
VC
5434
5435 return ret;
5436}
5437
afaf5a2d
DS
5438/**
5439 * qla4xxx_eh_device_reset - callback for device (LUN) reset.
5440 * @cmd: Pointer to Linux's SCSI command structure
5441 *
5442 * This routine is called by the Linux OS to reset the LUN addressed by
5443 * the specified command.
5444 **/
5445static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
5446{
5447 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5448 struct ddb_entry *ddb_entry = cmd->device->hostdata;
afaf5a2d
DS
5449 int ret = FAILED, stat;
5450
612f7348 5451 if (!ddb_entry)
afaf5a2d
DS
5452 return ret;
5453
c01be6dc
MC
5454 ret = iscsi_block_scsi_eh(cmd);
5455 if (ret)
5456 return ret;
5457 ret = FAILED;
5458
c2660df3 5459 ql4_printk(KERN_INFO, ha,
afaf5a2d
DS
5460 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
5461 cmd->device->channel, cmd->device->id, cmd->device->lun);
5462
5463 DEBUG2(printk(KERN_INFO
5464 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
5465 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
242f9dcb 5466 cmd, jiffies, cmd->request->timeout / HZ,
afaf5a2d
DS
5467 ha->dpc_flags, cmd->result, cmd->allowed));
5468
5469 /* FIXME: wait for hba to go online */
5470 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
5471 if (stat != QLA_SUCCESS) {
c2660df3 5472 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
afaf5a2d
DS
5473 goto eh_dev_reset_done;
5474 }
5475
ce545039
MC
5476 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5477 cmd->device)) {
c2660df3 5478 ql4_printk(KERN_INFO, ha,
ce545039
MC
5479 "DEVICE RESET FAILED - waiting for "
5480 "commands.\n");
5481 goto eh_dev_reset_done;
afaf5a2d
DS
5482 }
5483
9d562913
DS
5484 /* Send marker. */
5485 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5486 MM_LUN_RESET) != QLA_SUCCESS)
5487 goto eh_dev_reset_done;
5488
c2660df3 5489 ql4_printk(KERN_INFO, ha,
afaf5a2d
DS
5490 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
5491 ha->host_no, cmd->device->channel, cmd->device->id,
5492 cmd->device->lun);
5493
5494 ret = SUCCESS;
5495
5496eh_dev_reset_done:
5497
5498 return ret;
5499}
5500
ce545039
MC
5501/**
5502 * qla4xxx_eh_target_reset - callback for target reset.
5503 * @cmd: Pointer to Linux's SCSI command structure
5504 *
5505 * This routine is called by the Linux OS to reset the target.
5506 **/
5507static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
5508{
5509 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5510 struct ddb_entry *ddb_entry = cmd->device->hostdata;
c01be6dc 5511 int stat, ret;
ce545039
MC
5512
5513 if (!ddb_entry)
5514 return FAILED;
5515
c01be6dc
MC
5516 ret = iscsi_block_scsi_eh(cmd);
5517 if (ret)
5518 return ret;
5519
ce545039
MC
5520 starget_printk(KERN_INFO, scsi_target(cmd->device),
5521 "WARM TARGET RESET ISSUED.\n");
5522
5523 DEBUG2(printk(KERN_INFO
5524 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
5525 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
242f9dcb 5526 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
ce545039
MC
5527 ha->dpc_flags, cmd->result, cmd->allowed));
5528
5529 stat = qla4xxx_reset_target(ha, ddb_entry);
5530 if (stat != QLA_SUCCESS) {
5531 starget_printk(KERN_INFO, scsi_target(cmd->device),
5532 "WARM TARGET RESET FAILED.\n");
5533 return FAILED;
5534 }
5535
ce545039
MC
5536 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5537 NULL)) {
5538 starget_printk(KERN_INFO, scsi_target(cmd->device),
5539 "WARM TARGET DEVICE RESET FAILED - "
5540 "waiting for commands.\n");
5541 return FAILED;
5542 }
5543
9d562913
DS
5544 /* Send marker. */
5545 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5546 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
5547 starget_printk(KERN_INFO, scsi_target(cmd->device),
5548 "WARM TARGET DEVICE RESET FAILED - "
5549 "marker iocb failed.\n");
5550 return FAILED;
5551 }
5552
ce545039
MC
5553 starget_printk(KERN_INFO, scsi_target(cmd->device),
5554 "WARM TARGET RESET SUCCEEDED.\n");
5555 return SUCCESS;
5556}
5557
8a288960
SR
5558/**
5559 * qla4xxx_is_eh_active - check if error handler is running
5560 * @shost: Pointer to SCSI Host struct
5561 *
5562 * This routine determines whether the host reset was invoked from an
5563 * EH scenario or from an application such as sg_reset.
5564 **/
5565static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
5566{
5567 if (shost->shost_state == SHOST_RECOVERY)
5568 return 1;
5569 return 0;
5570}
5571
afaf5a2d
DS
5572/**
5573 * qla4xxx_eh_host_reset - kernel callback
5574 * @cmd: Pointer to Linux's SCSI command structure
5575 *
5576 * This routine is invoked by the Linux kernel to perform fatal error
5577 * recovery on the specified adapter.
5578 **/
5579static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
5580{
5581 int return_status = FAILED;
5582 struct scsi_qla_host *ha;
5583
b3a271a9 5584 ha = to_qla_host(cmd->device->host);
afaf5a2d 5585
f4f5df23
VC
5586 if (ql4xdontresethba) {
5587 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5588 ha->host_no, __func__));
8a288960
SR
5589
5590 /* Clear outstanding srb in queues */
5591 if (qla4xxx_is_eh_active(cmd->device->host))
5592 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
5593
f4f5df23
VC
5594 return FAILED;
5595 }
5596
c2660df3 5597 ql4_printk(KERN_INFO, ha,
dca05c4c 5598 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
afaf5a2d
DS
5599 cmd->device->channel, cmd->device->id, cmd->device->lun);
5600
5601 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
5602 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
5603 "DEAD.\n", ha->host_no, cmd->device->channel,
5604 __func__));
5605
5606 return FAILED;
5607 }
5608
f4f5df23
VC
5609 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5610 if (is_qla8022(ha))
5611 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5612 else
5613 set_bit(DPC_RESET_HA, &ha->dpc_flags);
5614 }
50a29aec 5615
f4f5df23 5616 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
afaf5a2d 5617 return_status = SUCCESS;
afaf5a2d 5618
c2660df3 5619 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
25985edc 5620 return_status == FAILED ? "FAILED" : "SUCCEEDED");
afaf5a2d
DS
5621
5622 return return_status;
5623}
5624
95d31262
VC
5625static int qla4xxx_context_reset(struct scsi_qla_host *ha)
5626{
5627 uint32_t mbox_cmd[MBOX_REG_COUNT];
5628 uint32_t mbox_sts[MBOX_REG_COUNT];
5629 struct addr_ctrl_blk_def *acb = NULL;
5630 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
5631 int rval = QLA_SUCCESS;
5632 dma_addr_t acb_dma;
5633
5634 acb = dma_alloc_coherent(&ha->pdev->dev,
5635 sizeof(struct addr_ctrl_blk_def),
5636 &acb_dma, GFP_KERNEL);
5637 if (!acb) {
5638 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
5639 __func__);
5640 rval = -ENOMEM;
5641 goto exit_port_reset;
5642 }
5643
5644 memset(acb, 0, acb_len);
5645
5646 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
5647 if (rval != QLA_SUCCESS) {
5648 rval = -EIO;
5649 goto exit_free_acb;
5650 }
5651
5652 rval = qla4xxx_disable_acb(ha);
5653 if (rval != QLA_SUCCESS) {
5654 rval = -EIO;
5655 goto exit_free_acb;
5656 }
5657
5658 wait_for_completion_timeout(&ha->disable_acb_comp,
5659 DISABLE_ACB_TOV * HZ);
5660
5661 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
5662 if (rval != QLA_SUCCESS) {
5663 rval = -EIO;
5664 goto exit_free_acb;
5665 }
5666
5667exit_free_acb:
5668 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
5669 acb, acb_dma);
5670exit_port_reset:
5671 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
5672 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
5673 return rval;
5674}
5675
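/**
 * qla4xxx_host_reset - iSCSI transport host reset entry point
 * @shost: Pointer to SCSI Host struct
 * @reset_type: SCSI_ADAPTER_RESET or SCSI_FIRMWARE_RESET
 *
 * Schedules an adapter or firmware-context reset, as requested, and
 * recovers the adapter. Resets are refused when ql4xdontresethba is set.
 **/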
5676static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
5677{
5678 struct scsi_qla_host *ha = to_qla_host(shost);
5679 int rval = QLA_SUCCESS;
5680
5681 if (ql4xdontresethba) {
5682 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
5683 __func__));
5684 rval = -EPERM;
5685 goto exit_host_reset;
5686 }
5687
5688 rval = qla4xxx_wait_for_hba_online(ha);
5689 if (rval != QLA_SUCCESS) {
5690 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
5691 "adapter\n", __func__));
5692 rval = -EIO;
5693 goto exit_host_reset;
5694 }
5695
5696 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
5697 goto recover_adapter;
5698
5699 switch (reset_type) {
5700 case SCSI_ADAPTER_RESET:
5701 set_bit(DPC_RESET_HA, &ha->dpc_flags);
5702 break;
5703 case SCSI_FIRMWARE_RESET:
5704 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5705 if (is_qla8022(ha))
5706 /* set firmware context reset */
5707 set_bit(DPC_RESET_HA_FW_CONTEXT,
5708 &ha->dpc_flags);
5709 else {
5710 rval = qla4xxx_context_reset(ha);
5711 goto exit_host_reset;
5712 }
5713 }
5714 break;
5715 }
5716
5717recover_adapter:
5718 rval = qla4xxx_recover_adapter(ha);
5719 if (rval != QLA_SUCCESS) {
5720 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
5721 __func__));
5722 rval = -EIO;
5723 }
5724
5725exit_host_reset:
5726 return rval;
5727}
5728
2232be0d
LC
5729/* PCI AER driver recovers from all correctable errors w/o
5730 * driver intervention. For uncorrectable errors PCI AER
5731 * driver calls the following device driver's callbacks
5732 *
5733 * - Fatal Errors - link_reset
5734 * - Non-Fatal Errors - driver's pci_error_detected() which
5735 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
5736 *
5737 * PCI AER driver calls
5738 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
5739 * returns RECOVERED or NEED_RESET if fw_hung
5740 * NEED_RESET - driver's slot_reset()
5741 * DISCONNECT - device is dead & cannot recover
5742 * RECOVERED - driver's pci_resume()
5743 */
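/**
 * qla4xxx_pci_error_detected - AER error_detected callback
 * @pdev: PCI device pointer
 * @state: PCI channel state
 *
 * Tells the AER core whether the slot can recover, needs a reset, or
 * must be disconnected. Frozen and permanently failed channels mark the
 * adapter EEH-busy and fail any outstanding commands.
 **/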
5744static pci_ers_result_t
5745qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5746{
5747 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5748
5749 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
5750 ha->host_no, __func__, state);
5751
5752 if (!is_aer_supported(ha))
5753 return PCI_ERS_RESULT_NONE;
5754
5755 switch (state) {
5756 case pci_channel_io_normal:
5757 clear_bit(AF_EEH_BUSY, &ha->flags);
5758 return PCI_ERS_RESULT_CAN_RECOVER;
5759 case pci_channel_io_frozen:
5760 set_bit(AF_EEH_BUSY, &ha->flags);
5761 qla4xxx_mailbox_premature_completion(ha);
5762 qla4xxx_free_irqs(ha);
5763 pci_disable_device(pdev);
7b3595df
VC
5764 /* Return back all IOs */
5765 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2232be0d
LC
5766 return PCI_ERS_RESULT_NEED_RESET;
5767 case pci_channel_io_perm_failure:
5768 set_bit(AF_EEH_BUSY, &ha->flags);
5769 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
5770 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
5771 return PCI_ERS_RESULT_DISCONNECT;
5772 }
5773 return PCI_ERS_RESULT_NEED_RESET;
5774}
5775
5776/**
5777 * qla4xxx_pci_mmio_enabled() gets called if
5778 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
5779 * and read/write to the device still works.
5780 **/
5781static pci_ers_result_t
5782qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
5783{
5784 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5785
5786 if (!is_aer_supported(ha))
5787 return PCI_ERS_RESULT_NONE;
5788
7b3595df 5789 return PCI_ERS_RESULT_RECOVERED;
2232be0d
LC
5790}
5791
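/**
 * qla4_8xxx_error_recovery - recover an ISP82xx adapter after a PCI error
 * @ha: Pointer to host adapter structure.
 *
 * Determines whether this PCI function is the reset owner. The owner
 * reinitializes the firmware and drives the IDC device state; the other
 * functions reinitialize only once the device is READY. Interrupts are
 * re-requested and enabled on success.
 **/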
7b3595df 5792static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2232be0d
LC
5793{
5794 uint32_t rval = QLA_ERROR;
7b3595df 5795 uint32_t ret = 0;
2232be0d
LC
5796 int fn;
5797 struct pci_dev *other_pdev = NULL;
5798
5799 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
5800
5801 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
5802
5803 if (test_bit(AF_ONLINE, &ha->flags)) {
5804 clear_bit(AF_ONLINE, &ha->flags);
b3a271a9
MR
5805 clear_bit(AF_LINK_UP, &ha->flags);
5806 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2232be0d 5807 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2232be0d
LC
5808 }
5809
5810 fn = PCI_FUNC(ha->pdev->devfn);
5811 while (fn > 0) {
5812 fn--;
5813 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
5814 "func %x\n", ha->host_no, __func__, fn);
5815 /* Get the pci device given the domain, bus,
5816 * slot/function number */
5817 other_pdev =
5818 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
5819 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
5820 fn));
5821
5822 if (!other_pdev)
5823 continue;
5824
5825 if (atomic_read(&other_pdev->enable_cnt)) {
5826 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
5827 "func in enabled state%x\n", ha->host_no,
5828 __func__, fn);
5829 pci_dev_put(other_pdev);
5830 break;
5831 }
5832 pci_dev_put(other_pdev);
5833 }
5834
5835 /* The first function on the card, the reset owner, will
5836 * start and initialize the firmware. The other functions
5837 * on the card will reset the firmware context.
5838 */
5839 if (!fn) {
5840 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
5841 "0x%x is the owner\n", ha->host_no, __func__,
5842 ha->pdev->devfn);
5843
5844 qla4_8xxx_idc_lock(ha);
5845 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
5846 QLA82XX_DEV_COLD);
5847
5848 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
5849 QLA82XX_IDC_VERSION);
5850
5851 qla4_8xxx_idc_unlock(ha);
5852 clear_bit(AF_FW_RECOVERY, &ha->flags);
13483730 5853 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
2232be0d
LC
5854 qla4_8xxx_idc_lock(ha);
5855
5856 if (rval != QLA_SUCCESS) {
5857 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
5858 "FAILED\n", ha->host_no, __func__);
5859 qla4_8xxx_clear_drv_active(ha);
5860 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
5861 QLA82XX_DEV_FAILED);
5862 } else {
5863 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
5864 "READY\n", ha->host_no, __func__);
5865 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
5866 QLA82XX_DEV_READY);
5867 /* Clear driver state register */
5868 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
5869 qla4_8xxx_set_drv_active(ha);
7b3595df
VC
5870 ret = qla4xxx_request_irqs(ha);
5871 if (ret) {
5872 ql4_printk(KERN_WARNING, ha, "Failed to "
5873 "reserve interrupt %d already in use.\n",
5874 ha->pdev->irq);
5875 rval = QLA_ERROR;
5876 } else {
5877 ha->isp_ops->enable_intrs(ha);
5878 rval = QLA_SUCCESS;
5879 }
2232be0d
LC
5880 }
5881 qla4_8xxx_idc_unlock(ha);
5882 } else {
5883 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
5884 "the reset owner\n", ha->host_no, __func__,
5885 ha->pdev->devfn);
5886 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
5887 QLA82XX_DEV_READY)) {
5888 clear_bit(AF_FW_RECOVERY, &ha->flags);
13483730 5889 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
7b3595df
VC
5890 if (rval == QLA_SUCCESS) {
5891 ret = qla4xxx_request_irqs(ha);
5892 if (ret) {
5893 ql4_printk(KERN_WARNING, ha, "Failed to"
5894 " reserve interrupt %d already in"
5895 " use.\n", ha->pdev->irq);
5896 rval = QLA_ERROR;
5897 } else {
5898 ha->isp_ops->enable_intrs(ha);
5899 rval = QLA_SUCCESS;
5900 }
5901 }
2232be0d
LC
5902 qla4_8xxx_idc_lock(ha);
5903 qla4_8xxx_set_drv_active(ha);
5904 qla4_8xxx_idc_unlock(ha);
5905 }
5906 }
5907 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
5908 return rval;
5909}
5910
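/**
 * qla4xxx_pci_slot_reset - AER slot_reset callback
 * @pdev: PCI device pointer
 *
 * Restores and re-saves PCI state, re-enables the device and, for
 * ISP82xx, runs qla4_8xxx_error_recovery() before reporting whether the
 * slot has recovered.
 **/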
5911static pci_ers_result_t
5912qla4xxx_pci_slot_reset(struct pci_dev *pdev)
5913{
5914 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
5915 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5916 int rc;
5917
5918 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
5919 ha->host_no, __func__);
5920
5921 if (!is_aer_supported(ha))
5922 return PCI_ERS_RESULT_NONE;
5923
5924 /* Restore the saved state of PCIe device -
5925 * BAR registers, PCI Config space, PCIX, MSI,
5926 * IOV states
5927 */
5928 pci_restore_state(pdev);
5929
5930 /* pci_restore_state() clears the device's saved_state flag, so
5931 * save the just-restored state again to set the flag back
5932 */
5933 pci_save_state(pdev);
5934
5935 /* Initialize device or resume if in suspended state */
5936 rc = pci_enable_device(pdev);
5937 if (rc) {
25985edc 5938 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
2232be0d
LC
5939 "device after reset\n", ha->host_no, __func__);
5940 goto exit_slot_reset;
5941 }
5942
7b3595df 5943 ha->isp_ops->disable_intrs(ha);
2232be0d
LC
5944
5945 if (is_qla8022(ha)) {
5946 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
5947 ret = PCI_ERS_RESULT_RECOVERED;
5948 goto exit_slot_reset;
5949 } else
5950 goto exit_slot_reset;
5951 }
5952
5953exit_slot_reset:
5954 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x after slot reset\n",
5955 ha->host_no, __func__, ret);
5956 return ret;
5957}
5958
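/**
 * qla4xxx_pci_resume - AER resume callback
 * @pdev: PCI device pointer
 *
 * Waits for the adapter to come back online, clears the uncorrectable
 * AER error status, and drops the EEH-busy flag so I/O can resume.
 **/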
5959static void
5960qla4xxx_pci_resume(struct pci_dev *pdev)
5961{
5962 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5963 int ret;
5964
5965 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
5966 ha->host_no, __func__);
5967
5968 ret = qla4xxx_wait_for_hba_online(ha);
5969 if (ret != QLA_SUCCESS) {
5970 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
5971 "resume I/O from slot/link_reset\n", ha->host_no,
5972 __func__);
5973 }
5974
5975 pci_cleanup_aer_uncorrect_error_status(pdev);
5976 clear_bit(AF_EEH_BUSY, &ha->flags);
5977}
5978
5979static struct pci_error_handlers qla4xxx_err_handler = {
5980 .error_detected = qla4xxx_pci_error_detected,
5981 .mmio_enabled = qla4xxx_pci_mmio_enabled,
5982 .slot_reset = qla4xxx_pci_slot_reset,
5983 .resume = qla4xxx_pci_resume,
5984};
5985
afaf5a2d
DS
5986static struct pci_device_id qla4xxx_pci_tbl[] = {
5987 {
5988 .vendor = PCI_VENDOR_ID_QLOGIC,
5989 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
5990 .subvendor = PCI_ANY_ID,
5991 .subdevice = PCI_ANY_ID,
5992 },
5993 {
5994 .vendor = PCI_VENDOR_ID_QLOGIC,
5995 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
5996 .subvendor = PCI_ANY_ID,
5997 .subdevice = PCI_ANY_ID,
5998 },
d915058f
DS
5999 {
6000 .vendor = PCI_VENDOR_ID_QLOGIC,
6001 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
6002 .subvendor = PCI_ANY_ID,
6003 .subdevice = PCI_ANY_ID,
6004 },
f4f5df23
VC
6005 {
6006 .vendor = PCI_VENDOR_ID_QLOGIC,
6007 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
6008 .subvendor = PCI_ANY_ID,
6009 .subdevice = PCI_ANY_ID,
6010 },
afaf5a2d
DS
6011 {0, 0},
6012};
6013MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
6014
47975477 6015static struct pci_driver qla4xxx_pci_driver = {
afaf5a2d
DS
6016 .name = DRIVER_NAME,
6017 .id_table = qla4xxx_pci_tbl,
6018 .probe = qla4xxx_probe_adapter,
6019 .remove = qla4xxx_remove_adapter,
2232be0d 6020 .err_handler = &qla4xxx_err_handler,
afaf5a2d
DS
6021};
6022
6023static int __init qla4xxx_module_init(void)
6024{
6025 int ret;
6026
6027 /* Allocate cache for SRBs. */
6028 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
20c2df83 6029 SLAB_HWCACHE_ALIGN, NULL);
afaf5a2d
DS
6030 if (srb_cachep == NULL) {
6031 printk(KERN_ERR
6032 "%s: Unable to allocate SRB cache..."
6033 "Failing load!\n", DRIVER_NAME);
6034 ret = -ENOMEM;
6035 goto no_srp_cache;
6036 }
6037
6038 /* Derive version string. */
6039 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
11010fec 6040 if (ql4xextended_error_logging)
afaf5a2d
DS
6041 strcat(qla4xxx_version_str, "-debug");
6042
6043 qla4xxx_scsi_transport =
6044 iscsi_register_transport(&qla4xxx_iscsi_transport);
6045 if (!qla4xxx_scsi_transport){
6046 ret = -ENODEV;
6047 goto release_srb_cache;
6048 }
6049
afaf5a2d
DS
6050 ret = pci_register_driver(&qla4xxx_pci_driver);
6051 if (ret)
6052 goto unregister_transport;
6053
6054 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
6055 return 0;
5ae16db3 6056
afaf5a2d
DS
6057unregister_transport:
6058 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6059release_srb_cache:
6060 kmem_cache_destroy(srb_cachep);
6061no_srp_cache:
6062 return ret;
6063}
6064
6065static void __exit qla4xxx_module_exit(void)
6066{
6067 pci_unregister_driver(&qla4xxx_pci_driver);
6068 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6069 kmem_cache_destroy(srb_cachep);
6070}
6071
6072module_init(qla4xxx_module_init);
6073module_exit(qla4xxx_module_exit);
6074
6075MODULE_AUTHOR("QLogic Corporation");
6076MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
6077MODULE_LICENSE("GPL");
6078MODULE_VERSION(QLA4XXX_DRIVER_VERSION);