1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7#include <linux/moduleparam.h>
8#include <linux/slab.h>
9#include <linux/blkdev.h>
10#include <linux/iscsi_boot_sysfs.h>
11#include <linux/inet.h>
12
13#include <scsi/scsi_tcq.h>
14#include <scsi/scsicam.h>
15
16#include "ql4_def.h"
17#include "ql4_version.h"
18#include "ql4_glbl.h"
19#include "ql4_dbg.h"
20#include "ql4_inline.h"
21
22/*
23 * Driver version
24 */
25static char qla4xxx_version_str[40];
26
27/*
28 * SRB allocation cache
29 */
30static struct kmem_cache *srb_cachep;
31
32/*
33 * Module parameter information and variables
34 */
35static int ql4xdisablesysfsboot = 1;
36module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(ql4xdisablesysfsboot,
38 " Set to disable exporting boot targets to sysfs.\n"
39 "\t\t 0 - Export boot targets\n"
40 "\t\t 1 - Do not export boot targets (Default)");
41
42int ql4xdontresethba;
43module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
44MODULE_PARM_DESC(ql4xdontresethba,
45 " Don't reset the HBA for driver recovery.\n"
46 "\t\t 0 - It will reset HBA (Default)\n"
47 "\t\t 1 - It will NOT reset HBA");
48
49int ql4xextended_error_logging;
50module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
51MODULE_PARM_DESC(ql4xextended_error_logging,
52 " Option to enable extended error logging.\n"
53 "\t\t 0 - no logging (Default)\n"
54 "\t\t 2 - debug logging");
55
56int ql4xenablemsix = 1;
57module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
58MODULE_PARM_DESC(ql4xenablemsix,
59 " Set to enable MSI or MSI-X interrupt mechanism.\n"
60 "\t\t 0 = enable INTx interrupt mechanism.\n"
61 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
62 "\t\t 2 = enable MSI interrupt mechanism.");
63
64#define QL4_DEF_QDEPTH 32
65static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
66module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
67MODULE_PARM_DESC(ql4xmaxqdepth,
68 " Maximum queue depth to report for target devices.\n"
69 "\t\t Default: 32.");
70
71static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
72module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
73MODULE_PARM_DESC(ql4xsess_recovery_tmo,
74 " Target Session Recovery Timeout.\n"
75 "\t\t Default: 120 sec.");
76
77static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
78/*
79 * SCSI host template entry points
80 */
81static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
82
83/*
84 * iSCSI template entry points
85 */
86static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
87 enum iscsi_param param, char *buf);
88static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
89 enum iscsi_param param, char *buf);
90static int qla4xxx_host_get_param(struct Scsi_Host *shost,
91 enum iscsi_host_param param, char *buf);
92static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
93 uint32_t len);
94static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
95 enum iscsi_param_type param_type,
96 int param, char *buf);
97static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
98static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
99 struct sockaddr *dst_addr,
100 int non_blocking);
101static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
102static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
103static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
104 enum iscsi_param param, char *buf);
105static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
106static struct iscsi_cls_conn *
107qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
108static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
109 struct iscsi_cls_conn *cls_conn,
110 uint64_t transport_fd, int is_leading);
111static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
112static struct iscsi_cls_session *
113qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
114 uint16_t qdepth, uint32_t initial_cmdsn);
115static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
116static void qla4xxx_task_work(struct work_struct *wdata);
117static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
118static int qla4xxx_task_xmit(struct iscsi_task *);
119static void qla4xxx_task_cleanup(struct iscsi_task *);
120static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
121static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
122 struct iscsi_stats *stats);
123static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
124 uint32_t iface_type, uint32_t payload_size,
125 uint32_t pid, struct sockaddr *dst_addr);
126static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
127 uint32_t *num_entries, char *buf);
128static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
129
130/*
131 * SCSI host template entry points
132 */
133static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
134static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
135static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
136static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
137static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
138static int qla4xxx_slave_alloc(struct scsi_device *device);
139static int qla4xxx_slave_configure(struct scsi_device *device);
140static void qla4xxx_slave_destroy(struct scsi_device *sdev);
141static umode_t ql4_attr_is_visible(int param_type, int param);
142static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
143
144static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
145 QLA82XX_LEGACY_INTR_CONFIG;
146
147static struct scsi_host_template qla4xxx_driver_template = {
148 .module = THIS_MODULE,
149 .name = DRIVER_NAME,
150 .proc_name = DRIVER_NAME,
151 .queuecommand = qla4xxx_queuecommand,
152
153 .eh_abort_handler = qla4xxx_eh_abort,
154 .eh_device_reset_handler = qla4xxx_eh_device_reset,
155 .eh_target_reset_handler = qla4xxx_eh_target_reset,
156 .eh_host_reset_handler = qla4xxx_eh_host_reset,
157 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
158
159 .slave_configure = qla4xxx_slave_configure,
160 .slave_alloc = qla4xxx_slave_alloc,
161 .slave_destroy = qla4xxx_slave_destroy,
162
163 .this_id = -1,
164 .cmd_per_lun = 3,
165 .use_clustering = ENABLE_CLUSTERING,
166 .sg_tablesize = SG_ALL,
167
168 .max_sectors = 0xFFFF,
169 .shost_attrs = qla4xxx_host_attrs,
170 .host_reset = qla4xxx_host_reset,
171 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
172};
173
174static struct iscsi_transport qla4xxx_iscsi_transport = {
175 .owner = THIS_MODULE,
176 .name = DRIVER_NAME,
177 .caps = CAP_TEXT_NEGO |
178 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
179 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
180 CAP_MULTI_R2T,
181 .attr_is_visible = ql4_attr_is_visible,
182 .create_session = qla4xxx_session_create,
183 .destroy_session = qla4xxx_session_destroy,
184 .start_conn = qla4xxx_conn_start,
185 .create_conn = qla4xxx_conn_create,
186 .bind_conn = qla4xxx_conn_bind,
187 .stop_conn = iscsi_conn_stop,
188 .destroy_conn = qla4xxx_conn_destroy,
189 .set_param = iscsi_set_param,
190 .get_conn_param = qla4xxx_conn_get_param,
191 .get_session_param = qla4xxx_session_get_param,
192 .get_ep_param = qla4xxx_get_ep_param,
193 .ep_connect = qla4xxx_ep_connect,
194 .ep_poll = qla4xxx_ep_poll,
195 .ep_disconnect = qla4xxx_ep_disconnect,
196 .get_stats = qla4xxx_conn_get_stats,
197 .send_pdu = iscsi_conn_send_pdu,
198 .xmit_task = qla4xxx_task_xmit,
199 .cleanup_task = qla4xxx_task_cleanup,
200 .alloc_pdu = qla4xxx_alloc_pdu,
201
202 .get_host_param = qla4xxx_host_get_param,
203 .set_iface_param = qla4xxx_iface_set_param,
204 .get_iface_param = qla4xxx_get_iface_param,
205 .bsg_request = qla4xxx_bsg_request,
206 .send_ping = qla4xxx_send_ping,
207 .get_chap = qla4xxx_get_chap_list,
208 .delete_chap = qla4xxx_delete_chap,
209};
210
211static struct scsi_transport_template *qla4xxx_scsi_transport;
212
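/*
 * iscsi_transport .send_ping handler: builds a PING IOCB for the firmware.
 * For IPv6 destinations the link-local source address is tried first; if
 * that fails, the routable IPv6 address of iface 0 or 1 is used instead.
 */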
213static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
214 uint32_t iface_type, uint32_t payload_size,
215 uint32_t pid, struct sockaddr *dst_addr)
216{
217 struct scsi_qla_host *ha = to_qla_host(shost);
218 struct sockaddr_in *addr;
219 struct sockaddr_in6 *addr6;
220 uint32_t options = 0;
221 uint8_t ipaddr[IPv6_ADDR_LEN];
222 int rval;
223
224 memset(ipaddr, 0, IPv6_ADDR_LEN);
225 /* IPv4 to IPv4 */
226 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
227 (dst_addr->sa_family == AF_INET)) {
228 addr = (struct sockaddr_in *)dst_addr;
229 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
230 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
231 "dest: %pI4\n", __func__,
232 &ha->ip_config.ip_address, ipaddr));
233 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
234 ipaddr);
235 if (rval)
236 rval = -EINVAL;
237 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
238 (dst_addr->sa_family == AF_INET6)) {
239 /* IPv6 to IPv6 */
240 addr6 = (struct sockaddr_in6 *)dst_addr;
241 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
242
243 options |= PING_IPV6_PROTOCOL_ENABLE;
244
245 /* Ping using LinkLocal address */
246 if ((iface_num == 0) || (iface_num == 1)) {
247 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
248 "src: %pI6 dest: %pI6\n", __func__,
249 &ha->ip_config.ipv6_link_local_addr,
250 ipaddr));
251 options |= PING_IPV6_LINKLOCAL_ADDR;
252 rval = qla4xxx_ping_iocb(ha, options, payload_size,
253 pid, ipaddr);
254 } else {
255 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
256 "not supported\n", __func__, iface_num);
257 rval = -ENOSYS;
258 goto exit_send_ping;
259 }
260
261 /*
262 * If ping using LinkLocal address fails, try ping using
263 * IPv6 address
264 */
265 if (rval != QLA_SUCCESS) {
266 options &= ~PING_IPV6_LINKLOCAL_ADDR;
267 if (iface_num == 0) {
268 options |= PING_IPV6_ADDR0;
269 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
270 "Ping src: %pI6 "
271 "dest: %pI6\n", __func__,
272 &ha->ip_config.ipv6_addr0,
273 ipaddr));
274 } else if (iface_num == 1) {
275 options |= PING_IPV6_ADDR1;
276 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
277 "Ping src: %pI6 "
278 "dest: %pI6\n", __func__,
279 &ha->ip_config.ipv6_addr1,
280 ipaddr));
281 }
282 rval = qla4xxx_ping_iocb(ha, options, payload_size,
283 pid, ipaddr);
284 if (rval)
285 rval = -EINVAL;
286 }
287 } else
288 rval = -ENOSYS;
289exit_send_ping:
290 return rval;
291}
292
293static umode_t ql4_attr_is_visible(int param_type, int param)
294{
295 switch (param_type) {
296 case ISCSI_HOST_PARAM:
297 switch (param) {
298 case ISCSI_HOST_PARAM_HWADDRESS:
299 case ISCSI_HOST_PARAM_IPADDRESS:
300 case ISCSI_HOST_PARAM_INITIATOR_NAME:
301 case ISCSI_HOST_PARAM_PORT_STATE:
302 case ISCSI_HOST_PARAM_PORT_SPEED:
303 return S_IRUGO;
304 default:
305 return 0;
306 }
307 case ISCSI_PARAM:
308 switch (param) {
309 case ISCSI_PARAM_PERSISTENT_ADDRESS:
310 case ISCSI_PARAM_PERSISTENT_PORT:
311 case ISCSI_PARAM_CONN_ADDRESS:
312 case ISCSI_PARAM_CONN_PORT:
313 case ISCSI_PARAM_TARGET_NAME:
314 case ISCSI_PARAM_TPGT:
315 case ISCSI_PARAM_TARGET_ALIAS:
316 case ISCSI_PARAM_MAX_BURST:
317 case ISCSI_PARAM_MAX_R2T:
318 case ISCSI_PARAM_FIRST_BURST:
319 case ISCSI_PARAM_MAX_RECV_DLENGTH:
320 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
321 case ISCSI_PARAM_IFACE_NAME:
322 case ISCSI_PARAM_CHAP_OUT_IDX:
323 case ISCSI_PARAM_CHAP_IN_IDX:
324 case ISCSI_PARAM_USERNAME:
325 case ISCSI_PARAM_PASSWORD:
326 case ISCSI_PARAM_USERNAME_IN:
327 case ISCSI_PARAM_PASSWORD_IN:
328 return S_IRUGO;
329 default:
330 return 0;
331 }
332 case ISCSI_NET_PARAM:
333 switch (param) {
334 case ISCSI_NET_PARAM_IPV4_ADDR:
335 case ISCSI_NET_PARAM_IPV4_SUBNET:
336 case ISCSI_NET_PARAM_IPV4_GW:
337 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
338 case ISCSI_NET_PARAM_IFACE_ENABLE:
339 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
340 case ISCSI_NET_PARAM_IPV6_ADDR:
341 case ISCSI_NET_PARAM_IPV6_ROUTER:
342 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
343 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
344 case ISCSI_NET_PARAM_VLAN_ID:
345 case ISCSI_NET_PARAM_VLAN_PRIORITY:
346 case ISCSI_NET_PARAM_VLAN_ENABLED:
347 case ISCSI_NET_PARAM_MTU:
348 case ISCSI_NET_PARAM_PORT:
349 return S_IRUGO;
350 default:
351 return 0;
352 }
353 }
354
355 return 0;
356}
357
358static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
359 uint32_t *num_entries, char *buf)
360{
361 struct scsi_qla_host *ha = to_qla_host(shost);
362 struct ql4_chap_table *chap_table;
363 struct iscsi_chap_rec *chap_rec;
364 int max_chap_entries = 0;
365 int valid_chap_entries = 0;
366 int ret = 0, i;
367
368 if (is_qla8022(ha))
369 max_chap_entries = (ha->hw.flt_chap_size / 2) /
370 sizeof(struct ql4_chap_table);
371 else
372 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
373
374 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
375 __func__, *num_entries, chap_tbl_idx);
376
377 if (!buf) {
378 ret = -ENOMEM;
379 goto exit_get_chap_list;
380 }
381
382 chap_rec = (struct iscsi_chap_rec *) buf;
383 mutex_lock(&ha->chap_sem);
384 for (i = chap_tbl_idx; i < max_chap_entries; i++) {
385 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
386 if (chap_table->cookie !=
387 __constant_cpu_to_le16(CHAP_VALID_COOKIE))
388 continue;
389
390 chap_rec->chap_tbl_idx = i;
391 strncpy(chap_rec->username, chap_table->name,
392 ISCSI_CHAP_AUTH_NAME_MAX_LEN);
393 strncpy(chap_rec->password, chap_table->secret,
394 QL4_CHAP_MAX_SECRET_LEN);
395 chap_rec->password_length = chap_table->secret_len;
396
397 if (chap_table->flags & BIT_7) /* local */
398 chap_rec->chap_type = CHAP_TYPE_OUT;
399
400 if (chap_table->flags & BIT_6) /* peer */
401 chap_rec->chap_type = CHAP_TYPE_IN;
402
403 chap_rec++;
404
405 valid_chap_entries++;
406 if (valid_chap_entries == *num_entries)
407 break;
408 else
409 continue;
410 }
411 mutex_unlock(&ha->chap_sem);
412
413exit_get_chap_list:
414 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
415 __func__, valid_chap_entries);
416 *num_entries = valid_chap_entries;
417 return ret;
418}
419
420static int __qla4xxx_is_chap_active(struct device *dev, void *data)
421{
422 int ret = 0;
423 uint16_t *chap_tbl_idx = (uint16_t *) data;
424 struct iscsi_cls_session *cls_session;
425 struct iscsi_session *sess;
426 struct ddb_entry *ddb_entry;
427
428 if (!iscsi_is_session_dev(dev))
429 goto exit_is_chap_active;
430
431 cls_session = iscsi_dev_to_session(dev);
432 sess = cls_session->dd_data;
433 ddb_entry = sess->dd_data;
434
435 if (iscsi_session_chkready(cls_session))
436 goto exit_is_chap_active;
437
438 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
439 ret = 1;
440
441exit_is_chap_active:
442 return ret;
443}
444
445static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
446 uint16_t chap_tbl_idx)
447{
448 int ret = 0;
449
450 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
451 __qla4xxx_is_chap_active);
452
453 return ret;
454}
455
456static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
457{
458 struct scsi_qla_host *ha = to_qla_host(shost);
459 struct ql4_chap_table *chap_table;
460 dma_addr_t chap_dma;
461 int max_chap_entries = 0;
462 uint32_t offset = 0;
463 uint32_t chap_size;
464 int ret = 0;
465
466 chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
467 if (chap_table == NULL)
468 return -ENOMEM;
469
470 memset(chap_table, 0, sizeof(struct ql4_chap_table));
471
472 if (is_qla8022(ha))
473 max_chap_entries = (ha->hw.flt_chap_size / 2) /
474 sizeof(struct ql4_chap_table);
475 else
476 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
477
478 if (chap_tbl_idx > max_chap_entries) {
479 ret = -EINVAL;
480 goto exit_delete_chap;
481 }
482
483 /* Check if chap index is in use.
484 * If chap is in use don't delete chap entry */
485 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
486 if (ret) {
487 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
488 "delete from flash\n", chap_tbl_idx);
489 ret = -EBUSY;
490 goto exit_delete_chap;
491 }
492
493 chap_size = sizeof(struct ql4_chap_table);
494 if (is_qla40XX(ha))
495 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
496 else {
497 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
498 /* flt_chap_size is CHAP table size for both ports
499 * so divide it by 2 to calculate the offset for second port
500 */
501 if (ha->port_num == 1)
502 offset += (ha->hw.flt_chap_size / 2);
503 offset += (chap_tbl_idx * chap_size);
504 }
505
506 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
507 if (ret != QLA_SUCCESS) {
508 ret = -EINVAL;
509 goto exit_delete_chap;
510 }
511
512 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
513 __le16_to_cpu(chap_table->cookie)));
514
515 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
516 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
517 goto exit_delete_chap;
518 }
519
520 chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
521
522 offset = FLASH_CHAP_OFFSET |
523 (chap_tbl_idx * sizeof(struct ql4_chap_table));
524 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
525 FLASH_OPT_RMW_COMMIT);
526 if (ret == QLA_SUCCESS && ha->chap_list) {
527 mutex_lock(&ha->chap_sem);
528 /* Update ha chap_list cache */
529 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
530 chap_table, sizeof(struct ql4_chap_table));
531 mutex_unlock(&ha->chap_sem);
532 }
533 if (ret != QLA_SUCCESS)
534 ret = -EINVAL;
535
536exit_delete_chap:
537 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
538 return ret;
539}
540
541static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
542 enum iscsi_param_type param_type,
543 int param, char *buf)
544{
545 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
546 struct scsi_qla_host *ha = to_qla_host(shost);
547 int len = -ENOSYS;
548
549 if (param_type != ISCSI_NET_PARAM)
550 return -ENOSYS;
551
552 switch (param) {
553 case ISCSI_NET_PARAM_IPV4_ADDR:
554 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
555 break;
556 case ISCSI_NET_PARAM_IPV4_SUBNET:
557 len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
558 break;
559 case ISCSI_NET_PARAM_IPV4_GW:
560 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
561 break;
562 case ISCSI_NET_PARAM_IFACE_ENABLE:
563 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
564 len = sprintf(buf, "%s\n",
565 (ha->ip_config.ipv4_options &
566 IPOPT_IPV4_PROTOCOL_ENABLE) ?
567 "enabled" : "disabled");
568 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
569 len = sprintf(buf, "%s\n",
570 (ha->ip_config.ipv6_options &
571 IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
572 "enabled" : "disabled");
573 break;
574 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
575 len = sprintf(buf, "%s\n",
576 (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
577 "dhcp" : "static");
578 break;
579 case ISCSI_NET_PARAM_IPV6_ADDR:
580 if (iface->iface_num == 0)
581 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
582 if (iface->iface_num == 1)
583 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
584 break;
585 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
586 len = sprintf(buf, "%pI6\n",
587 &ha->ip_config.ipv6_link_local_addr);
588 break;
589 case ISCSI_NET_PARAM_IPV6_ROUTER:
590 len = sprintf(buf, "%pI6\n",
591 &ha->ip_config.ipv6_default_router_addr);
592 break;
593 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
594 len = sprintf(buf, "%s\n",
595 (ha->ip_config.ipv6_addl_options &
596 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
597 "nd" : "static");
598 break;
599 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
600 len = sprintf(buf, "%s\n",
601 (ha->ip_config.ipv6_addl_options &
602 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
603 "auto" : "static");
604 break;
605 case ISCSI_NET_PARAM_VLAN_ID:
606 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
607 len = sprintf(buf, "%d\n",
608 (ha->ip_config.ipv4_vlan_tag &
609 ISCSI_MAX_VLAN_ID));
610 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
611 len = sprintf(buf, "%d\n",
612 (ha->ip_config.ipv6_vlan_tag &
613 ISCSI_MAX_VLAN_ID));
614 break;
615 case ISCSI_NET_PARAM_VLAN_PRIORITY:
616 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
617 len = sprintf(buf, "%d\n",
618 ((ha->ip_config.ipv4_vlan_tag >> 13) &
619 ISCSI_MAX_VLAN_PRIORITY));
620 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
621 len = sprintf(buf, "%d\n",
622 ((ha->ip_config.ipv6_vlan_tag >> 13) &
623 ISCSI_MAX_VLAN_PRIORITY));
624 break;
625 case ISCSI_NET_PARAM_VLAN_ENABLED:
626 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
627 len = sprintf(buf, "%s\n",
628 (ha->ip_config.ipv4_options &
629 IPOPT_VLAN_TAGGING_ENABLE) ?
630 "enabled" : "disabled");
631 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
632 len = sprintf(buf, "%s\n",
633 (ha->ip_config.ipv6_options &
634 IPV6_OPT_VLAN_TAGGING_ENABLE) ?
635 "enabled" : "disabled");
636 break;
637 case ISCSI_NET_PARAM_MTU:
638 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
639 break;
640 case ISCSI_NET_PARAM_PORT:
641 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
642 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
643 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
644 len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
645 break;
646 default:
647 len = -ENOSYS;
648 }
649
650 return len;
651}
652
653static struct iscsi_endpoint *
654qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
655 int non_blocking)
656{
657 int ret;
658 struct iscsi_endpoint *ep;
659 struct qla_endpoint *qla_ep;
660 struct scsi_qla_host *ha;
661 struct sockaddr_in *addr;
662 struct sockaddr_in6 *addr6;
663
664 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
665 if (!shost) {
666 ret = -ENXIO;
667 printk(KERN_ERR "%s: shost is NULL\n",
668 __func__);
669 return ERR_PTR(ret);
670 }
671
672 ha = iscsi_host_priv(shost);
673
674 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
675 if (!ep) {
676 ret = -ENOMEM;
677 return ERR_PTR(ret);
678 }
679
680 qla_ep = ep->dd_data;
681 memset(qla_ep, 0, sizeof(struct qla_endpoint));
682 if (dst_addr->sa_family == AF_INET) {
683 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
684 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
685 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
686 (char *)&addr->sin_addr));
687 } else if (dst_addr->sa_family == AF_INET6) {
688 memcpy(&qla_ep->dst_addr, dst_addr,
689 sizeof(struct sockaddr_in6));
690 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
691 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
692 (char *)&addr6->sin6_addr));
693 }
694
695 qla_ep->host = shost;
696
697 return ep;
698}
699
700static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
701{
702 struct qla_endpoint *qla_ep;
703 struct scsi_qla_host *ha;
704 int ret = 0;
705
706 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
707 qla_ep = ep->dd_data;
708 ha = to_qla_host(qla_ep->host);
709
710 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
711 ret = 1;
712
713 return ret;
714}
715
716static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
717{
718 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
719 iscsi_destroy_endpoint(ep);
720}
721
722static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
723 enum iscsi_param param,
724 char *buf)
725{
726 struct qla_endpoint *qla_ep = ep->dd_data;
727 struct sockaddr *dst_addr;
728
729 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
730
731 switch (param) {
732 case ISCSI_PARAM_CONN_PORT:
733 case ISCSI_PARAM_CONN_ADDRESS:
734 if (!qla_ep)
735 return -ENOTCONN;
736
737 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
738 if (!dst_addr)
739 return -ENOTCONN;
740
741 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
742 &qla_ep->dst_addr, param, buf);
743 default:
744 return -ENOSYS;
745 }
746}
747
748static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
749 struct iscsi_stats *stats)
750{
751 struct iscsi_session *sess;
752 struct iscsi_cls_session *cls_sess;
753 struct ddb_entry *ddb_entry;
754 struct scsi_qla_host *ha;
755 struct ql_iscsi_stats *ql_iscsi_stats;
756 int stats_size;
757 int ret;
758 dma_addr_t iscsi_stats_dma;
759
760 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
761
762 cls_sess = iscsi_conn_to_session(cls_conn);
763 sess = cls_sess->dd_data;
764 ddb_entry = sess->dd_data;
765 ha = ddb_entry->ha;
766
767 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
768 /* Allocate memory */
769 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
770 &iscsi_stats_dma, GFP_KERNEL);
771 if (!ql_iscsi_stats) {
772 ql4_printk(KERN_ERR, ha,
773 "Unable to allocate memory for iscsi stats\n");
774 goto exit_get_stats;
775 }
776
777 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
778 iscsi_stats_dma);
779 if (ret != QLA_SUCCESS) {
780 ql4_printk(KERN_ERR, ha,
781 "Unable to retreive iscsi stats\n");
782 goto free_stats;
783 }
784
785 /* octets */
786 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
787 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
788 /* xmit pdus */
789 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
790 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
791 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
792 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
793 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
794 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
795 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
796 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
797 /* recv pdus */
798 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
799 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
800 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
801 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
802 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
803 stats->logoutrsp_pdus =
804 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
805 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
806 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
807 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
808
809free_stats:
810 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
811 iscsi_stats_dma);
812exit_get_stats:
813 return;
814}
815
816static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
817{
818 struct iscsi_cls_session *session;
819 struct iscsi_session *sess;
820 unsigned long flags;
821 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
822
823 session = starget_to_session(scsi_target(sc->device));
824 sess = session->dd_data;
825
826 spin_lock_irqsave(&session->lock, flags);
827 if (session->state == ISCSI_SESSION_FAILED)
828 ret = BLK_EH_RESET_TIMER;
829 spin_unlock_irqrestore(&session->lock, flags);
830
831 return ret;
832}
833
834static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
835{
836 struct scsi_qla_host *ha = to_qla_host(shost);
837 struct iscsi_cls_host *ihost = shost->shost_data;
838 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
839
840 qla4xxx_get_firmware_state(ha);
841
842 switch (ha->addl_fw_state & 0x0F00) {
843 case FW_ADDSTATE_LINK_SPEED_10MBPS:
844 speed = ISCSI_PORT_SPEED_10MBPS;
845 break;
846 case FW_ADDSTATE_LINK_SPEED_100MBPS:
847 speed = ISCSI_PORT_SPEED_100MBPS;
848 break;
849 case FW_ADDSTATE_LINK_SPEED_1GBPS:
850 speed = ISCSI_PORT_SPEED_1GBPS;
851 break;
852 case FW_ADDSTATE_LINK_SPEED_10GBPS:
853 speed = ISCSI_PORT_SPEED_10GBPS;
854 break;
855 }
856 ihost->port_speed = speed;
857}
858
859static void qla4xxx_set_port_state(struct Scsi_Host *shost)
860{
861 struct scsi_qla_host *ha = to_qla_host(shost);
862 struct iscsi_cls_host *ihost = shost->shost_data;
863 uint32_t state = ISCSI_PORT_STATE_DOWN;
864
865 if (test_bit(AF_LINK_UP, &ha->flags))
866 state = ISCSI_PORT_STATE_UP;
867
868 ihost->port_state = state;
869}
870
871static int qla4xxx_host_get_param(struct Scsi_Host *shost,
872 enum iscsi_host_param param, char *buf)
873{
874 struct scsi_qla_host *ha = to_qla_host(shost);
875 int len;
876
877 switch (param) {
878 case ISCSI_HOST_PARAM_HWADDRESS:
879 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
880 break;
881 case ISCSI_HOST_PARAM_IPADDRESS:
882 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
883 break;
884 case ISCSI_HOST_PARAM_INITIATOR_NAME:
885 len = sprintf(buf, "%s\n", ha->name_string);
886 break;
887 case ISCSI_HOST_PARAM_PORT_STATE:
888 qla4xxx_set_port_state(shost);
889 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
890 break;
891 case ISCSI_HOST_PARAM_PORT_SPEED:
892 qla4xxx_set_port_speed(shost);
893 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
894 break;
895 default:
896 return -ENOSYS;
897 }
898
899 return len;
900}
901
902static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
903{
904 if (ha->iface_ipv4)
905 return;
906
907 /* IPv4 */
908 ha->iface_ipv4 = iscsi_create_iface(ha->host,
909 &qla4xxx_iscsi_transport,
910 ISCSI_IFACE_TYPE_IPV4, 0, 0);
911 if (!ha->iface_ipv4)
912 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
913 "iface0.\n");
914}
915
916static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
917{
918 if (!ha->iface_ipv6_0)
919 /* IPv6 iface-0 */
920 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
921 &qla4xxx_iscsi_transport,
922 ISCSI_IFACE_TYPE_IPV6, 0,
923 0);
924 if (!ha->iface_ipv6_0)
925 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
926 "iface0.\n");
927
928 if (!ha->iface_ipv6_1)
929 /* IPv6 iface-1 */
930 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
931 &qla4xxx_iscsi_transport,
932 ISCSI_IFACE_TYPE_IPV6, 1,
933 0);
934 if (!ha->iface_ipv6_1)
935 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
936 "iface1.\n");
937}
938
939static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
940{
941 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
942 qla4xxx_create_ipv4_iface(ha);
943
944 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
945 qla4xxx_create_ipv6_iface(ha);
946}
947
948static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
949{
950 if (ha->iface_ipv4) {
951 iscsi_destroy_iface(ha->iface_ipv4);
952 ha->iface_ipv4 = NULL;
953 }
954}
955
956static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
957{
958 if (ha->iface_ipv6_0) {
959 iscsi_destroy_iface(ha->iface_ipv6_0);
960 ha->iface_ipv6_0 = NULL;
961 }
962 if (ha->iface_ipv6_1) {
963 iscsi_destroy_iface(ha->iface_ipv6_1);
964 ha->iface_ipv6_1 = NULL;
965 }
966}
967
968static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
969{
970 qla4xxx_destroy_ipv4_iface(ha);
971 qla4xxx_destroy_ipv6_iface(ha);
972}
973
974static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
975 struct iscsi_iface_param_info *iface_param,
976 struct addr_ctrl_blk *init_fw_cb)
977{
978 /*
979 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
980 * iface_num 1 is valid only for IPv6 Addr.
981 */
982 switch (iface_param->param) {
983 case ISCSI_NET_PARAM_IPV6_ADDR:
984 if (iface_param->iface_num & 0x1)
985 /* IPv6 Addr 1 */
986 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
987 sizeof(init_fw_cb->ipv6_addr1));
988 else
989 /* IPv6 Addr 0 */
990 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
991 sizeof(init_fw_cb->ipv6_addr0));
992 break;
993 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
994 if (iface_param->iface_num & 0x1)
995 break;
996 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
997 sizeof(init_fw_cb->ipv6_if_id));
998 break;
999 case ISCSI_NET_PARAM_IPV6_ROUTER:
1000 if (iface_param->iface_num & 0x1)
1001 break;
1002 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
1003 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1004 break;
1005 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1006 /* Autocfg applies to even interface */
1007 if (iface_param->iface_num & 0x1)
1008 break;
1009
1010 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
1011 init_fw_cb->ipv6_addtl_opts &=
1012 cpu_to_le16(
1013 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1014 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
1015 init_fw_cb->ipv6_addtl_opts |=
1016 cpu_to_le16(
1017 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1018 else
1019 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1020 "IPv6 addr\n");
1021 break;
1022 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1023 /* Autocfg applies to even interface */
1024 if (iface_param->iface_num & 0x1)
1025 break;
1026
1027 if (iface_param->value[0] ==
1028 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
1029 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
1030 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1031 else if (iface_param->value[0] ==
1032 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
1033 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
1034 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1035 else
1036 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1037 "IPv6 linklocal addr\n");
1038 break;
1039 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
1040 /* Autocfg applies to even interface */
1041 if (iface_param->iface_num & 0x1)
1042 break;
1043
1044 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
1045 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
1046 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1047 break;
1048 case ISCSI_NET_PARAM_IFACE_ENABLE:
1049 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1050 init_fw_cb->ipv6_opts |=
1051 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
1052 qla4xxx_create_ipv6_iface(ha);
1053 } else {
1054 init_fw_cb->ipv6_opts &=
1055 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
1056 0xFFFF);
1057 qla4xxx_destroy_ipv6_iface(ha);
1058 }
1059 break;
1060 case ISCSI_NET_PARAM_VLAN_TAG:
1061 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
1062 break;
1063 init_fw_cb->ipv6_vlan_tag =
1064 cpu_to_be16(*(uint16_t *)iface_param->value);
1065 break;
1066 case ISCSI_NET_PARAM_VLAN_ENABLED:
1067 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1068 init_fw_cb->ipv6_opts |=
1069 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
1070 else
1071 init_fw_cb->ipv6_opts &=
1072 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
1073 break;
1074 case ISCSI_NET_PARAM_MTU:
1075 init_fw_cb->eth_mtu_size =
1076 cpu_to_le16(*(uint16_t *)iface_param->value);
1077 break;
1078 case ISCSI_NET_PARAM_PORT:
1079 /* Autocfg applies to even interface */
1080 if (iface_param->iface_num & 0x1)
1081 break;
1082
1083 init_fw_cb->ipv6_port =
1084 cpu_to_le16(*(uint16_t *)iface_param->value);
1085 break;
1086 default:
1087 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
1088 iface_param->param);
1089 break;
1090 }
1091}
1092
1093static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
1094 struct iscsi_iface_param_info *iface_param,
1095 struct addr_ctrl_blk *init_fw_cb)
1096{
1097 switch (iface_param->param) {
1098 case ISCSI_NET_PARAM_IPV4_ADDR:
1099 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
1100 sizeof(init_fw_cb->ipv4_addr));
1101 break;
1102 case ISCSI_NET_PARAM_IPV4_SUBNET:
1103 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
1104 sizeof(init_fw_cb->ipv4_subnet));
1105 break;
1106 case ISCSI_NET_PARAM_IPV4_GW:
1107 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
1108 sizeof(init_fw_cb->ipv4_gw_addr));
1109 break;
1110 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1111 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
1112 init_fw_cb->ipv4_tcp_opts |=
1113 cpu_to_le16(TCPOPT_DHCP_ENABLE);
1114 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
1115 init_fw_cb->ipv4_tcp_opts &=
1116 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
1117 else
1118 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
1119 break;
1120 case ISCSI_NET_PARAM_IFACE_ENABLE:
1121 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1122 init_fw_cb->ipv4_ip_opts |=
1123 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
1124 qla4xxx_create_ipv4_iface(ha);
1125 } else {
1126 init_fw_cb->ipv4_ip_opts &=
1127 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
1128 0xFFFF);
1129 qla4xxx_destroy_ipv4_iface(ha);
1130 }
1131 break;
1132 case ISCSI_NET_PARAM_VLAN_TAG:
1133 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
1134 break;
1135 init_fw_cb->ipv4_vlan_tag =
1136 cpu_to_be16(*(uint16_t *)iface_param->value);
1137 break;
1138 case ISCSI_NET_PARAM_VLAN_ENABLED:
1139 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1140 init_fw_cb->ipv4_ip_opts |=
1141 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
1142 else
1143 init_fw_cb->ipv4_ip_opts &=
1144 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
1145 break;
1146 case ISCSI_NET_PARAM_MTU:
1147 init_fw_cb->eth_mtu_size =
1148 cpu_to_le16(*(uint16_t *)iface_param->value);
1149 break;
1150 case ISCSI_NET_PARAM_PORT:
1151 init_fw_cb->ipv4_port =
1152 cpu_to_le16(*(uint16_t *)iface_param->value);
1153 break;
1154 default:
1155 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
1156 iface_param->param);
1157 break;
1158 }
1159}
1160
1161static void
1162qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
1163{
1164 struct addr_ctrl_blk_def *acb;
1165 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
1166 memset(acb->reserved1, 0, sizeof(acb->reserved1));
1167 memset(acb->reserved2, 0, sizeof(acb->reserved2));
1168 memset(acb->reserved3, 0, sizeof(acb->reserved3));
1169 memset(acb->reserved4, 0, sizeof(acb->reserved4));
1170 memset(acb->reserved5, 0, sizeof(acb->reserved5));
1171 memset(acb->reserved6, 0, sizeof(acb->reserved6));
1172 memset(acb->reserved7, 0, sizeof(acb->reserved7));
1173 memset(acb->reserved8, 0, sizeof(acb->reserved8));
1174 memset(acb->reserved9, 0, sizeof(acb->reserved9));
1175 memset(acb->reserved10, 0, sizeof(acb->reserved10));
1176 memset(acb->reserved11, 0, sizeof(acb->reserved11));
1177 memset(acb->reserved12, 0, sizeof(acb->reserved12));
1178 memset(acb->reserved13, 0, sizeof(acb->reserved13));
1179 memset(acb->reserved14, 0, sizeof(acb->reserved14));
1180 memset(acb->reserved15, 0, sizeof(acb->reserved15));
1181}
1182
1183static int
1184qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
1185{
1186 struct scsi_qla_host *ha = to_qla_host(shost);
1187 int rval = 0;
1188 struct iscsi_iface_param_info *iface_param = NULL;
1189 struct addr_ctrl_blk *init_fw_cb = NULL;
1190 dma_addr_t init_fw_cb_dma;
1191 uint32_t mbox_cmd[MBOX_REG_COUNT];
1192 uint32_t mbox_sts[MBOX_REG_COUNT];
1193 uint32_t rem = len;
1194 struct nlattr *attr;
1195
1196 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
1197 sizeof(struct addr_ctrl_blk),
1198 &init_fw_cb_dma, GFP_KERNEL);
1199 if (!init_fw_cb) {
1200 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
1201 __func__);
1202 return -ENOMEM;
1203 }
1204
1205 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1206 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1207 memset(&mbox_sts, 0, sizeof(mbox_sts));
1208
1209 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
1210 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
1211 rval = -EIO;
1212 goto exit_init_fw_cb;
1213 }
1214
1215 nla_for_each_attr(attr, data, len, rem) {
1216 iface_param = nla_data(attr);
1217
1218 if (iface_param->param_type != ISCSI_NET_PARAM)
1219 continue;
1220
1221 switch (iface_param->iface_type) {
1222 case ISCSI_IFACE_TYPE_IPV4:
1223 switch (iface_param->iface_num) {
1224 case 0:
1225 qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
1226 break;
1227 default:
1228 /* Cannot have more than one IPv4 interface */
1229 ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
1230 "number = %d\n",
1231 iface_param->iface_num);
1232 break;
1233 }
1234 break;
1235 case ISCSI_IFACE_TYPE_IPV6:
1236 switch (iface_param->iface_num) {
1237 case 0:
1238 case 1:
1239 qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
1240 break;
1241 default:
1242 /* Cannot have more than two IPv6 interfaces */
1243 ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
1244 "number = %d\n",
1245 iface_param->iface_num);
1246 break;
1247 }
1248 break;
1249 default:
1250 ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
1251 break;
1252 }
1253 }
1254
1255 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
1256
1257 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
1258 sizeof(struct addr_ctrl_blk),
1259 FLASH_OPT_RMW_COMMIT);
1260 if (rval != QLA_SUCCESS) {
1261 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
1262 __func__);
1263 rval = -EIO;
1264 goto exit_init_fw_cb;
1265 }
1266
1267 rval = qla4xxx_disable_acb(ha);
1268 if (rval != QLA_SUCCESS) {
1269 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
1270 __func__);
1271 rval = -EIO;
1272 goto exit_init_fw_cb;
1273 }
1274
1275 wait_for_completion_timeout(&ha->disable_acb_comp,
1276 DISABLE_ACB_TOV * HZ);
1277
1278 qla4xxx_initcb_to_acb(init_fw_cb);
1279
1280 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
1281 if (rval != QLA_SUCCESS) {
1282 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
1283 __func__);
1284 rval = -EIO;
1285 goto exit_init_fw_cb;
1286 }
1287
1288 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1289 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
1290 init_fw_cb_dma);
1291
1292exit_init_fw_cb:
1293 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
1294 init_fw_cb, init_fw_cb_dma);
1295
1296 return rval;
1297}
1298
1299static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1300 enum iscsi_param param, char *buf)
1301{
1302 struct iscsi_session *sess = cls_sess->dd_data;
1303 struct ddb_entry *ddb_entry = sess->dd_data;
1304 struct scsi_qla_host *ha = ddb_entry->ha;
1305 int rval, len;
1306 uint16_t idx;
1307
1308 switch (param) {
1309 case ISCSI_PARAM_CHAP_IN_IDX:
1310 rval = qla4xxx_get_chap_index(ha, sess->username_in,
1311 sess->password_in, BIDI_CHAP,
1312 &idx);
1313 if (rval)
1314 return -EINVAL;
1315
1316 len = sprintf(buf, "%hu\n", idx);
1317 break;
1318 case ISCSI_PARAM_CHAP_OUT_IDX:
1319 rval = qla4xxx_get_chap_index(ha, sess->username,
1320 sess->password, LOCAL_CHAP,
1321 &idx);
1322 if (rval)
1323 return -EINVAL;
1324
1325 len = sprintf(buf, "%hu\n", idx);
1326 break;
1327 default:
1328 return iscsi_session_get_param(cls_sess, param, buf);
1329 }
1330
1331 return len;
1332}
1333
1334static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1335 enum iscsi_param param, char *buf)
1336{
1337 struct iscsi_conn *conn;
1338 struct qla_conn *qla_conn;
1339 struct sockaddr *dst_addr;
1340 int len = 0;
1341
1342 conn = cls_conn->dd_data;
1343 qla_conn = conn->dd_data;
1344 dst_addr = &qla_conn->qla_ep->dst_addr;
1345
1346 switch (param) {
1347 case ISCSI_PARAM_CONN_PORT:
1348 case ISCSI_PARAM_CONN_ADDRESS:
1349 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1350 dst_addr, param, buf);
1351 default:
1352 return iscsi_conn_get_param(cls_conn, param, buf);
1353 }
1354
1355 return len;
1356
1357}
1358
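/*
 * Find a free DDB index: claim a clear bit in ddb_idx_map with
 * test_and_set_bit() and then reserve that index with the firmware,
 * retrying with the next free index if the firmware rejects it.
 */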
1359int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
1360{
1361 uint32_t mbx_sts = 0;
1362 uint16_t tmp_ddb_index;
1363 int ret;
1364
1365get_ddb_index:
1366 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1367
1368 if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
1369 DEBUG2(ql4_printk(KERN_INFO, ha,
1370 "Free DDB index not available\n"));
1371 ret = QLA_ERROR;
1372 goto exit_get_ddb_index;
1373 }
1374
1375 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
1376 goto get_ddb_index;
1377
1378 DEBUG2(ql4_printk(KERN_INFO, ha,
1379 "Found a free DDB index at %d\n", tmp_ddb_index));
1380 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
1381 if (ret == QLA_ERROR) {
1382 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1383 ql4_printk(KERN_INFO, ha,
1384 "DDB index = %d not available trying next\n",
1385 tmp_ddb_index);
1386 goto get_ddb_index;
1387 }
1388 DEBUG2(ql4_printk(KERN_INFO, ha,
1389 "Free FW DDB not available\n"));
1390 }
1391
1392 *ddb_index = tmp_ddb_index;
1393
1394exit_get_ddb_index:
1395 return ret;
1396}
1397
1398static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
1399 struct ddb_entry *ddb_entry,
1400 char *existing_ipaddr,
1401 char *user_ipaddr)
1402{
1403 uint8_t dst_ipaddr[IPv6_ADDR_LEN];
1404 char formatted_ipaddr[DDB_IPADDR_LEN];
1405 int status = QLA_SUCCESS, ret = 0;
1406
1407 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
1408 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1409 '\0', NULL);
1410 if (ret == 0) {
1411 status = QLA_ERROR;
1412 goto out_match;
1413 }
1414 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
1415 } else {
1416 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1417 '\0', NULL);
1418 if (ret == 0) {
1419 status = QLA_ERROR;
1420 goto out_match;
1421 }
1422 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
1423 }
1424
1425 if (strcmp(existing_ipaddr, formatted_ipaddr))
1426 status = QLA_ERROR;
1427
1428out_match:
1429 return status;
1430}
1431
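/*
 * Returns QLA_SUCCESS if the session being created matches an existing
 * flash DDB session (same target IQN, persistent address and port), so
 * the caller can avoid logging in to the same target twice.
 */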
1432static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
1433 struct iscsi_cls_conn *cls_conn)
1434{
1435 int idx = 0, max_ddbs, rval;
1436 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1437 struct iscsi_session *sess, *existing_sess;
1438 struct iscsi_conn *conn, *existing_conn;
1439 struct ddb_entry *ddb_entry;
1440
1441 sess = cls_sess->dd_data;
1442 conn = cls_conn->dd_data;
1443
1444 if (sess->targetname == NULL ||
1445 conn->persistent_address == NULL ||
1446 conn->persistent_port == 0)
1447 return QLA_ERROR;
1448
1449 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
1450 MAX_DEV_DB_ENTRIES;
1451
1452 for (idx = 0; idx < max_ddbs; idx++) {
1453 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
1454 if (ddb_entry == NULL)
1455 continue;
1456
1457 if (ddb_entry->ddb_type != FLASH_DDB)
1458 continue;
1459
1460 existing_sess = ddb_entry->sess->dd_data;
1461 existing_conn = ddb_entry->conn->dd_data;
1462
1463 if (existing_sess->targetname == NULL ||
1464 existing_conn->persistent_address == NULL ||
1465 existing_conn->persistent_port == 0)
1466 continue;
1467
1468 DEBUG2(ql4_printk(KERN_INFO, ha,
1469 "IQN = %s User IQN = %s\n",
1470 existing_sess->targetname,
1471 sess->targetname));
1472
1473 DEBUG2(ql4_printk(KERN_INFO, ha,
1474 "IP = %s User IP = %s\n",
1475 existing_conn->persistent_address,
1476 conn->persistent_address));
1477
1478 DEBUG2(ql4_printk(KERN_INFO, ha,
1479 "Port = %d User Port = %d\n",
1480 existing_conn->persistent_port,
1481 conn->persistent_port));
1482
1483 if (strcmp(existing_sess->targetname, sess->targetname))
1484 continue;
1485 rval = qla4xxx_match_ipaddress(ha, ddb_entry,
1486 existing_conn->persistent_address,
1487 conn->persistent_address);
1488 if (rval == QLA_ERROR)
1489 continue;
1490 if (existing_conn->persistent_port != conn->persistent_port)
1491 continue;
1492 break;
1493 }
1494
1495 if (idx == max_ddbs)
1496 return QLA_ERROR;
1497
1498 DEBUG2(ql4_printk(KERN_INFO, ha,
1499 "Match found in fwdb sessions\n"));
1500 return QLA_SUCCESS;
1501}
1502
1503static struct iscsi_cls_session *
1504qla4xxx_session_create(struct iscsi_endpoint *ep,
1505 uint16_t cmds_max, uint16_t qdepth,
1506 uint32_t initial_cmdsn)
1507{
1508 struct iscsi_cls_session *cls_sess;
1509 struct scsi_qla_host *ha;
1510 struct qla_endpoint *qla_ep;
1511 struct ddb_entry *ddb_entry;
1512 uint16_t ddb_index;
1513 struct iscsi_session *sess;
1514 struct sockaddr *dst_addr;
1515 int ret;
1516
1517 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1518 if (!ep) {
1519 printk(KERN_ERR "qla4xxx: missing ep.\n");
1520 return NULL;
1521 }
1522
1523 qla_ep = ep->dd_data;
1524 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1525 ha = to_qla_host(qla_ep->host);
1526
1527 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
1528 if (ret == QLA_ERROR)
1529 return NULL;
1530
1531 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1532 cmds_max, sizeof(struct ddb_entry),
1533 sizeof(struct ql4_task_data),
1534 initial_cmdsn, ddb_index);
1535 if (!cls_sess)
1536 return NULL;
1537
1538 sess = cls_sess->dd_data;
1539 ddb_entry = sess->dd_data;
1540 ddb_entry->fw_ddb_index = ddb_index;
1541 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1542 ddb_entry->ha = ha;
1543 ddb_entry->sess = cls_sess;
1544 ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
1545 ddb_entry->ddb_change = qla4xxx_ddb_change;
1546 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1547 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1548 ha->tot_ddbs++;
1549
1550 return cls_sess;
1551}
1552
1553static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1554{
1555 struct iscsi_session *sess;
1556 struct ddb_entry *ddb_entry;
1557 struct scsi_qla_host *ha;
1558 unsigned long flags;
1559
1560 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1561 sess = cls_sess->dd_data;
1562 ddb_entry = sess->dd_data;
1563 ha = ddb_entry->ha;
1564
1565 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1566
1567 spin_lock_irqsave(&ha->hardware_lock, flags);
1568 qla4xxx_free_ddb(ha, ddb_entry);
1569 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1570 iscsi_session_teardown(cls_sess);
1571}
1572
1573static struct iscsi_cls_conn *
1574qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1575{
1576 struct iscsi_cls_conn *cls_conn;
1577 struct iscsi_session *sess;
1578 struct ddb_entry *ddb_entry;
1579
1580 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1581 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1582 conn_idx);
1583 if (!cls_conn)
1584 return NULL;
1585
1586 sess = cls_sess->dd_data;
1587 ddb_entry = sess->dd_data;
1588 ddb_entry->conn = cls_conn;
1589
1590 return cls_conn;
1591}
1592
1593static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1594 struct iscsi_cls_conn *cls_conn,
1595 uint64_t transport_fd, int is_leading)
1596{
1597 struct iscsi_conn *conn;
1598 struct qla_conn *qla_conn;
1599 struct iscsi_endpoint *ep;
1600
1601 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1602
1603 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1604 return -EINVAL;
1605 ep = iscsi_lookup_endpoint(transport_fd);
1606 conn = cls_conn->dd_data;
1607 qla_conn = conn->dd_data;
1608 qla_conn->qla_ep = ep->dd_data;
1609 return 0;
1610}
1611
1612static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1613{
1614 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1615 struct iscsi_session *sess;
1616 struct ddb_entry *ddb_entry;
1617 struct scsi_qla_host *ha;
1618 struct dev_db_entry *fw_ddb_entry = NULL;
1619 dma_addr_t fw_ddb_entry_dma;
1620 uint32_t mbx_sts = 0;
1621 int ret = 0;
1622 int status = QLA_SUCCESS;
1623
1624 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1625 sess = cls_sess->dd_data;
1626 ddb_entry = sess->dd_data;
1627 ha = ddb_entry->ha;
1628
1629 /* Check if we have matching FW DDB, if yes then do not
1630 * login to this target. This could cause target to logout previous
1631 * connection
1632 */
1633 ret = qla4xxx_match_fwdb_session(ha, cls_conn);
1634 if (ret == QLA_SUCCESS) {
1635 ql4_printk(KERN_INFO, ha,
1636 "Session already exist in FW.\n");
1637 ret = -EEXIST;
1638 goto exit_conn_start;
1639 }
1640
1641 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1642 &fw_ddb_entry_dma, GFP_KERNEL);
1643 if (!fw_ddb_entry) {
1644 ql4_printk(KERN_ERR, ha,
1645 "%s: Unable to allocate dma buffer\n", __func__);
1646 ret = -ENOMEM;
1647 goto exit_conn_start;
1648 }
1649
1650 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1651 if (ret) {
1652 /* If iscsid is stopped and started then no need to do
1653 * set param again since ddb state will be already
1654 * active and FW does not allow set ddb to an
1655 * active session.
1656 */
1657 if (mbx_sts)
1658 if (ddb_entry->fw_ddb_device_state ==
1659 DDB_DS_SESSION_ACTIVE) {
1660 ddb_entry->unblock_sess(ddb_entry->sess);
1661 goto exit_set_param;
1662 }
1663
1664 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1665 __func__, ddb_entry->fw_ddb_index);
1666 goto exit_conn_start;
1667 }
1668
1669 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1670 if (status == QLA_ERROR) {
1671 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1672 sess->targetname);
1673 ret = -EINVAL;
1674 goto exit_conn_start;
1675 }
1676
1677 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1678 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1679
1680 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1681 ddb_entry->fw_ddb_device_state));
1682
1683exit_set_param:
1684 ret = 0;
1685
1686exit_conn_start:
1687 if (fw_ddb_entry)
1688 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1689 fw_ddb_entry, fw_ddb_entry_dma);
1690 return ret;
1691}
1692
1693static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1694{
1695 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1696 struct iscsi_session *sess;
1697 struct scsi_qla_host *ha;
1698 struct ddb_entry *ddb_entry;
1699 int options;
1700
1701 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1702 sess = cls_sess->dd_data;
1703 ddb_entry = sess->dd_data;
1704 ha = ddb_entry->ha;
1705
1706 options = LOGOUT_OPTION_CLOSE_SESSION;
1707 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1708 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1709}
1710
1711static void qla4xxx_task_work(struct work_struct *wdata)
1712{
1713 struct ql4_task_data *task_data;
1714 struct scsi_qla_host *ha;
1715 struct passthru_status *sts;
1716 struct iscsi_task *task;
1717 struct iscsi_hdr *hdr;
1718 uint8_t *data;
1719 uint32_t data_len;
1720 struct iscsi_conn *conn;
1721 int hdr_len;
1722 itt_t itt;
1723
1724 task_data = container_of(wdata, struct ql4_task_data, task_work);
1725 ha = task_data->ha;
1726 task = task_data->task;
1727 sts = &task_data->sts;
1728 hdr_len = sizeof(struct iscsi_hdr);
1729
1730 DEBUG3(printk(KERN_INFO "Status returned\n"));
1731 DEBUG3(qla4xxx_dump_buffer(sts, 64));
1732 DEBUG3(printk(KERN_INFO "Response buffer"));
1733 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1734
1735 conn = task->conn;
1736
1737 switch (sts->completionStatus) {
1738 case PASSTHRU_STATUS_COMPLETE:
1739 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1740 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1741 itt = sts->handle;
1742 hdr->itt = itt;
1743 data = task_data->resp_buffer + hdr_len;
1744 data_len = task_data->resp_len - hdr_len;
1745 iscsi_complete_pdu(conn, hdr, data, data_len);
1746 break;
1747 default:
1748 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1749 sts->completionStatus);
1750 break;
1751 }
1752 return;
1753}
1754
1755static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1756{
1757 struct ql4_task_data *task_data;
1758 struct iscsi_session *sess;
1759 struct ddb_entry *ddb_entry;
1760 struct scsi_qla_host *ha;
1761 int hdr_len;
1762
1763 sess = task->conn->session;
1764 ddb_entry = sess->dd_data;
1765 ha = ddb_entry->ha;
1766 task_data = task->dd_data;
1767 memset(task_data, 0, sizeof(struct ql4_task_data));
1768
1769 if (task->sc) {
1770 ql4_printk(KERN_INFO, ha,
1771 "%s: SCSI Commands not implemented\n", __func__);
1772 return -EINVAL;
1773 }
1774
1775 hdr_len = sizeof(struct iscsi_hdr);
1776 task_data->ha = ha;
1777 task_data->task = task;
1778
1779 if (task->data_count) {
1780 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1781 task->data_count,
1782 PCI_DMA_TODEVICE);
1783 }
1784
1785	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1786 __func__, task->conn->max_recv_dlength, hdr_len));
1787
69ca216e 1788 task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
b3a271a9
MR
1789 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1790 task_data->resp_len,
1791 &task_data->resp_dma,
1792 GFP_ATOMIC);
1793 if (!task_data->resp_buffer)
1794 goto exit_alloc_pdu;
1795
69ca216e 1796 task_data->req_len = task->data_count + hdr_len;
b3a271a9 1797 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
69ca216e 1798 task_data->req_len,
b3a271a9
MR
1799 &task_data->req_dma,
1800 GFP_ATOMIC);
1801 if (!task_data->req_buffer)
1802 goto exit_alloc_pdu;
1803
1804 task->hdr = task_data->req_buffer;
1805
1806 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1807
1808 return 0;
1809
1810exit_alloc_pdu:
1811 if (task_data->resp_buffer)
1812 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1813 task_data->resp_buffer, task_data->resp_dma);
1814
1815 if (task_data->req_buffer)
69ca216e 1816 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
b3a271a9
MR
1817 task_data->req_buffer, task_data->req_dma);
1818 return -ENOMEM;
1819}
1820
1821static void qla4xxx_task_cleanup(struct iscsi_task *task)
1822{
1823 struct ql4_task_data *task_data;
1824 struct iscsi_session *sess;
1825 struct ddb_entry *ddb_entry;
1826 struct scsi_qla_host *ha;
1827 int hdr_len;
1828
1829 hdr_len = sizeof(struct iscsi_hdr);
1830 sess = task->conn->session;
1831 ddb_entry = sess->dd_data;
1832 ha = ddb_entry->ha;
1833 task_data = task->dd_data;
1834
1835 if (task->data_count) {
1836 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1837 task->data_count, PCI_DMA_TODEVICE);
1838 }
1839
1840	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1841 __func__, task->conn->max_recv_dlength, hdr_len));
1842
1843 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1844 task_data->resp_buffer, task_data->resp_dma);
69ca216e 1845 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
b3a271a9
MR
1846 task_data->req_buffer, task_data->req_dma);
1847 return;
1848}
1849
1850static int qla4xxx_task_xmit(struct iscsi_task *task)
1851{
1852 struct scsi_cmnd *sc = task->sc;
1853 struct iscsi_session *sess = task->conn->session;
1854 struct ddb_entry *ddb_entry = sess->dd_data;
1855 struct scsi_qla_host *ha = ddb_entry->ha;
1856
1857 if (!sc)
1858 return qla4xxx_send_passthru0(task);
1859
1860 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1861 __func__);
1862 return -ENOSYS;
1863}
1864
13483730
MC
1865static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
1866 struct dev_db_entry *fw_ddb_entry,
1867 struct iscsi_cls_session *cls_sess,
1868 struct iscsi_cls_conn *cls_conn)
1869{
1870 int buflen = 0;
1871 struct iscsi_session *sess;
376738af 1872 struct ddb_entry *ddb_entry;
13483730
MC
1873 struct iscsi_conn *conn;
1874 char ip_addr[DDB_IPADDR_LEN];
1875 uint16_t options = 0;
1876
1877 sess = cls_sess->dd_data;
376738af 1878 ddb_entry = sess->dd_data;
13483730
MC
1879 conn = cls_conn->dd_data;
1880
376738af
NJ
1881 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
1882
13483730
MC
1883 conn->max_recv_dlength = BYTE_UNITS *
1884 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1885
1886 conn->max_xmit_dlength = BYTE_UNITS *
1887 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1888
1889 sess->initial_r2t_en =
1890 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1891
1892 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1893
1894 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1895
1896 sess->first_burst = BYTE_UNITS *
1897 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1898
1899 sess->max_burst = BYTE_UNITS *
1900 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1901
1902 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1903
1904 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1905
1906 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
1907
1908 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1909
1910 options = le16_to_cpu(fw_ddb_entry->options);
1911 if (options & DDB_OPT_IPV6_DEVICE)
1912 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
1913 else
1914 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
1915
1916 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
1917 (char *)fw_ddb_entry->iscsi_name, buflen);
1918 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
1919 (char *)ha->name_string, buflen);
1920 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
1921 (char *)ip_addr, buflen);
6c1b8789
VC
1922 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
1923 (char *)fw_ddb_entry->iscsi_alias, buflen);
13483730
MC
1924}
1925
1926void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
1927 struct ddb_entry *ddb_entry)
1928{
1929 struct iscsi_cls_session *cls_sess;
1930 struct iscsi_cls_conn *cls_conn;
1931 uint32_t ddb_state;
1932 dma_addr_t fw_ddb_entry_dma;
1933 struct dev_db_entry *fw_ddb_entry;
1934
1935 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1936 &fw_ddb_entry_dma, GFP_KERNEL);
1937 if (!fw_ddb_entry) {
1938 ql4_printk(KERN_ERR, ha,
1939 "%s: Unable to allocate dma buffer\n", __func__);
1940 goto exit_session_conn_fwddb_param;
1941 }
1942
1943 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
1944 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
1945 NULL, NULL, NULL) == QLA_ERROR) {
1946 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1947 "get_ddb_entry for fw_ddb_index %d\n",
1948 ha->host_no, __func__,
1949 ddb_entry->fw_ddb_index));
1950 goto exit_session_conn_fwddb_param;
1951 }
1952
1953 cls_sess = ddb_entry->sess;
1954
1955 cls_conn = ddb_entry->conn;
1956
1957 /* Update params */
1958 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
1959
1960exit_session_conn_fwddb_param:
1961 if (fw_ddb_entry)
1962 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1963 fw_ddb_entry, fw_ddb_entry_dma);
1964}
1965
b3a271a9
MR
1966void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1967 struct ddb_entry *ddb_entry)
1968{
1969 struct iscsi_cls_session *cls_sess;
1970 struct iscsi_cls_conn *cls_conn;
1971 struct iscsi_session *sess;
1972 struct iscsi_conn *conn;
1973 uint32_t ddb_state;
1974 dma_addr_t fw_ddb_entry_dma;
1975 struct dev_db_entry *fw_ddb_entry;
1976
1977 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1978 &fw_ddb_entry_dma, GFP_KERNEL);
1979 if (!fw_ddb_entry) {
1980 ql4_printk(KERN_ERR, ha,
1981 "%s: Unable to allocate dma buffer\n", __func__);
13483730 1982 goto exit_session_conn_param;
b3a271a9
MR
1983 }
1984
1985 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
1986 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
1987 NULL, NULL, NULL) == QLA_ERROR) {
1988 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1989 "get_ddb_entry for fw_ddb_index %d\n",
1990 ha->host_no, __func__,
1991 ddb_entry->fw_ddb_index));
13483730 1992 goto exit_session_conn_param;
b3a271a9
MR
1993 }
1994
1995 cls_sess = ddb_entry->sess;
1996 sess = cls_sess->dd_data;
1997
1998 cls_conn = ddb_entry->conn;
1999 conn = cls_conn->dd_data;
2000
13483730
MC
2001 /* Update timers after login */
2002 ddb_entry->default_relogin_timeout =
c28eaaca
NJ
2003 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
2004 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
2005 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
13483730
MC
2006 ddb_entry->default_time2wait =
2007 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2008
b3a271a9 2009 /* Update params */
376738af 2010 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
b3a271a9
MR
2011 conn->max_recv_dlength = BYTE_UNITS *
2012 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2013
2014 conn->max_xmit_dlength = BYTE_UNITS *
2015 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2016
2017 sess->initial_r2t_en =
2018 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2019
2020 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2021
2022 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2023
2024 sess->first_burst = BYTE_UNITS *
2025 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2026
2027 sess->max_burst = BYTE_UNITS *
2028 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2029
2030 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2031
2032 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2033
2034 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2035
2036 memcpy(sess->initiatorname, ha->name_string,
2037 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
13483730 2038
6c1b8789
VC
2039 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2040 (char *)fw_ddb_entry->iscsi_alias, 0);
2041
13483730
MC
2042exit_session_conn_param:
2043 if (fw_ddb_entry)
2044 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2045 fw_ddb_entry, fw_ddb_entry_dma);
b3a271a9
MR
2046}
2047
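The def_timeout handling in qla4xxx_update_session_conn_param() above only trusts the value read from the flash DDB when it falls strictly between LOGIN_TOV and LOGIN_TOV * 10, otherwise it falls back to LOGIN_TOV. A minimal userspace sketch of that clamp, assuming a stand-in LOGIN_TOV of 30 purely for illustration (the real constant is defined in ql4_def.h):

#include <stdio.h>

#define SAMPLE_LOGIN_TOV 30 /* assumed stand-in for LOGIN_TOV */

/* Accept the flash value only if it is strictly between
 * LOGIN_TOV and LOGIN_TOV * 10; otherwise fall back to LOGIN_TOV. */
static unsigned int clamp_relogin_timeout(unsigned int def_timeout)
{
	if (def_timeout > SAMPLE_LOGIN_TOV &&
	    def_timeout < SAMPLE_LOGIN_TOV * 10)
		return def_timeout;
	return SAMPLE_LOGIN_TOV;
}

int main(void)
{
	printf("%u %u %u\n",
	       clamp_relogin_timeout(10),   /* too small -> 30  */
	       clamp_relogin_timeout(120),  /* in range  -> 120 */
	       clamp_relogin_timeout(900)); /* too large -> 30  */
	return 0;
}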
afaf5a2d
DS
2048/*
2049 * Timer routines
2050 */
2051
2052static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2053 unsigned long interval)
2054{
2055 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2056 __func__, ha->host->host_no));
2057 init_timer(&ha->timer);
2058 ha->timer.expires = jiffies + interval * HZ;
2059 ha->timer.data = (unsigned long)ha;
2060 ha->timer.function = (void (*)(unsigned long))func;
2061 add_timer(&ha->timer);
2062 ha->timer_active = 1;
2063}
2064
2065static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
2066{
2067 del_timer_sync(&ha->timer);
2068 ha->timer_active = 0;
2069}
2070
2071/***
b3a271a9
MR
2072 * qla4xxx_mark_device_missing - blocks the session
2073 * @cls_session: Pointer to the session to be blocked
afaf5a2d
DS
2074 * @ddb_entry: Pointer to device database entry
2075 *
f4f5df23 2076 * This routine marks a device missing and close connection.
afaf5a2d 2077 **/
b3a271a9 2078void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
afaf5a2d 2079{
b3a271a9 2080 iscsi_block_session(cls_session);
afaf5a2d
DS
2081}
2082
f4f5df23
VC
2083/**
2084 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
2085 * @ha: Pointer to host adapter structure.
2086 *
2087 * This routine marks all devices missing and resets the relogin retry count.
2088 **/
2089void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
2090{
b3a271a9 2091 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
f4f5df23
VC
2092}
2093
afaf5a2d
DS
2094static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2095 struct ddb_entry *ddb_entry,
8f0722ca 2096 struct scsi_cmnd *cmd)
afaf5a2d
DS
2097{
2098 struct srb *srb;
2099
2100 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2101 if (!srb)
2102 return srb;
2103
09a0f719 2104 kref_init(&srb->srb_ref);
afaf5a2d
DS
2105 srb->ha = ha;
2106 srb->ddb = ddb_entry;
2107 srb->cmd = cmd;
2108 srb->flags = 0;
5369887a 2109 CMD_SP(cmd) = (void *)srb;
afaf5a2d
DS
2110
2111 return srb;
2112}
2113
2114static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2115{
2116 struct scsi_cmnd *cmd = srb->cmd;
2117
2118 if (srb->flags & SRB_DMA_VALID) {
5f7186c8 2119 scsi_dma_unmap(cmd);
afaf5a2d
DS
2120 srb->flags &= ~SRB_DMA_VALID;
2121 }
5369887a 2122 CMD_SP(cmd) = NULL;
afaf5a2d
DS
2123}
2124
09a0f719 2125void qla4xxx_srb_compl(struct kref *ref)
afaf5a2d 2126{
09a0f719 2127 struct srb *srb = container_of(ref, struct srb, srb_ref);
afaf5a2d 2128 struct scsi_cmnd *cmd = srb->cmd;
09a0f719 2129 struct scsi_qla_host *ha = srb->ha;
afaf5a2d
DS
2130
2131 qla4xxx_srb_free_dma(ha, srb);
2132
2133 mempool_free(srb, ha->srb_mempool);
2134
2135 cmd->scsi_done(cmd);
2136}
2137
2138/**
2139 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
8f0722ca 2140 * @host: scsi host
afaf5a2d 2141 * @cmd: Pointer to Linux's SCSI command structure
afaf5a2d
DS
2142 *
2143 * Remarks:
2144 * This routine is invoked by Linux to send a SCSI command to the driver.
2145 * The mid-level driver tries to ensure that queuecommand never gets
2146 * invoked concurrently with itself or the interrupt handler (although
2147 * the interrupt handler may call this routine as part of request-
2148 *	completion handling). Unfortunately, it sometimes calls the scheduler
2149 *	in interrupt context, which is a big NO! NO!.
2150 **/
8f0722ca 2151static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
afaf5a2d 2152{
8f0722ca 2153 struct scsi_qla_host *ha = to_qla_host(host);
afaf5a2d 2154 struct ddb_entry *ddb_entry = cmd->device->hostdata;
7fb1921b 2155 struct iscsi_cls_session *sess = ddb_entry->sess;
afaf5a2d
DS
2156 struct srb *srb;
2157 int rval;
2158
2232be0d
LC
2159 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2160 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
2161 cmd->result = DID_NO_CONNECT << 16;
2162 else
2163 cmd->result = DID_REQUEUE << 16;
2164 goto qc_fail_command;
2165 }
2166
7fb1921b
MC
2167 if (!sess) {
2168 cmd->result = DID_IMM_RETRY << 16;
2169 goto qc_fail_command;
2170 }
2171
2172 rval = iscsi_session_chkready(sess);
2173 if (rval) {
2174 cmd->result = rval;
2175 goto qc_fail_command;
2176 }
2177
f4f5df23
VC
2178 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2179 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2180 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2181 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2182 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2183 !test_bit(AF_ONLINE, &ha->flags) ||
b3a271a9 2184 !test_bit(AF_LINK_UP, &ha->flags) ||
f4f5df23 2185 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
477ffb9d
DS
2186 goto qc_host_busy;
2187
8f0722ca 2188 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
afaf5a2d 2189 if (!srb)
8f0722ca 2190 goto qc_host_busy;
afaf5a2d
DS
2191
2192 rval = qla4xxx_send_command_to_isp(ha, srb);
2193 if (rval != QLA_SUCCESS)
2194 goto qc_host_busy_free_sp;
2195
afaf5a2d
DS
2196 return 0;
2197
2198qc_host_busy_free_sp:
2199 qla4xxx_srb_free_dma(ha, srb);
2200 mempool_free(srb, ha->srb_mempool);
2201
afaf5a2d
DS
2202qc_host_busy:
2203 return SCSI_MLQUEUE_HOST_BUSY;
2204
2205qc_fail_command:
8f0722ca 2206 cmd->scsi_done(cmd);
afaf5a2d
DS
2207
2208 return 0;
2209}
2210
2211/**
2212 * qla4xxx_mem_free - frees memory allocated to adapter
2213 * @ha: Pointer to host adapter structure.
2214 *
2215 * Frees memory previously allocated by qla4xxx_mem_alloc
2216 **/
2217static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2218{
2219 if (ha->queues)
2220 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2221 ha->queues_dma);
2222
2223 ha->queues_len = 0;
2224 ha->queues = NULL;
2225 ha->queues_dma = 0;
2226 ha->request_ring = NULL;
2227 ha->request_dma = 0;
2228 ha->response_ring = NULL;
2229 ha->response_dma = 0;
2230 ha->shadow_regs = NULL;
2231 ha->shadow_regs_dma = 0;
2232
2233 /* Free srb pool. */
2234 if (ha->srb_mempool)
2235 mempool_destroy(ha->srb_mempool);
2236
2237 ha->srb_mempool = NULL;
2238
b3a271a9
MR
2239 if (ha->chap_dma_pool)
2240 dma_pool_destroy(ha->chap_dma_pool);
2241
4549415a
LC
2242 if (ha->chap_list)
2243 vfree(ha->chap_list);
2244 ha->chap_list = NULL;
2245
13483730
MC
2246 if (ha->fw_ddb_dma_pool)
2247 dma_pool_destroy(ha->fw_ddb_dma_pool);
2248
afaf5a2d 2249 /* release io space registers */
f4f5df23
VC
2250 if (is_qla8022(ha)) {
2251 if (ha->nx_pcibase)
2252 iounmap(
2253 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
f4f5df23 2254 } else if (ha->reg)
afaf5a2d
DS
2255 iounmap(ha->reg);
2256 pci_release_regions(ha->pdev);
2257}
2258
2259/**
2260 * qla4xxx_mem_alloc - allocates memory for use by adapter.
2261 * @ha: Pointer to host adapter structure
2262 *
2263 * Allocates DMA memory for request and response queues. Also allocates memory
2264 * for srbs.
2265 **/
2266static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
2267{
2268 unsigned long align;
2269
2270 /* Allocate contiguous block of DMA memory for queues. */
2271 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2272 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
2273 sizeof(struct shadow_regs) +
2274 MEM_ALIGN_VALUE +
2275 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
2276 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
2277 &ha->queues_dma, GFP_KERNEL);
2278 if (ha->queues == NULL) {
c2660df3
VC
2279 ql4_printk(KERN_WARNING, ha,
2280 "Memory Allocation failed - queues.\n");
afaf5a2d
DS
2281
2282 goto mem_alloc_error_exit;
2283 }
2284 memset(ha->queues, 0, ha->queues_len);
2285
2286 /*
2287 * As per RISC alignment requirements -- the bus-address must be a
2288 * multiple of the request-ring size (in bytes).
2289 */
2290 align = 0;
2291 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
2292 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
2293 (MEM_ALIGN_VALUE - 1));
2294
2295 /* Update request and response queue pointers. */
2296 ha->request_dma = ha->queues_dma + align;
2297 ha->request_ring = (struct queue_entry *) (ha->queues + align);
2298 ha->response_dma = ha->queues_dma + align +
2299 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
2300 ha->response_ring = (struct queue_entry *) (ha->queues + align +
2301 (REQUEST_QUEUE_DEPTH *
2302 QUEUE_SIZE));
2303 ha->shadow_regs_dma = ha->queues_dma + align +
2304 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2305 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
2306 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
2307 (REQUEST_QUEUE_DEPTH *
2308 QUEUE_SIZE) +
2309 (RESPONSE_QUEUE_DEPTH *
2310 QUEUE_SIZE));
2311
2312 /* Allocate memory for srb pool. */
2313 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
2314 mempool_free_slab, srb_cachep);
2315 if (ha->srb_mempool == NULL) {
c2660df3
VC
2316 ql4_printk(KERN_WARNING, ha,
2317 "Memory Allocation failed - SRB Pool.\n");
afaf5a2d
DS
2318
2319 goto mem_alloc_error_exit;
2320 }
2321
b3a271a9
MR
2322 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
2323 CHAP_DMA_BLOCK_SIZE, 8, 0);
2324
2325 if (ha->chap_dma_pool == NULL) {
2326 ql4_printk(KERN_WARNING, ha,
2327 "%s: chap_dma_pool allocation failed..\n", __func__);
2328 goto mem_alloc_error_exit;
2329 }
2330
13483730
MC
2331 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
2332 DDB_DMA_BLOCK_SIZE, 8, 0);
2333
2334 if (ha->fw_ddb_dma_pool == NULL) {
2335 ql4_printk(KERN_WARNING, ha,
2336 "%s: fw_ddb_dma_pool allocation failed..\n",
2337 __func__);
2338 goto mem_alloc_error_exit;
2339 }
2340
afaf5a2d
DS
2341 return QLA_SUCCESS;
2342
2343mem_alloc_error_exit:
2344 qla4xxx_mem_free(ha);
2345 return QLA_ERROR;
2346}
2347
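The alignment fix-up in qla4xxx_mem_alloc() above pads the queues' DMA base so the request ring starts on a MEM_ALIGN_VALUE boundary. A minimal userspace sketch of that arithmetic, assuming a stand-in alignment of 4096 purely for illustration (the real MEM_ALIGN_VALUE comes from ql4_def.h and must be a power of two for this mask trick to work):

#include <stdio.h>

#define SAMPLE_MEM_ALIGN 4096UL /* assumed stand-in for MEM_ALIGN_VALUE */

/* Same computation as the driver: pad up to the next boundary
 * only when the base address is not already aligned. */
static unsigned long align_offset(unsigned long dma_base)
{
	unsigned long align = 0;

	if (dma_base & (SAMPLE_MEM_ALIGN - 1))
		align = SAMPLE_MEM_ALIGN -
			(dma_base & (SAMPLE_MEM_ALIGN - 1));
	return align;
}

int main(void)
{
	unsigned long base = 0x12345678UL;
	unsigned long align = align_offset(base);

	/* (base + align) is now a multiple of the alignment value. */
	printf("base=%#lx align=%#lx aligned=%#lx\n",
	       base, align, base + align);
	return 0;
}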
4f77083e
MH
2348/**
2349 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
2350 * @ha: adapter block pointer.
2351 *
2352 * Note: The caller should not hold the idc lock.
2353 **/
2354static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2355{
2356 uint32_t temp, temp_state, temp_val;
2357 int status = QLA_SUCCESS;
2358
2359 temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
2360
2361 temp_state = qla82xx_get_temp_state(temp);
2362 temp_val = qla82xx_get_temp_val(temp);
2363
2364 if (temp_state == QLA82XX_TEMP_PANIC) {
2365 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
2366 " exceeds maximum allowed. Hardware has been shut"
2367 " down.\n", temp_val);
2368 status = QLA_ERROR;
2369 } else if (temp_state == QLA82XX_TEMP_WARN) {
2370 if (ha->temperature == QLA82XX_TEMP_NORMAL)
2371 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
2372 " degrees C exceeds operating range."
2373 " Immediate action needed.\n", temp_val);
2374 } else {
2375 if (ha->temperature == QLA82XX_TEMP_WARN)
2376 ql4_printk(KERN_INFO, ha, "Device temperature is"
2377 " now %d degrees C in normal range.\n",
2378 temp_val);
2379 }
2380 ha->temperature = temp_state;
2381 return status;
2382}
2383
f4f5df23
VC
2384/**
2385 * qla4_8xxx_check_fw_alive - Check firmware health
2386 * @ha: Pointer to host adapter structure.
2387 *
2388 * Context: Interrupt
2389 **/
9ee91a38 2390static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
f4f5df23 2391{
9ee91a38
SS
2392 uint32_t fw_heartbeat_counter;
2393 int status = QLA_SUCCESS;
f4f5df23
VC
2394
2395 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2232be0d
LC
2396 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2397 if (fw_heartbeat_counter == 0xffffffff) {
2398 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
2399 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
2400 ha->host_no, __func__));
9ee91a38 2401 return status;
2232be0d 2402 }
f4f5df23
VC
2403
2404 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2405 ha->seconds_since_last_heartbeat++;
2406 /* FW not alive after 2 seconds */
2407 if (ha->seconds_since_last_heartbeat == 2) {
2408 ha->seconds_since_last_heartbeat = 0;
68d92ebf
VC
2409
2410 ql4_printk(KERN_INFO, ha,
2411 "scsi(%ld): %s, Dumping hw/fw registers:\n "
2412 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
2413 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
2414 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
2415 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
9ee91a38
SS
2416 ha->host_no, __func__,
2417 qla4_8xxx_rd_32(ha,
2418 QLA82XX_PEG_HALT_STATUS1),
68d92ebf
VC
2419 qla4_8xxx_rd_32(ha,
2420 QLA82XX_PEG_HALT_STATUS2),
2421 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
2422 0x3c),
2423 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
2424 0x3c),
2425 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
2426 0x3c),
2427 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
2428 0x3c),
2429 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
2430 0x3c));
9ee91a38 2431 status = QLA_ERROR;
f4f5df23 2432 }
99457d75
LC
2433 } else
2434 ha->seconds_since_last_heartbeat = 0;
2435
f4f5df23 2436 ha->fw_heartbeat_counter = fw_heartbeat_counter;
9ee91a38 2437 return status;
f4f5df23
VC
2438}
2439
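qla4_8xxx_check_fw_alive() above declares the firmware dead only after the PEG_ALIVE counter has stayed unchanged for two consecutive 1-second polls, and it ignores the 0xffffffff sentinel reported while AER/EEH is in progress. A minimal userspace sketch of that staleness logic, with the register read replaced by a function parameter:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct hb_state {
	uint32_t last_counter;
	uint32_t stale_seconds;
};

/* Returns true when the firmware is considered dead (counter unchanged
 * for two consecutive polls). 0xffffffff means AER/EEH is in progress
 * and the sample is ignored. */
static bool fw_heartbeat_dead(struct hb_state *s, uint32_t counter)
{
	bool dead = false;

	if (counter == 0xffffffff)
		return false;

	if (s->last_counter == counter) {
		if (++s->stale_seconds == 2) {
			s->stale_seconds = 0;
			dead = true;
		}
	} else {
		s->stale_seconds = 0;
	}
	s->last_counter = counter;
	return dead;
}

int main(void)
{
	struct hb_state s = { .last_counter = 0, .stale_seconds = 0 };
	uint32_t samples[] = { 1, 2, 2, 2, 3 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("sample %u -> %s\n", (unsigned)samples[i],
		       fw_heartbeat_dead(&s, samples[i]) ? "dead" : "alive");
	return 0;
}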
2440/**
2441 * qla4_8xxx_watchdog - Poll dev state
2442 * @ha: Pointer to host adapter structure.
2443 *
2444 * Context: Interrupt
2445 **/
2446void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2447{
9ee91a38 2448 uint32_t dev_state, halt_status;
f4f5df23
VC
2449
2450 /* don't poll if reset is going on */
d56a1f7b
LC
2451 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2452 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
977f46a4 2453 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
9ee91a38 2454 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
4f77083e
MH
2455
2456 if (qla4_8xxx_check_temp(ha)) {
e6bd0ebd
GM
2457 ql4_printk(KERN_INFO, ha, "disabling pause"
2458 " transmit on port 0 & 1.\n");
2459 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2460 CRB_NIU_XG_PAUSE_CTL_P0 |
2461 CRB_NIU_XG_PAUSE_CTL_P1);
4f77083e
MH
2462 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2463 qla4xxx_wake_dpc(ha);
2464 } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
f4f5df23 2465 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3930b8c1
VC
2466 if (!ql4xdontresethba) {
2467 ql4_printk(KERN_INFO, ha, "%s: HW State: "
2468 "NEED RESET!\n", __func__);
2469 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2470 qla4xxx_wake_dpc(ha);
3930b8c1 2471 }
f4f5df23
VC
2472 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
2473 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3930b8c1
VC
2474 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
2475 __func__);
f4f5df23
VC
2476 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
2477 qla4xxx_wake_dpc(ha);
2478 } else {
2479 /* Check firmware health */
9ee91a38 2480 if (qla4_8xxx_check_fw_alive(ha)) {
e6bd0ebd
GM
2481 ql4_printk(KERN_INFO, ha, "disabling pause"
2482 " transmit on port 0 & 1.\n");
2483 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2484 CRB_NIU_XG_PAUSE_CTL_P0 |
2485 CRB_NIU_XG_PAUSE_CTL_P1);
9ee91a38
SS
2486 halt_status = qla4_8xxx_rd_32(ha,
2487 QLA82XX_PEG_HALT_STATUS1);
2488
46801ba6 2489 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
527c8b2e
NJ
2490 ql4_printk(KERN_ERR, ha, "%s:"
2491 " Firmware aborted with"
2492 " error code 0x00006700."
2493 " Device is being reset\n",
2494 __func__);
2495
9ee91a38
SS
2496 /* Since we cannot change dev_state in interrupt
2497 * context, set appropriate DPC flag then wakeup
2498 * DPC */
2499 if (halt_status & HALT_STATUS_UNRECOVERABLE)
2500 set_bit(DPC_HA_UNRECOVERABLE,
2501 &ha->dpc_flags);
2502 else {
2503 ql4_printk(KERN_INFO, ha, "%s: detect "
2504 "abort needed!\n", __func__);
2505 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2506 }
2507 qla4xxx_mailbox_premature_completion(ha);
2508 qla4xxx_wake_dpc(ha);
2509 }
f4f5df23
VC
2510 }
2511 }
2512}
2513
4a4bc2e9 2514static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
13483730
MC
2515{
2516 struct iscsi_session *sess;
2517 struct ddb_entry *ddb_entry;
2518 struct scsi_qla_host *ha;
2519
2520 sess = cls_sess->dd_data;
2521 ddb_entry = sess->dd_data;
2522 ha = ddb_entry->ha;
2523
2524 if (!(ddb_entry->ddb_type == FLASH_DDB))
2525 return;
2526
2527 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
2528 !iscsi_is_session_online(cls_sess)) {
2529 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
2530 INVALID_ENTRY) {
2531 if (atomic_read(&ddb_entry->retry_relogin_timer) ==
2532 0) {
2533 atomic_set(&ddb_entry->retry_relogin_timer,
2534 INVALID_ENTRY);
2535 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2536 set_bit(DF_RELOGIN, &ddb_entry->flags);
2537 DEBUG2(ql4_printk(KERN_INFO, ha,
2538 "%s: index [%d] login device\n",
2539 __func__, ddb_entry->fw_ddb_index));
2540 } else
2541 atomic_dec(&ddb_entry->retry_relogin_timer);
2542 }
2543 }
2544
2545 /* Wait for relogin to timeout */
2546 if (atomic_read(&ddb_entry->relogin_timer) &&
2547 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
2548 /*
2549 * If the relogin times out and the device is
2550 * still NOT ONLINE then try and relogin again.
2551 */
2552 if (!iscsi_is_session_online(cls_sess)) {
2553 /* Reset retry relogin timer */
2554 atomic_inc(&ddb_entry->relogin_retry_count);
2555 DEBUG2(ql4_printk(KERN_INFO, ha,
2556 "%s: index[%d] relogin timed out-retrying"
2557 " relogin (%d), retry (%d)\n", __func__,
2558 ddb_entry->fw_ddb_index,
2559 atomic_read(&ddb_entry->relogin_retry_count),
2560 ddb_entry->default_time2wait + 4));
2561 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2562 atomic_set(&ddb_entry->retry_relogin_timer,
2563 ddb_entry->default_time2wait + 4);
2564 }
2565 }
2566}
2567
afaf5a2d
DS
2568/**
2569 * qla4xxx_timer - checks every second for work to do.
2570 * @ha: Pointer to host adapter structure.
2571 **/
2572static void qla4xxx_timer(struct scsi_qla_host *ha)
2573{
afaf5a2d 2574 int start_dpc = 0;
2232be0d
LC
2575 uint16_t w;
2576
13483730
MC
2577 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
2578
2232be0d
LC
2579 /* If we are in the middle of AER/EEH processing
2580 * skip any processing and reschedule the timer
2581 */
2582 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2583 mod_timer(&ha->timer, jiffies + HZ);
2584 return;
2585 }
2586
2587 /* Hardware read to trigger an EEH error during mailbox waits. */
2588 if (!pci_channel_offline(ha->pdev))
2589 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
afaf5a2d 2590
f4f5df23
VC
2591 if (is_qla8022(ha)) {
2592 qla4_8xxx_watchdog(ha);
2593 }
2594
f4f5df23
VC
2595 if (!is_qla8022(ha)) {
2596 /* Check for heartbeat interval. */
2597 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
2598 ha->heartbeat_interval != 0) {
2599 ha->seconds_since_last_heartbeat++;
2600 if (ha->seconds_since_last_heartbeat >
2601 ha->heartbeat_interval + 2)
2602 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2603 }
afaf5a2d
DS
2604 }
2605
ff884430
VC
2606 /* Process any deferred work. */
2607 if (!list_empty(&ha->work_list))
2608 start_dpc++;
2609
afaf5a2d 2610 /* Wakeup the dpc routine for this adapter, if needed. */
1b46807e 2611 if (start_dpc ||
afaf5a2d
DS
2612 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2613 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
2614 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
f4f5df23 2615 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
afaf5a2d
DS
2616 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2617 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
065aa1b4 2618 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
f4f5df23
VC
2619 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2620 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1b46807e 2621 test_bit(DPC_AEN, &ha->dpc_flags)) {
afaf5a2d
DS
2622 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
2623 " - dpc flags = 0x%lx\n",
2624 ha->host_no, __func__, ha->dpc_flags));
f4f5df23 2625 qla4xxx_wake_dpc(ha);
afaf5a2d
DS
2626 }
2627
2628 /* Reschedule timer thread to call us back in one second */
2629 mod_timer(&ha->timer, jiffies + HZ);
2630
2631 DEBUG2(ha->seconds_since_last_intr++);
2632}
2633
2634/**
2635 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
2636 * @ha: Pointer to host adapter structure.
2637 *
2638 * This routine stalls the driver until all outstanding commands are returned.
2639 * Caller must release the Hardware Lock prior to calling this routine.
2640 **/
2641static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
2642{
2643 uint32_t index = 0;
afaf5a2d
DS
2644 unsigned long flags;
2645 struct scsi_cmnd *cmd;
afaf5a2d 2646
f4f5df23
VC
2647 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
2648
2649 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
2650 "complete\n", WAIT_CMD_TOV));
2651
2652 while (!time_after_eq(jiffies, wtime)) {
afaf5a2d
DS
2653 spin_lock_irqsave(&ha->hardware_lock, flags);
2654 /* Find a command that hasn't completed. */
2655 for (index = 0; index < ha->host->can_queue; index++) {
2656 cmd = scsi_host_find_tag(ha->host, index);
a1e0063d
MC
2657 /*
2658 * We cannot just check if the index is valid,
2659			 * because if we are running from the scsi eh, then
2660 * the scsi/block layer is going to prevent
2661 * the tag from being released.
2662 */
2663 if (cmd != NULL && CMD_SP(cmd))
afaf5a2d
DS
2664 break;
2665 }
2666 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2667
2668 /* If No Commands are pending, wait is complete */
f4f5df23
VC
2669 if (index == ha->host->can_queue)
2670 return QLA_SUCCESS;
afaf5a2d 2671
f4f5df23
VC
2672 msleep(1000);
2673 }
2674	/* If we timed out waiting for commands to come back,
2675 * return ERROR. */
2676 return QLA_ERROR;
afaf5a2d
DS
2677}
2678
f4f5df23 2679int qla4xxx_hw_reset(struct scsi_qla_host *ha)
afaf5a2d 2680{
afaf5a2d 2681 uint32_t ctrl_status;
477ffb9d
DS
2682 unsigned long flags = 0;
2683
2684 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
afaf5a2d 2685
f4f5df23
VC
2686 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
2687 return QLA_ERROR;
2688
afaf5a2d
DS
2689 spin_lock_irqsave(&ha->hardware_lock, flags);
2690
2691 /*
2692 * If the SCSI Reset Interrupt bit is set, clear it.
2693 * Otherwise, the Soft Reset won't work.
2694 */
2695 ctrl_status = readw(&ha->reg->ctrl_status);
2696 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
2697 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2698
2699 /* Issue Soft Reset */
2700 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
2701 readl(&ha->reg->ctrl_status);
2702
2703 spin_unlock_irqrestore(&ha->hardware_lock, flags);
f4f5df23 2704 return QLA_SUCCESS;
477ffb9d
DS
2705}
2706
2707/**
2708 * qla4xxx_soft_reset - performs soft reset.
2709 * @ha: Pointer to host adapter structure.
2710 **/
2711int qla4xxx_soft_reset(struct scsi_qla_host *ha)
2712{
2713 uint32_t max_wait_time;
2714 unsigned long flags = 0;
f931c534 2715 int status;
477ffb9d
DS
2716 uint32_t ctrl_status;
2717
f931c534
VC
2718 status = qla4xxx_hw_reset(ha);
2719 if (status != QLA_SUCCESS)
2720 return status;
afaf5a2d 2721
f931c534 2722 status = QLA_ERROR;
afaf5a2d
DS
2723 /* Wait until the Network Reset Intr bit is cleared */
2724 max_wait_time = RESET_INTR_TOV;
2725 do {
2726 spin_lock_irqsave(&ha->hardware_lock, flags);
2727 ctrl_status = readw(&ha->reg->ctrl_status);
2728 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2729
2730 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
2731 break;
2732
2733 msleep(1000);
2734 } while ((--max_wait_time));
2735
2736 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
2737 DEBUG2(printk(KERN_WARNING
2738 "scsi%ld: Network Reset Intr not cleared by "
2739 "Network function, clearing it now!\n",
2740 ha->host_no));
2741 spin_lock_irqsave(&ha->hardware_lock, flags);
2742 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
2743 readl(&ha->reg->ctrl_status);
2744 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2745 }
2746
2747 /* Wait until the firmware tells us the Soft Reset is done */
2748 max_wait_time = SOFT_RESET_TOV;
2749 do {
2750 spin_lock_irqsave(&ha->hardware_lock, flags);
2751 ctrl_status = readw(&ha->reg->ctrl_status);
2752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2753
2754 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
2755 status = QLA_SUCCESS;
2756 break;
2757 }
2758
2759 msleep(1000);
2760 } while ((--max_wait_time));
2761
2762 /*
2763 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
2764 * after the soft reset has taken place.
2765 */
2766 spin_lock_irqsave(&ha->hardware_lock, flags);
2767 ctrl_status = readw(&ha->reg->ctrl_status);
2768 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
2769 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2770 readl(&ha->reg->ctrl_status);
2771 }
2772 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2773
2774	/* If the soft reset fails, then most probably the BIOS on the
2775	 * other function is also enabled.
2776	 * Since the initialization is sequential, the other function
2777	 * won't be able to acknowledge the soft reset.
2778	 * Issue a force soft reset to work around this scenario.
2779 */
2780 if (max_wait_time == 0) {
2781 /* Issue Force Soft Reset */
2782 spin_lock_irqsave(&ha->hardware_lock, flags);
2783 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2784 readl(&ha->reg->ctrl_status);
2785 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2786 /* Wait until the firmware tells us the Soft Reset is done */
2787 max_wait_time = SOFT_RESET_TOV;
2788 do {
2789 spin_lock_irqsave(&ha->hardware_lock, flags);
2790 ctrl_status = readw(&ha->reg->ctrl_status);
2791 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2792
2793 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2794 status = QLA_SUCCESS;
2795 break;
2796 }
2797
2798 msleep(1000);
2799 } while ((--max_wait_time));
2800 }
2801
2802 return status;
2803}
2804
afaf5a2d 2805/**
f4f5df23 2806 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
afaf5a2d 2807 * @ha: Pointer to host adapter structure.
f4f5df23 2808 * @res: returned scsi status
afaf5a2d
DS
2809 *
2810 * This routine is called just prior to a HARD RESET to return all
2811 * outstanding commands back to the Operating System.
2812 * Caller should make sure that the following locks are released
2813 * before calling this routine: Hardware lock, and io_request_lock.
2814 **/
f4f5df23 2815static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
afaf5a2d
DS
2816{
2817 struct srb *srb;
2818 int i;
2819 unsigned long flags;
2820
2821 spin_lock_irqsave(&ha->hardware_lock, flags);
2822 for (i = 0; i < ha->host->can_queue; i++) {
2823 srb = qla4xxx_del_from_active_array(ha, i);
2824 if (srb != NULL) {
f4f5df23 2825 srb->cmd->result = res;
09a0f719 2826 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
afaf5a2d
DS
2827 }
2828 }
2829 spin_unlock_irqrestore(&ha->hardware_lock, flags);
afaf5a2d
DS
2830}
2831
f4f5df23
VC
2832void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2833{
2834 clear_bit(AF_ONLINE, &ha->flags);
2835
2836 /* Disable the board */
2837 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
f4f5df23
VC
2838
2839 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2840 qla4xxx_mark_all_devices_missing(ha);
2841 clear_bit(AF_INIT_DONE, &ha->flags);
2842}
2843
b3a271a9
MR
2844static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2845{
2846 struct iscsi_session *sess;
2847 struct ddb_entry *ddb_entry;
2848
2849 sess = cls_session->dd_data;
2850 ddb_entry = sess->dd_data;
2851 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
13483730
MC
2852
2853 if (ddb_entry->ddb_type == FLASH_DDB)
2854 iscsi_block_session(ddb_entry->sess);
2855 else
2856 iscsi_session_failure(cls_session->dd_data,
2857 ISCSI_ERR_CONN_FAILED);
b3a271a9
MR
2858}
2859
afaf5a2d
DS
2860/**
2861 * qla4xxx_recover_adapter - recovers adapter after a fatal error
2862 * @ha: Pointer to host adapter structure.
afaf5a2d 2863 **/
f4f5df23 2864static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
afaf5a2d 2865{
f4f5df23
VC
2866 int status = QLA_ERROR;
2867 uint8_t reset_chip = 0;
8e0f3a66 2868 uint32_t dev_state;
9ee91a38 2869 unsigned long wait;
afaf5a2d
DS
2870
2871 /* Stall incoming I/O until we are done */
f4f5df23 2872 scsi_block_requests(ha->host);
afaf5a2d 2873 clear_bit(AF_ONLINE, &ha->flags);
b3a271a9 2874 clear_bit(AF_LINK_UP, &ha->flags);
50a29aec 2875
f4f5df23 2876 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
afaf5a2d 2877
f4f5df23 2878 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
afaf5a2d 2879
b3a271a9
MR
2880 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2881
f4f5df23
VC
2882 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2883 reset_chip = 1;
afaf5a2d 2884
f4f5df23
VC
2885 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2886 * do not reset adapter, jump to initialize_adapter */
2887 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2888 status = QLA_SUCCESS;
2889 goto recover_ha_init_adapter;
2890 }
afaf5a2d 2891
f4f5df23
VC
2892 /* For the ISP-82xx adapter, issue a stop_firmware if invoked
2893 * from eh_host_reset or ioctl module */
2894 if (is_qla8022(ha) && !reset_chip &&
2895 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2896
2897 DEBUG2(ql4_printk(KERN_INFO, ha,
2898 "scsi%ld: %s - Performing stop_firmware...\n",
2899 ha->host_no, __func__));
2900 status = ha->isp_ops->reset_firmware(ha);
2901 if (status == QLA_SUCCESS) {
2bd1e2be
NJ
2902 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2903 qla4xxx_cmd_wait(ha);
f4f5df23
VC
2904 ha->isp_ops->disable_intrs(ha);
2905 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2906 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2907 } else {
2908 /* If the stop_firmware fails then
2909 * reset the entire chip */
2910 reset_chip = 1;
2911 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2912 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2913 }
2914 }
dca05c4c 2915
f4f5df23
VC
2916 /* Issue full chip reset if recovering from a catastrophic error,
2917 * or if stop_firmware fails for ISP-82xx.
2918 * This is the default case for ISP-4xxx */
2919 if (!is_qla8022(ha) || reset_chip) {
9ee91a38
SS
2920 if (!is_qla8022(ha))
2921 goto chip_reset;
2922
2923 /* Check if 82XX firmware is alive or not
2924 * We may have arrived here from NEED_RESET
2925 * detection only */
2926 if (test_bit(AF_FW_RECOVERY, &ha->flags))
2927 goto chip_reset;
2928
2929 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
2930 while (time_before(jiffies, wait)) {
2931 if (qla4_8xxx_check_fw_alive(ha)) {
2932 qla4xxx_mailbox_premature_completion(ha);
2933 break;
2934 }
2935
2936 set_current_state(TASK_UNINTERRUPTIBLE);
2937 schedule_timeout(HZ);
2938 }
2939
2bd1e2be
NJ
2940 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2941 qla4xxx_cmd_wait(ha);
9ee91a38 2942chip_reset:
f4f5df23
VC
2943 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2944 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2945 DEBUG2(ql4_printk(KERN_INFO, ha,
2946 "scsi%ld: %s - Performing chip reset..\n",
2947 ha->host_no, __func__));
2948 status = ha->isp_ops->reset_chip(ha);
2949 }
afaf5a2d
DS
2950
2951 /* Flush any pending ddb changed AENs */
2952 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2953
f4f5df23
VC
2954recover_ha_init_adapter:
2955 /* Upon successful firmware/chip reset, re-initialize the adapter */
afaf5a2d 2956 if (status == QLA_SUCCESS) {
f4f5df23
VC
2957 /* For ISP-4xxx, force function 1 to always initialize
2958		 * before function 3 to prevent both functions from
2959		 * stepping on top of each other */
2960 if (!is_qla8022(ha) && (ha->mac_index == 3))
2961 ssleep(6);
2962
2963 /* NOTE: AF_ONLINE flag set upon successful completion of
2964 * qla4xxx_initialize_adapter */
13483730 2965 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
afaf5a2d
DS
2966 }
2967
f4f5df23
VC
2968 /* Retry failed adapter initialization, if necessary
2969 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
2970 * case to prevent ping-pong resets between functions */
2971 if (!test_bit(AF_ONLINE, &ha->flags) &&
2972 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 2973 /* Adapter initialization failed, see if we can retry
f4f5df23
VC
2974 * resetting the ha.
2975 * Since we don't want to block the DPC for too long
2976 * with multiple resets in the same thread,
2977 * utilize DPC to retry */
8e0f3a66
SR
2978 if (is_qla8022(ha)) {
2979 qla4_8xxx_idc_lock(ha);
2980 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2981 qla4_8xxx_idc_unlock(ha);
2982 if (dev_state == QLA82XX_DEV_FAILED) {
2983 ql4_printk(KERN_INFO, ha, "%s: don't retry "
2984 "recover adapter. H/W is in Failed "
2985 "state\n", __func__);
2986 qla4xxx_dead_adapter_cleanup(ha);
2987 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2988 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2989 clear_bit(DPC_RESET_HA_FW_CONTEXT,
2990 &ha->dpc_flags);
2991 status = QLA_ERROR;
2992
2993 goto exit_recover;
2994 }
2995 }
2996
afaf5a2d
DS
2997 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
2998 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
2999 DEBUG2(printk("scsi%ld: recover adapter - retrying "
3000 "(%d) more times\n", ha->host_no,
3001 ha->retry_reset_ha_cnt));
3002 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3003 status = QLA_ERROR;
3004 } else {
3005 if (ha->retry_reset_ha_cnt > 0) {
3006 /* Schedule another Reset HA--DPC will retry */
3007 ha->retry_reset_ha_cnt--;
3008 DEBUG2(printk("scsi%ld: recover adapter - "
3009 "retry remaining %d\n",
3010 ha->host_no,
3011 ha->retry_reset_ha_cnt));
3012 status = QLA_ERROR;
3013 }
3014
3015 if (ha->retry_reset_ha_cnt == 0) {
3016 /* Recover adapter retries have been exhausted.
3017 * Adapter DEAD */
3018 DEBUG2(printk("scsi%ld: recover adapter "
3019 "failed - board disabled\n",
3020 ha->host_no));
f4f5df23 3021 qla4xxx_dead_adapter_cleanup(ha);
afaf5a2d
DS
3022 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3023 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 3024 clear_bit(DPC_RESET_HA_FW_CONTEXT,
afaf5a2d
DS
3025 &ha->dpc_flags);
3026 status = QLA_ERROR;
3027 }
3028 }
3029 } else {
3030 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 3031 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
afaf5a2d
DS
3032 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3033 }
3034
8e0f3a66 3035exit_recover:
afaf5a2d
DS
3036 ha->adapter_error_count++;
3037
f4f5df23
VC
3038 if (test_bit(AF_ONLINE, &ha->flags))
3039 ha->isp_ops->enable_intrs(ha);
3040
3041 scsi_unblock_requests(ha->host);
3042
3043 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3044 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
25985edc 3045 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
afaf5a2d 3046
afaf5a2d
DS
3047 return status;
3048}
3049
b3a271a9 3050static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
2d7924e6 3051{
b3a271a9
MR
3052 struct iscsi_session *sess;
3053 struct ddb_entry *ddb_entry;
3054 struct scsi_qla_host *ha;
2d7924e6 3055
b3a271a9
MR
3056 sess = cls_session->dd_data;
3057 ddb_entry = sess->dd_data;
3058 ha = ddb_entry->ha;
3059 if (!iscsi_is_session_online(cls_session)) {
3060 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3061 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3062 " unblock session\n", ha->host_no, __func__,
3063 ddb_entry->fw_ddb_index);
3064 iscsi_unblock_session(ddb_entry->sess);
3065 } else {
3066 /* Trigger relogin */
13483730
MC
3067 if (ddb_entry->ddb_type == FLASH_DDB) {
3068 if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
3069 qla4xxx_arm_relogin_timer(ddb_entry);
3070 } else
3071 iscsi_session_failure(cls_session->dd_data,
3072 ISCSI_ERR_CONN_FAILED);
2d7924e6
VC
3073 }
3074 }
3075}
3076
13483730
MC
3077int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
3078{
3079 struct iscsi_session *sess;
3080 struct ddb_entry *ddb_entry;
3081 struct scsi_qla_host *ha;
3082
3083 sess = cls_session->dd_data;
3084 ddb_entry = sess->dd_data;
3085 ha = ddb_entry->ha;
3086 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3087 " unblock session\n", ha->host_no, __func__,
3088 ddb_entry->fw_ddb_index);
3089
3090 iscsi_unblock_session(ddb_entry->sess);
3091
3092 /* Start scan target */
3093 if (test_bit(AF_ONLINE, &ha->flags)) {
3094 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3095 " start scan\n", ha->host_no, __func__,
3096 ddb_entry->fw_ddb_index);
3097 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
3098 }
3099 return QLA_SUCCESS;
3100}
3101
3102int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3103{
3104 struct iscsi_session *sess;
3105 struct ddb_entry *ddb_entry;
3106 struct scsi_qla_host *ha;
3107
3108 sess = cls_session->dd_data;
3109 ddb_entry = sess->dd_data;
3110 ha = ddb_entry->ha;
3111 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3112 " unblock user space session\n", ha->host_no, __func__,
3113 ddb_entry->fw_ddb_index);
3114 iscsi_conn_start(ddb_entry->conn);
3115 iscsi_conn_login_event(ddb_entry->conn,
3116 ISCSI_CONN_STATE_LOGGED_IN);
3117
3118 return QLA_SUCCESS;
3119}
3120
b3a271a9
MR
3121static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
3122{
3123 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
3124}
3125
13483730
MC
3126static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3127{
3128 uint16_t relogin_timer;
3129 struct iscsi_session *sess;
3130 struct ddb_entry *ddb_entry;
3131 struct scsi_qla_host *ha;
3132
3133 sess = cls_sess->dd_data;
3134 ddb_entry = sess->dd_data;
3135 ha = ddb_entry->ha;
3136
3137 relogin_timer = max(ddb_entry->default_relogin_timeout,
3138 (uint16_t)RELOGIN_TOV);
3139 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3140
3141 DEBUG2(ql4_printk(KERN_INFO, ha,
3142 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3143 ddb_entry->fw_ddb_index, relogin_timer));
3144
3145 qla4xxx_login_flash_ddb(cls_sess);
3146}
3147
3148static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3149{
3150 struct iscsi_session *sess;
3151 struct ddb_entry *ddb_entry;
3152 struct scsi_qla_host *ha;
3153
3154 sess = cls_sess->dd_data;
3155 ddb_entry = sess->dd_data;
3156 ha = ddb_entry->ha;
3157
3158 if (!(ddb_entry->ddb_type == FLASH_DDB))
3159 return;
3160
3161 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3162 !iscsi_is_session_online(cls_sess)) {
3163 DEBUG2(ql4_printk(KERN_INFO, ha,
3164 "relogin issued\n"));
3165 qla4xxx_relogin_flash_ddb(cls_sess);
3166 }
3167}
3168
f4f5df23
VC
3169void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
3170{
1b46807e 3171 if (ha->dpc_thread)
f4f5df23 3172 queue_work(ha->dpc_thread, &ha->dpc_work);
f4f5df23
VC
3173}
3174
ff884430
VC
3175static struct qla4_work_evt *
3176qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3177 enum qla4_work_type type)
3178{
3179 struct qla4_work_evt *e;
3180 uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3181
3182 e = kzalloc(size, GFP_ATOMIC);
3183 if (!e)
3184 return NULL;
3185
3186 INIT_LIST_HEAD(&e->list);
3187 e->type = type;
3188 return e;
3189}
3190
3191static void qla4xxx_post_work(struct scsi_qla_host *ha,
3192 struct qla4_work_evt *e)
3193{
3194 unsigned long flags;
3195
3196 spin_lock_irqsave(&ha->work_lock, flags);
3197 list_add_tail(&e->list, &ha->work_list);
3198 spin_unlock_irqrestore(&ha->work_lock, flags);
3199 qla4xxx_wake_dpc(ha);
3200}
3201
3202int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3203 enum iscsi_host_event_code aen_code,
3204 uint32_t data_size, uint8_t *data)
3205{
3206 struct qla4_work_evt *e;
3207
3208 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3209 if (!e)
3210 return QLA_ERROR;
3211
3212 e->u.aen.code = aen_code;
3213 e->u.aen.data_size = data_size;
3214 memcpy(e->u.aen.data, data, data_size);
3215
3216 qla4xxx_post_work(ha, e);
3217
3218 return QLA_SUCCESS;
3219}
3220
c0b9d3f7
VC
3221int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3222 uint32_t status, uint32_t pid,
3223 uint32_t data_size, uint8_t *data)
3224{
3225 struct qla4_work_evt *e;
3226
3227 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3228 if (!e)
3229 return QLA_ERROR;
3230
3231 e->u.ping.status = status;
3232 e->u.ping.pid = pid;
3233 e->u.ping.data_size = data_size;
3234 memcpy(e->u.ping.data, data, data_size);
3235
3236 qla4xxx_post_work(ha, e);
3237
3238 return QLA_SUCCESS;
3239}
3240
a7380a65 3241static void qla4xxx_do_work(struct scsi_qla_host *ha)
ff884430
VC
3242{
3243 struct qla4_work_evt *e, *tmp;
3244 unsigned long flags;
3245 LIST_HEAD(work);
3246
3247 spin_lock_irqsave(&ha->work_lock, flags);
3248 list_splice_init(&ha->work_list, &work);
3249 spin_unlock_irqrestore(&ha->work_lock, flags);
3250
3251 list_for_each_entry_safe(e, tmp, &work, list) {
3252 list_del_init(&e->list);
3253
3254 switch (e->type) {
3255 case QLA4_EVENT_AEN:
3256 iscsi_post_host_event(ha->host_no,
3257 &qla4xxx_iscsi_transport,
3258 e->u.aen.code,
3259 e->u.aen.data_size,
3260 e->u.aen.data);
3261 break;
c0b9d3f7
VC
3262 case QLA4_EVENT_PING_STATUS:
3263 iscsi_ping_comp_event(ha->host_no,
3264 &qla4xxx_iscsi_transport,
3265 e->u.ping.status,
3266 e->u.ping.pid,
3267 e->u.ping.data_size,
3268 e->u.ping.data);
3269 break;
ff884430
VC
3270 default:
3271 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
3272 "supported", e->type);
3273 }
3274 kfree(e);
3275 }
3276}
3277
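qla4xxx_post_work() and qla4xxx_do_work() above follow the usual deferred-work pattern: producers append events to a list under work_lock, and the DPC splices the whole list out while holding the lock and then processes the entries lock-free. A minimal userspace sketch of the same pattern using pthreads (a simplified LIFO list here, whereas the driver keeps FIFO order with list_head):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_evt {
	int type;
	struct work_evt *next;
};

static struct work_evt *work_list;
static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;

static void post_work(int type)
{
	struct work_evt *e = calloc(1, sizeof(*e));

	if (!e)
		return;
	e->type = type;
	pthread_mutex_lock(&work_lock);
	e->next = work_list;          /* LIFO for brevity */
	work_list = e;
	pthread_mutex_unlock(&work_lock);
}

static void do_work(void)
{
	struct work_evt *list, *e;

	pthread_mutex_lock(&work_lock);
	list = work_list;             /* splice the whole list out ... */
	work_list = NULL;
	pthread_mutex_unlock(&work_lock);

	while ((e = list)) {          /* ... then process it without the lock */
		list = e->next;
		printf("event type: 0x%x\n", e->type);
		free(e);
	}
}

int main(void)
{
	post_work(1);
	post_work(2);
	do_work();
	return 0;
}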
afaf5a2d
DS
3278/**
3279 * qla4xxx_do_dpc - dpc routine
3280 * @data: in our case pointer to adapter structure
3281 *
3282 * This routine is a task that is scheduled by the interrupt handler
3283 * to perform the background processing for interrupts. We put it
3284 * on a task queue that is consumed whenever the scheduler runs; that's
3285 * so you can do anything (i.e. put the process to sleep etc). In fact,
3286 * the mid-level tries to sleep when it reaches the driver threshold
3287 * "host->can_queue". This can cause a panic if we were in our interrupt code.
3288 **/
c4028958 3289static void qla4xxx_do_dpc(struct work_struct *work)
afaf5a2d 3290{
c4028958
DH
3291 struct scsi_qla_host *ha =
3292 container_of(work, struct scsi_qla_host, dpc_work);
477ffb9d 3293 int status = QLA_ERROR;
afaf5a2d 3294
f26b9044 3295 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
f4f5df23
VC
3296 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
3297 ha->host_no, __func__, ha->flags, ha->dpc_flags))
afaf5a2d
DS
3298
3299 /* Initialization not yet finished. Don't do anything yet. */
3300 if (!test_bit(AF_INIT_DONE, &ha->flags))
1b46807e 3301 return;
afaf5a2d 3302
2232be0d
LC
3303 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3304 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
3305 ha->host_no, __func__, ha->flags));
1b46807e 3306 return;
2232be0d
LC
3307 }
3308
ff884430
VC
3309 /* post events to application */
3310 qla4xxx_do_work(ha);
3311
f4f5df23
VC
3312 if (is_qla8022(ha)) {
3313 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3314 qla4_8xxx_idc_lock(ha);
3315 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3316 QLA82XX_DEV_FAILED);
3317 qla4_8xxx_idc_unlock(ha);
3318 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3319 qla4_8xxx_device_state_handler(ha);
3320 }
3321 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3322 qla4_8xxx_need_qsnt_handler(ha);
3323 }
3324 }
3325
3326 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
3327 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
afaf5a2d 3328 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
f4f5df23
VC
3329 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3330 if (ql4xdontresethba) {
3331 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3332 ha->host_no, __func__));
3333 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3334 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3335 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3336 goto dpc_post_reset_ha;
3337 }
3338 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3339 test_bit(DPC_RESET_HA, &ha->dpc_flags))
3340 qla4xxx_recover_adapter(ha);
afaf5a2d 3341
477ffb9d 3342 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 3343 uint8_t wait_time = RESET_INTR_TOV;
afaf5a2d 3344
afaf5a2d
DS
3345 while ((readw(&ha->reg->ctrl_status) &
3346 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
3347 if (--wait_time == 0)
3348 break;
afaf5a2d 3349 msleep(1000);
afaf5a2d 3350 }
afaf5a2d
DS
3351 if (wait_time == 0)
3352 DEBUG2(printk("scsi%ld: %s: SR|FSR "
3353 "bit not cleared-- resetting\n",
3354 ha->host_no, __func__));
f4f5df23 3355 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
477ffb9d
DS
3356 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
3357 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
f4f5df23 3358 status = qla4xxx_recover_adapter(ha);
477ffb9d
DS
3359 }
3360 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3361 if (status == QLA_SUCCESS)
f4f5df23 3362 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
3363 }
3364 }
3365
f4f5df23 3366dpc_post_reset_ha:
afaf5a2d
DS
3367 /* ---- process AEN? --- */
3368 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
3369 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
3370
3371 /* ---- Get DHCP IP Address? --- */
3372 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
3373 qla4xxx_get_dhcp_ip_address(ha);
3374
13483730
MC
3375 /* ---- relogin device? --- */
3376 if (adapter_up(ha) &&
3377 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
3378 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
3379 }
3380
065aa1b4
VC
3381 /* ---- link change? --- */
3382 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
3383 if (!test_bit(AF_LINK_UP, &ha->flags)) {
3384 /* ---- link down? --- */
2d7924e6 3385 qla4xxx_mark_all_devices_missing(ha);
065aa1b4
VC
3386 } else {
3387 /* ---- link up? --- *
3388 * F/W will auto login to all devices ONLY ONCE after
3389 * link up during driver initialization and runtime
3390 * fatal error recovery. Therefore, the driver must
3391 * manually relogin to devices when recovering from
3392 * connection failures, logouts, expired KATO, etc. */
13483730
MC
3393 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
3394 qla4xxx_build_ddb_list(ha, ha->is_reset);
3395 iscsi_host_for_each_session(ha->host,
3396 qla4xxx_login_flash_ddb);
3397 } else
3398 qla4xxx_relogin_all_devices(ha);
065aa1b4
VC
3399 }
3400 }
afaf5a2d
DS
3401}
3402
3403/**
3404 * qla4xxx_free_adapter - release the adapter
3405 * @ha: pointer to adapter structure
3406 **/
3407static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3408{
8a288960 3409 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
afaf5a2d
DS
3410
3411 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
3412 /* Turn-off interrupts on the card. */
f4f5df23 3413 ha->isp_ops->disable_intrs(ha);
afaf5a2d
DS
3414 }
3415
f4f5df23
VC
3416 /* Remove timer thread, if present */
3417 if (ha->timer_active)
3418 qla4xxx_stop_timer(ha);
3419
afaf5a2d
DS
3420 /* Kill the kernel thread for this host */
3421 if (ha->dpc_thread)
3422 destroy_workqueue(ha->dpc_thread);
3423
b3a271a9
MR
3424	/* Kill the task work queue for this host */
3425 if (ha->task_wq)
3426 destroy_workqueue(ha->task_wq);
3427
f4f5df23
VC
3428 /* Put firmware in known state */
3429 ha->isp_ops->reset_firmware(ha);
afaf5a2d 3430
f4f5df23
VC
3431 if (is_qla8022(ha)) {
3432 qla4_8xxx_idc_lock(ha);
3433 qla4_8xxx_clear_drv_active(ha);
3434 qla4_8xxx_idc_unlock(ha);
3435 }
afaf5a2d 3436
afaf5a2d
DS
3437 /* Detach interrupts */
3438 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
f4f5df23 3439 qla4xxx_free_irqs(ha);
afaf5a2d 3440
bee4fe8e
DS
3441 /* free extra memory */
3442 qla4xxx_mem_free(ha);
f4f5df23
VC
3443}
3444
3445int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3446{
3447 int status = 0;
f4f5df23
VC
3448 unsigned long mem_base, mem_len, db_base, db_len;
3449 struct pci_dev *pdev = ha->pdev;
3450
3451 status = pci_request_regions(pdev, DRIVER_NAME);
3452 if (status) {
3453 printk(KERN_WARNING
3454 "scsi(%ld) Failed to reserve PIO regions (%s) "
3455 "status=%d\n", ha->host_no, pci_name(pdev), status);
3456 goto iospace_error_exit;
3457 }
3458
f4f5df23 3459 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
7d7311c4
SS
3460 __func__, pdev->revision));
3461 ha->revision_id = pdev->revision;
bee4fe8e 3462
f4f5df23
VC
3463 /* remap phys address */
3464 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
3465 mem_len = pci_resource_len(pdev, 0);
3466 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
3467 __func__, mem_base, mem_len));
afaf5a2d 3468
f4f5df23
VC
3469 /* mapping of pcibase pointer */
3470 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
3471 if (!ha->nx_pcibase) {
3472 printk(KERN_ERR
3473 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
3474 pci_release_regions(ha->pdev);
3475 goto iospace_error_exit;
3476 }
3477
3478	/* Mapping of the IO base pointer and the doorbell read/write pointers */
3479
3480 /* mapping of IO base pointer */
3481 ha->qla4_8xxx_reg =
3482 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
3483 0xbc000 + (ha->pdev->devfn << 11));
3484
3485 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
3486 db_len = pci_resource_len(pdev, 4);
3487
2657c800
SS
3488 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3489 QLA82XX_CAM_RAM_DB2);
f4f5df23 3490
2657c800 3491 return 0;
f4f5df23
VC
3492iospace_error_exit:
3493 return -ENOMEM;
afaf5a2d
DS
3494}
3495
3496/**
3497 * qla4xxx_iospace_config - maps registers
3498 * @ha: pointer to adapter structure
3499 *
3500 * This routine maps the HBA's registers from the PCI address space
3501 * into the kernel virtual address space for memory-mapped I/O.
3502 **/
f4f5df23 3503int qla4xxx_iospace_config(struct scsi_qla_host *ha)
afaf5a2d
DS
3504{
3505 unsigned long pio, pio_len, pio_flags;
3506 unsigned long mmio, mmio_len, mmio_flags;
3507
3508 pio = pci_resource_start(ha->pdev, 0);
3509 pio_len = pci_resource_len(ha->pdev, 0);
3510 pio_flags = pci_resource_flags(ha->pdev, 0);
3511 if (pio_flags & IORESOURCE_IO) {
3512 if (pio_len < MIN_IOBASE_LEN) {
c2660df3 3513 ql4_printk(KERN_WARNING, ha,
afaf5a2d
DS
3514 "Invalid PCI I/O region size\n");
3515 pio = 0;
3516 }
3517 } else {
c2660df3 3518 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
afaf5a2d
DS
3519 pio = 0;
3520 }
3521
3522 /* Use MMIO operations for all accesses. */
3523 mmio = pci_resource_start(ha->pdev, 1);
3524 mmio_len = pci_resource_len(ha->pdev, 1);
3525 mmio_flags = pci_resource_flags(ha->pdev, 1);
3526
3527 if (!(mmio_flags & IORESOURCE_MEM)) {
c2660df3
VC
3528 ql4_printk(KERN_ERR, ha,
3529 "region #0 not an MMIO resource, aborting\n");
afaf5a2d
DS
3530
3531 goto iospace_error_exit;
3532 }
c2660df3 3533
afaf5a2d 3534 if (mmio_len < MIN_IOBASE_LEN) {
c2660df3
VC
3535 ql4_printk(KERN_ERR, ha,
3536 "Invalid PCI mem region size, aborting\n");
afaf5a2d
DS
3537 goto iospace_error_exit;
3538 }
3539
3540 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
c2660df3
VC
3541 ql4_printk(KERN_WARNING, ha,
3542 "Failed to reserve PIO/MMIO regions\n");
afaf5a2d
DS
3543
3544 goto iospace_error_exit;
3545 }
3546
3547 ha->pio_address = pio;
3548 ha->pio_length = pio_len;
3549 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
3550 if (!ha->reg) {
c2660df3
VC
3551 ql4_printk(KERN_ERR, ha,
3552 "cannot remap MMIO, aborting\n");
afaf5a2d
DS
3553
3554 goto iospace_error_exit;
3555 }
3556
3557 return 0;
3558
3559iospace_error_exit:
3560 return -ENOMEM;
3561}
3562
f4f5df23
VC
3563static struct isp_operations qla4xxx_isp_ops = {
3564 .iospace_config = qla4xxx_iospace_config,
3565 .pci_config = qla4xxx_pci_config,
3566 .disable_intrs = qla4xxx_disable_intrs,
3567 .enable_intrs = qla4xxx_enable_intrs,
3568 .start_firmware = qla4xxx_start_firmware,
3569 .intr_handler = qla4xxx_intr_handler,
3570 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
3571 .reset_chip = qla4xxx_soft_reset,
3572 .reset_firmware = qla4xxx_hw_reset,
3573 .queue_iocb = qla4xxx_queue_iocb,
3574 .complete_iocb = qla4xxx_complete_iocb,
3575 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
3576 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
3577 .get_sys_info = qla4xxx_get_sys_info,
3578};
3579
3580static struct isp_operations qla4_8xxx_isp_ops = {
3581 .iospace_config = qla4_8xxx_iospace_config,
3582 .pci_config = qla4_8xxx_pci_config,
3583 .disable_intrs = qla4_8xxx_disable_intrs,
3584 .enable_intrs = qla4_8xxx_enable_intrs,
3585 .start_firmware = qla4_8xxx_load_risc,
3586 .intr_handler = qla4_8xxx_intr_handler,
3587 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
3588 .reset_chip = qla4_8xxx_isp_reset,
3589 .reset_firmware = qla4_8xxx_stop_firmware,
3590 .queue_iocb = qla4_8xxx_queue_iocb,
3591 .complete_iocb = qla4_8xxx_complete_iocb,
3592 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
3593 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
3594 .get_sys_info = qla4_8xxx_get_sys_info,
3595};
3596
3597uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3598{
3599 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
3600}
3601
3602uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3603{
3604 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
3605}
3606
3607uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3608{
3609 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
3610}
3611
3612uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3613{
3614 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
3615}
3616
2a991c21
MR
3617static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
3618{
3619 struct scsi_qla_host *ha = data;
3620 char *str = buf;
3621 int rc;
3622
3623 switch (type) {
3624 case ISCSI_BOOT_ETH_FLAGS:
3625 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3626 break;
3627 case ISCSI_BOOT_ETH_INDEX:
3628 rc = sprintf(str, "0\n");
3629 break;
3630 case ISCSI_BOOT_ETH_MAC:
3631 rc = sysfs_format_mac(str, ha->my_mac,
3632 MAC_ADDR_LEN);
3633 break;
3634 default:
3635 rc = -ENOSYS;
3636 break;
3637 }
3638 return rc;
3639}
3640
587a1f16 3641static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
2a991c21
MR
3642{
3643 int rc;
3644
3645 switch (type) {
3646 case ISCSI_BOOT_ETH_FLAGS:
3647 case ISCSI_BOOT_ETH_MAC:
3648 case ISCSI_BOOT_ETH_INDEX:
3649 rc = S_IRUGO;
3650 break;
3651 default:
3652 rc = 0;
3653 break;
3654 }
3655 return rc;
3656}
3657
3658static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
3659{
3660 struct scsi_qla_host *ha = data;
3661 char *str = buf;
3662 int rc;
3663
3664 switch (type) {
3665 case ISCSI_BOOT_INI_INITIATOR_NAME:
3666 rc = sprintf(str, "%s\n", ha->name_string);
3667 break;
3668 default:
3669 rc = -ENOSYS;
3670 break;
3671 }
3672 return rc;
3673}
3674
587a1f16 3675static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
2a991c21
MR
3676{
3677 int rc;
3678
3679 switch (type) {
3680 case ISCSI_BOOT_INI_INITIATOR_NAME:
3681 rc = S_IRUGO;
3682 break;
3683 default:
3684 rc = 0;
3685 break;
3686 }
3687 return rc;
3688}
3689
3690static ssize_t
3691qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
3692 char *buf)
3693{
3694 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
3695 char *str = buf;
3696 int rc;
3697
3698 switch (type) {
3699 case ISCSI_BOOT_TGT_NAME:
3700 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
3701 break;
3702 case ISCSI_BOOT_TGT_IP_ADDR:
3703 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
3704 rc = sprintf(buf, "%pI4\n",
3705 &boot_conn->dest_ipaddr.ip_address);
3706 else
3707 rc = sprintf(str, "%pI6\n",
3708 &boot_conn->dest_ipaddr.ip_address);
3709 break;
3710 case ISCSI_BOOT_TGT_PORT:
3711 rc = sprintf(str, "%d\n", boot_conn->dest_port);
3712 break;
3713 case ISCSI_BOOT_TGT_CHAP_NAME:
3714 rc = sprintf(str, "%.*s\n",
3715 boot_conn->chap.target_chap_name_length,
3716 (char *)&boot_conn->chap.target_chap_name);
3717 break;
3718 case ISCSI_BOOT_TGT_CHAP_SECRET:
3719 rc = sprintf(str, "%.*s\n",
3720 boot_conn->chap.target_secret_length,
3721 (char *)&boot_conn->chap.target_secret);
3722 break;
3723 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3724 rc = sprintf(str, "%.*s\n",
3725 boot_conn->chap.intr_chap_name_length,
3726 (char *)&boot_conn->chap.intr_chap_name);
3727 break;
3728 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3729 rc = sprintf(str, "%.*s\n",
3730 boot_conn->chap.intr_secret_length,
3731 (char *)&boot_conn->chap.intr_secret);
3732 break;
3733 case ISCSI_BOOT_TGT_FLAGS:
3734 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
3735 break;
3736 case ISCSI_BOOT_TGT_NIC_ASSOC:
3737 rc = sprintf(str, "0\n");
3738 break;
3739 default:
3740 rc = -ENOSYS;
3741 break;
3742 }
3743 return rc;
3744}
3745
3746static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
3747{
3748 struct scsi_qla_host *ha = data;
3749 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
3750
3751 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
3752}
3753
3754static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
3755{
3756 struct scsi_qla_host *ha = data;
3757 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
3758
3759 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
3760}
3761
587a1f16 3762static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
2a991c21
MR
3763{
3764 int rc;
3765
3766 switch (type) {
3767 case ISCSI_BOOT_TGT_NAME:
3768 case ISCSI_BOOT_TGT_IP_ADDR:
3769 case ISCSI_BOOT_TGT_PORT:
3770 case ISCSI_BOOT_TGT_CHAP_NAME:
3771 case ISCSI_BOOT_TGT_CHAP_SECRET:
3772 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
3773 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
3774 case ISCSI_BOOT_TGT_NIC_ASSOC:
3775 case ISCSI_BOOT_TGT_FLAGS:
3776 rc = S_IRUGO;
3777 break;
3778 default:
3779 rc = 0;
3780 break;
3781 }
3782 return rc;
3783}
3784
3785static void qla4xxx_boot_release(void *data)
3786{
3787 struct scsi_qla_host *ha = data;
3788
3789 scsi_host_put(ha->host);
3790}
3791
3792static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
3793{
3794 dma_addr_t buf_dma;
3795 uint32_t addr, pri_addr, sec_addr;
3796 uint32_t offset;
3797 uint16_t func_num;
3798 uint8_t val;
3799 uint8_t *buf = NULL;
3800 size_t size = 13 * sizeof(uint8_t);
3801 int ret = QLA_SUCCESS;
3802
3803 func_num = PCI_FUNC(ha->pdev->devfn);
3804
0d5b36b8
MR
3805 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
3806 __func__, ha->pdev->device, func_num);
2a991c21 3807
0d5b36b8 3808 if (is_qla40XX(ha)) {
2a991c21
MR
3809 if (func_num == 1) {
3810 addr = NVRAM_PORT0_BOOT_MODE;
3811 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
3812 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
3813 } else if (func_num == 3) {
3814 addr = NVRAM_PORT1_BOOT_MODE;
3815 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
3816 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
3817 } else {
3818 ret = QLA_ERROR;
3819 goto exit_boot_info;
3820 }
3821
3822 /* Check Boot Mode */
3823 val = rd_nvram_byte(ha, addr);
3824 if (!(val & 0x07)) {
e8fb00e0
MR
3825 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
3826 "options : 0x%x\n", __func__, val));
2a991c21
MR
3827 ret = QLA_ERROR;
3828 goto exit_boot_info;
3829 }
3830
3831 /* get primary valid target index */
3832 val = rd_nvram_byte(ha, pri_addr);
3833 if (val & BIT_7)
3834 ddb_index[0] = (val & 0x7f);
2a991c21
MR
3835
3836 /* get secondary valid target index */
3837 val = rd_nvram_byte(ha, sec_addr);
3838 if (val & BIT_7)
3839 ddb_index[1] = (val & 0x7f);
2a991c21
MR
3840
3841 } else if (is_qla8022(ha)) {
3842 buf = dma_alloc_coherent(&ha->pdev->dev, size,
3843 &buf_dma, GFP_KERNEL);
3844 if (!buf) {
3845 DEBUG2(ql4_printk(KERN_ERR, ha,
3846 "%s: Unable to allocate dma buffer\n",
3847 __func__));
3848 ret = QLA_ERROR;
3849 goto exit_boot_info;
3850 }
3851
3852 if (ha->port_num == 0)
3853 offset = BOOT_PARAM_OFFSET_PORT0;
3854 else if (ha->port_num == 1)
3855 offset = BOOT_PARAM_OFFSET_PORT1;
3856 else {
3857 ret = QLA_ERROR;
3858 goto exit_boot_info_free;
3859 }
3860 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
3861 offset;
3862 if (qla4xxx_get_flash(ha, buf_dma, addr,
3863 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
3864 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
0bd7f842 3865 " failed\n", ha->host_no, __func__));
2a991c21
MR
3866 ret = QLA_ERROR;
3867 goto exit_boot_info_free;
3868 }
3869 /* Check Boot Mode */
3870 if (!(buf[1] & 0x07)) {
e8fb00e0
MR
3871 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
3872 " : 0x%x\n", buf[1]));
2a991c21
MR
3873 ret = QLA_ERROR;
3874 goto exit_boot_info_free;
3875 }
3876
3877 /* get primary valid target index */
3878 if (buf[2] & BIT_7)
3879 ddb_index[0] = buf[2] & 0x7f;
2a991c21
MR
3880
3881 /* get secondary valid target index */
3882 if (buf[11] & BIT_7)
3883 ddb_index[1] = buf[11] & 0x7f;
2a991c21
MR
3884 } else {
3885 ret = QLA_ERROR;
3886 goto exit_boot_info;
3887 }
3888
3889 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
3890 " target ID %d\n", __func__, ddb_index[0],
3891 ddb_index[1]));
3892
3893exit_boot_info_free:
3894 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
3895exit_boot_info:
20e835b4
LC
3896 ha->pri_ddb_idx = ddb_index[0];
3897 ha->sec_ddb_idx = ddb_index[1];
2a991c21
MR
3898 return ret;
3899}
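/* For illustration only (not part of ql4_os.c; the helper name is
 * hypothetical): both the NVRAM path and the flash path above encode a boot
 * target as a single byte, with BIT_7 as the "valid" flag and the low seven
 * bits as the DDB index. A minimal sketch of that decode:
 */
static inline int qla4xxx_decode_boot_tgt_byte(uint8_t val, uint16_t *ddb_index)
{
	if (!(val & BIT_7))		/* BIT_7 clear: no valid boot target */
		return QLA_ERROR;
	*ddb_index = val & 0x7f;	/* low seven bits hold the DDB index */
	return QLA_SUCCESS;
}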
3900
28deb45c
LC
3901/**
3902 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
3903 * @ha: pointer to adapter structure
3904 * @username: CHAP username to be returned
3905 * @password: CHAP password to be returned
3906 *
3907 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
3908 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
3909 * So, from the CHAP cache, find the first BIDI CHAP entry and set it
3910 * in the boot record in sysfs.
3911 **/
3912static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
3913 char *password)
3914{
3915 int i, ret = -EINVAL;
3916 int max_chap_entries = 0;
3917 struct ql4_chap_table *chap_table;
3918
3919 if (is_qla8022(ha))
3920 max_chap_entries = (ha->hw.flt_chap_size / 2) /
3921 sizeof(struct ql4_chap_table);
3922 else
3923 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
3924
3925 if (!ha->chap_list) {
3926 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
3927 return ret;
3928 }
3929
3930 mutex_lock(&ha->chap_sem);
3931 for (i = 0; i < max_chap_entries; i++) {
3932 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
3933 if (chap_table->cookie !=
3934 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
3935 continue;
3936 }
3937
3938 if (chap_table->flags & BIT_7) /* local */
3939 continue;
3940
3941 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
3942 continue;
3943
3944 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
3945 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
3946 ret = 0;
3947 break;
3948 }
3949 mutex_unlock(&ha->chap_sem);
3950
3951 return ret;
3952}
3953
3954
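/* Illustration only (the helper name is hypothetical, not in the driver):
 * the loop above selects an entry with three checks on the cached CHAP
 * table. The same predicate, written out as a sketch:
 */
static inline int qla4xxx_chap_entry_is_bidi(struct ql4_chap_table *entry)
{
	if (entry->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE))
		return 0;			/* slot not in use */
	if (entry->flags & BIT_7)		/* local (initiator) entry */
		return 0;
	return (entry->flags & BIT_6) != 0;	/* BIDI flag */
}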
2a991c21
MR
3955static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
3956 struct ql4_boot_session_info *boot_sess,
3957 uint16_t ddb_index)
3958{
3959 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
3960 struct dev_db_entry *fw_ddb_entry;
3961 dma_addr_t fw_ddb_entry_dma;
3962 uint16_t idx;
3963 uint16_t options;
3964 int ret = QLA_SUCCESS;
3965
3966 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3967 &fw_ddb_entry_dma, GFP_KERNEL);
3968 if (!fw_ddb_entry) {
3969 DEBUG2(ql4_printk(KERN_ERR, ha,
3970 "%s: Unable to allocate dma buffer.\n",
3971 __func__));
3972 ret = QLA_ERROR;
3973 return ret;
3974 }
3975
3976 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
3977 fw_ddb_entry_dma, ddb_index)) {
e8fb00e0
MR
3978 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
3979 "index [%d]\n", __func__, ddb_index));
2a991c21
MR
3980 ret = QLA_ERROR;
3981 goto exit_boot_target;
3982 }
3983
3984 /* Update target name and IP from DDB */
3985 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
3986 min(sizeof(boot_sess->target_name),
3987 sizeof(fw_ddb_entry->iscsi_name)));
3988
3989 options = le16_to_cpu(fw_ddb_entry->options);
3990 if (options & DDB_OPT_IPV6_DEVICE) {
3991 memcpy(&boot_conn->dest_ipaddr.ip_address,
3992 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
3993 } else {
3994 boot_conn->dest_ipaddr.ip_type = 0x1;
3995 memcpy(&boot_conn->dest_ipaddr.ip_address,
3996 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
3997 }
3998
3999 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
4000
4001 /* update chap information */
4002 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
4003
4004 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4005
4006 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
4007
4008 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
4009 target_chap_name,
4010 (char *)&boot_conn->chap.target_secret,
4011 idx);
4012 if (ret) {
4013 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
4014 ret = QLA_ERROR;
4015 goto exit_boot_target;
4016 }
4017
4018 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4019 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4020 }
4021
4022 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4023
4024 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
4025
28deb45c
LC
4026 ret = qla4xxx_get_bidi_chap(ha,
4027 (char *)&boot_conn->chap.intr_chap_name,
4028 (char *)&boot_conn->chap.intr_secret);
4029
2a991c21
MR
4030 if (ret) {
4031 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
4032 ret = QLA_ERROR;
4033 goto exit_boot_target;
4034 }
4035
4036 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4037 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4038 }
4039
4040exit_boot_target:
4041 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4042 fw_ddb_entry, fw_ddb_entry_dma);
4043 return ret;
4044}
4045
4046static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4047{
4048 uint16_t ddb_index[2];
8de5b958
LC
4049 int ret = QLA_ERROR;
4050 int rval;
2a991c21
MR
4051
4052 memset(ddb_index, 0, sizeof(ddb_index));
8de5b958
LC
4053 ddb_index[0] = 0xffff;
4054 ddb_index[1] = 0xffff;
2a991c21
MR
4055 ret = get_fw_boot_info(ha, ddb_index);
4056 if (ret != QLA_SUCCESS) {
e8fb00e0
MR
4057 DEBUG2(ql4_printk(KERN_INFO, ha,
4058 "%s: No boot target configured.\n", __func__));
2a991c21
MR
4059 return ret;
4060 }
4061
13483730
MC
4062 if (ql4xdisablesysfsboot)
4063 return QLA_SUCCESS;
4064
8de5b958
LC
4065 if (ddb_index[0] == 0xffff)
4066 goto sec_target;
4067
4068 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
2a991c21 4069 ddb_index[0]);
8de5b958 4070 if (rval != QLA_SUCCESS) {
e8fb00e0
MR
4071 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4072 "configured\n", __func__));
8de5b958
LC
4073 } else
4074 ret = QLA_SUCCESS;
2a991c21 4075
8de5b958
LC
4076sec_target:
4077 if (ddb_index[1] == 0xffff)
4078 goto exit_get_boot_info;
4079
4080 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
2a991c21 4081 ddb_index[1]);
8de5b958 4082 if (rval != QLA_SUCCESS) {
e8fb00e0
MR
4083 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4084 " configured\n", __func__));
8de5b958
LC
4085 } else
4086 ret = QLA_SUCCESS;
4087
4088exit_get_boot_info:
2a991c21
MR
4089 return ret;
4090}
4091
4092static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
4093{
4094 struct iscsi_boot_kobj *boot_kobj;
4095
4096 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
13483730
MC
4097 return QLA_ERROR;
4098
4099 if (ql4xdisablesysfsboot) {
4100 ql4_printk(KERN_INFO, ha,
0bd7f842 4101 "%s: syfsboot disabled - driver will trigger login "
13483730
MC
4102 "and publish session for discovery .\n", __func__);
4103 return QLA_SUCCESS;
4104 }
4105
2a991c21
MR
4106
4107 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
4108 if (!ha->boot_kset)
4109 goto kset_free;
4110
4111 if (!scsi_host_get(ha->host))
4112 goto kset_free;
4113 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
4114 qla4xxx_show_boot_tgt_pri_info,
4115 qla4xxx_tgt_get_attr_visibility,
4116 qla4xxx_boot_release);
4117 if (!boot_kobj)
4118 goto put_host;
4119
4120 if (!scsi_host_get(ha->host))
4121 goto kset_free;
4122 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
4123 qla4xxx_show_boot_tgt_sec_info,
4124 qla4xxx_tgt_get_attr_visibility,
4125 qla4xxx_boot_release);
4126 if (!boot_kobj)
4127 goto put_host;
4128
4129 if (!scsi_host_get(ha->host))
4130 goto kset_free;
4131 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
4132 qla4xxx_show_boot_ini_info,
4133 qla4xxx_ini_get_attr_visibility,
4134 qla4xxx_boot_release);
4135 if (!boot_kobj)
4136 goto put_host;
4137
4138 if (!scsi_host_get(ha->host))
4139 goto kset_free;
4140 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
4141 qla4xxx_show_boot_eth_info,
4142 qla4xxx_eth_get_attr_visibility,
4143 qla4xxx_boot_release);
4144 if (!boot_kobj)
4145 goto put_host;
4146
13483730 4147 return QLA_SUCCESS;
2a991c21
MR
4148
4149put_host:
4150 scsi_host_put(ha->host);
4151kset_free:
4152 iscsi_boot_destroy_kset(ha->boot_kset);
4153 return -ENOMEM;
4154}
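/* For reference only, a sketch (not driver code) of the sysfs layout the
 * calls above produce when ql4xdisablesysfsboot is 0. Directory names follow
 * the iscsi_boot_sysfs conventions; the exact attribute files depend on the
 * *_get_attr_visibility callbacks defined earlier in this file.
 *
 *   /sys/firmware/iscsi_boot<host_no>/
 *       initiator/      initiator name (qla4xxx_show_boot_ini_info)
 *       ethernet0/      flags, index, mac (qla4xxx_show_boot_eth_info)
 *       target0/        primary boot target (name, IP, port, CHAP, ...)
 *       target1/        secondary boot target
 */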
4155
4549415a
LC
4156
4157/**
4158 * qla4xxx_create_chap_list - Create CHAP list from FLASH
4159 * @ha: pointer to adapter structure
4160 *
4161 * Read flash and build a list of CHAP entries. During login, when a CHAP entry
4162 * is received, it is checked against this list. If the entry exists, its CHAP
4163 * entry index is set in the DDB. If the CHAP entry does not exist in this list,
4164 * a new entry is added to the CHAP table in FLASH and the index obtained is
4165 * used in the DDB.
4166 **/
4167static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
4168{
4169 int rval = 0;
4170 uint8_t *chap_flash_data = NULL;
4171 uint32_t offset;
4172 dma_addr_t chap_dma;
4173 uint32_t chap_size = 0;
4174
4175 if (is_qla40XX(ha))
4176 chap_size = MAX_CHAP_ENTRIES_40XX *
4177 sizeof(struct ql4_chap_table);
4178	else	/* A single region contains CHAP info for both
4179		 * ports and is divided in half, one half per port.
4180 */
4181 chap_size = ha->hw.flt_chap_size / 2;
4182
4183 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
4184 &chap_dma, GFP_KERNEL);
4185 if (!chap_flash_data) {
4186 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
4187 return;
4188 }
4189 if (is_qla40XX(ha))
4190 offset = FLASH_CHAP_OFFSET;
4191 else {
4192 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
4193 if (ha->port_num == 1)
4194 offset += chap_size;
4195 }
4196
4197 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
4198 if (rval != QLA_SUCCESS)
4199 goto exit_chap_list;
4200
4201 if (ha->chap_list == NULL)
4202 ha->chap_list = vmalloc(chap_size);
4203 if (ha->chap_list == NULL) {
4204 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
4205 goto exit_chap_list;
4206 }
4207
4208 memcpy(ha->chap_list, chap_flash_data, chap_size);
4209
4210exit_chap_list:
4211 dma_free_coherent(&ha->pdev->dev, chap_size,
4212 chap_flash_data, chap_dma);
4549415a
LC
4213}
4214
13483730
MC
4215static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
4216 struct ql4_tuple_ddb *tddb)
4217{
4218 struct scsi_qla_host *ha;
4219 struct iscsi_cls_session *cls_sess;
4220 struct iscsi_cls_conn *cls_conn;
4221 struct iscsi_session *sess;
4222 struct iscsi_conn *conn;
4223
4224 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
4225 ha = ddb_entry->ha;
4226 cls_sess = ddb_entry->sess;
4227 sess = cls_sess->dd_data;
4228 cls_conn = ddb_entry->conn;
4229 conn = cls_conn->dd_data;
4230
4231 tddb->tpgt = sess->tpgt;
4232 tddb->port = conn->persistent_port;
4233 strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
4234 strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
4235}
4236
4237static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
4238 struct ql4_tuple_ddb *tddb)
4239{
4240 uint16_t options = 0;
4241
4242 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
4243 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
4244 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
4245
4246 options = le16_to_cpu(fw_ddb_entry->options);
4247 if (options & DDB_OPT_IPV6_DEVICE)
4248 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
4249 else
4250 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
4251
4252 tddb->port = le16_to_cpu(fw_ddb_entry->port);
173269ef 4253 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], sizeof(tddb->isid));
13483730
MC
4254}
4255
4256static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
4257 struct ql4_tuple_ddb *old_tddb,
173269ef
MR
4258 struct ql4_tuple_ddb *new_tddb,
4259 uint8_t is_isid_compare)
13483730
MC
4260{
4261 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4262 return QLA_ERROR;
4263
4264 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
4265 return QLA_ERROR;
4266
4267 if (old_tddb->port != new_tddb->port)
4268 return QLA_ERROR;
4269
173269ef
MR
4270	/* For multi-session targets the driver generates the ISID, so do not
4271	 * compare ISIDs in the reset path, since that would be a comparison
4272	 * between a driver-generated ISID and a firmware-generated ISID. This
4273	 * could lead to adding duplicate DDBs to the list, as a driver-
4274	 * generated ISID would not match the firmware-generated ISID.
4275 */
4276 if (is_isid_compare) {
4277 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
4278 "%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
4279 __func__, old_tddb->isid[5], old_tddb->isid[4],
4280 old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
4281 old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
4282 new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
4283 new_tddb->isid[0]));
4284
4285 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4286 sizeof(old_tddb->isid)))
4287 return QLA_ERROR;
4288 }
4289
13483730
MC
4290 DEBUG2(ql4_printk(KERN_INFO, ha,
4291 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
4292 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
4293 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
4294 new_tddb->ip_addr, new_tddb->iscsi_name));
4295
4296 return QLA_SUCCESS;
4297}
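/* Illustration only: the two callers that follow pick the ISID comparison
 * mode according to the note above -- the live-session lookup used in the
 * reset path passes is_isid_compare = false, while the flash-DDB lookup used
 * at initialization passes true:
 *
 *	qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false);
 *	qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
 */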
4298
4299static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
4300 struct dev_db_entry *fw_ddb_entry)
4301{
4302 struct ddb_entry *ddb_entry;
4303 struct ql4_tuple_ddb *fw_tddb = NULL;
4304 struct ql4_tuple_ddb *tmp_tddb = NULL;
4305 int idx;
4306 int ret = QLA_ERROR;
4307
4308 fw_tddb = vzalloc(sizeof(*fw_tddb));
4309 if (!fw_tddb) {
4310 DEBUG2(ql4_printk(KERN_WARNING, ha,
4311 "Memory Allocation failed.\n"));
4312 ret = QLA_SUCCESS;
4313 goto exit_check;
4314 }
4315
4316 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4317 if (!tmp_tddb) {
4318 DEBUG2(ql4_printk(KERN_WARNING, ha,
4319 "Memory Allocation failed.\n"));
4320 ret = QLA_SUCCESS;
4321 goto exit_check;
4322 }
4323
4324 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
4325
4326 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4327 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
4328 if (ddb_entry == NULL)
4329 continue;
4330
4331 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
173269ef 4332 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
13483730
MC
4333 ret = QLA_SUCCESS; /* found */
4334 goto exit_check;
4335 }
4336 }
4337
4338exit_check:
4339 if (fw_tddb)
4340 vfree(fw_tddb);
4341 if (tmp_tddb)
4342 vfree(tmp_tddb);
4343 return ret;
4344}
4345
4346static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
4347 struct list_head *list_nt,
4348 struct dev_db_entry *fw_ddb_entry)
4349{
4350 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4351 struct ql4_tuple_ddb *fw_tddb = NULL;
4352 struct ql4_tuple_ddb *tmp_tddb = NULL;
4353 int ret = QLA_ERROR;
4354
4355 fw_tddb = vzalloc(sizeof(*fw_tddb));
4356 if (!fw_tddb) {
4357 DEBUG2(ql4_printk(KERN_WARNING, ha,
4358 "Memory Allocation failed.\n"));
4359 ret = QLA_SUCCESS;
4360 goto exit_check;
4361 }
4362
4363 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4364 if (!tmp_tddb) {
4365 DEBUG2(ql4_printk(KERN_WARNING, ha,
4366 "Memory Allocation failed.\n"));
4367 ret = QLA_SUCCESS;
4368 goto exit_check;
4369 }
4370
4371 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
4372
4373 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4374 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
173269ef 4375 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true)) {
13483730
MC
4376 ret = QLA_SUCCESS; /* found */
4377 goto exit_check;
4378 }
4379 }
4380
4381exit_check:
4382 if (fw_tddb)
4383 vfree(fw_tddb);
4384 if (tmp_tddb)
4385 vfree(tmp_tddb);
4386 return ret;
4387}
4388
4a4bc2e9 4389static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
13483730 4390{
4a4bc2e9 4391 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
13483730 4392
4a4bc2e9
LC
4393 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4394 list_del_init(&ddb_idx->list);
4395 vfree(ddb_idx);
13483730 4396 }
13483730
MC
4397}
4398
4399static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
4400 struct dev_db_entry *fw_ddb_entry)
4401{
4402 struct iscsi_endpoint *ep;
4403 struct sockaddr_in *addr;
4404 struct sockaddr_in6 *addr6;
4405 struct sockaddr *dst_addr;
4406 char *ip;
4407
4408 /* TODO: need to destroy on unload iscsi_endpoint*/
4409 dst_addr = vmalloc(sizeof(*dst_addr));
4410 if (!dst_addr)
4411 return NULL;
4412
4413 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
4414 dst_addr->sa_family = AF_INET6;
4415 addr6 = (struct sockaddr_in6 *)dst_addr;
4416 ip = (char *)&addr6->sin6_addr;
4417 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
4418 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
4419
4420 } else {
4421 dst_addr->sa_family = AF_INET;
4422 addr = (struct sockaddr_in *)dst_addr;
4423 ip = (char *)&addr->sin_addr;
4424 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
4425 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
4426 }
4427
4428 ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
4429 vfree(dst_addr);
4430 return ep;
4431}
4432
4433static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
4434{
4435 if (ql4xdisablesysfsboot)
4436 return QLA_SUCCESS;
4437 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
4438 return QLA_ERROR;
4439 return QLA_SUCCESS;
4440}
4441
4442static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
4443 struct ddb_entry *ddb_entry)
4444{
c28eaaca
NJ
4445 uint16_t def_timeout;
4446
13483730
MC
4447 ddb_entry->ddb_type = FLASH_DDB;
4448 ddb_entry->fw_ddb_index = INVALID_ENTRY;
4449 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
4450 ddb_entry->ha = ha;
4451 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
4452 ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
4453
4454 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
4455 atomic_set(&ddb_entry->relogin_timer, 0);
4456 atomic_set(&ddb_entry->relogin_retry_count, 0);
c28eaaca 4457 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
13483730 4458 ddb_entry->default_relogin_timeout =
c28eaaca
NJ
4459 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
4460 def_timeout : LOGIN_TOV;
13483730
MC
4461 ddb_entry->default_time2wait =
4462 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
4463}
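/* Sketch only (the helper name is hypothetical, not in the driver): the same
 * clamp of a firmware default timeout to a value between LOGIN_TOV and
 * LOGIN_TOV * 10 is applied both above and again in qla4xxx_build_ddb_list()
 * below.
 */
static inline uint16_t qla4xxx_clamp_def_timeout(uint16_t def_timeout)
{
	if (def_timeout > LOGIN_TOV && def_timeout < LOGIN_TOV * 10)
		return def_timeout;
	return LOGIN_TOV;		/* fall back to the driver default */
}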
4464
4465static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
4466{
4467 uint32_t idx = 0;
4468 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
4469 uint32_t sts[MBOX_REG_COUNT];
4470 uint32_t ip_state;
4471 unsigned long wtime;
4472 int ret;
4473
4474 wtime = jiffies + (HZ * IP_CONFIG_TOV);
4475 do {
4476 for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
4477 if (ip_idx[idx] == -1)
4478 continue;
4479
4480 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
4481
4482 if (ret == QLA_ERROR) {
4483 ip_idx[idx] = -1;
4484 continue;
4485 }
4486
4487 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
4488
4489 DEBUG2(ql4_printk(KERN_INFO, ha,
4490 "Waiting for IP state for idx = %d, state = 0x%x\n",
4491 ip_idx[idx], ip_state));
4492 if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
4493 ip_state == IP_ADDRSTATE_INVALID ||
4494 ip_state == IP_ADDRSTATE_PREFERRED ||
4495 ip_state == IP_ADDRSTATE_DEPRICATED ||
4496 ip_state == IP_ADDRSTATE_DISABLING)
4497 ip_idx[idx] = -1;
13483730
MC
4498 }
4499
4500 /* Break if all IP states checked */
4501 if ((ip_idx[0] == -1) &&
4502 (ip_idx[1] == -1) &&
4503 (ip_idx[2] == -1) &&
4504 (ip_idx[3] == -1))
4505 break;
4506 schedule_timeout_uninterruptible(HZ);
4507 } while (time_after(wtime, jiffies));
4508}
4509
4a4bc2e9
LC
4510static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
4511 struct list_head *list_st)
13483730 4512{
4a4bc2e9 4513 struct qla_ddb_index *st_ddb_idx;
13483730 4514 int max_ddbs;
4a4bc2e9
LC
4515 int fw_idx_size;
4516 struct dev_db_entry *fw_ddb_entry;
4517 dma_addr_t fw_ddb_dma;
13483730
MC
4518 int ret;
4519 uint32_t idx = 0, next_idx = 0;
4520 uint32_t state = 0, conn_err = 0;
4a4bc2e9 4521 uint16_t conn_id = 0;
13483730
MC
4522
4523 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4524 &fw_ddb_dma);
4525 if (fw_ddb_entry == NULL) {
4526 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4a4bc2e9 4527 goto exit_st_list;
13483730
MC
4528 }
4529
4a4bc2e9
LC
4530 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4531 MAX_DEV_DB_ENTRIES;
13483730
MC
4532 fw_idx_size = sizeof(struct qla_ddb_index);
4533
4534 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4a4bc2e9
LC
4535 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4536 NULL, &next_idx, &state,
4537 &conn_err, NULL, &conn_id);
13483730
MC
4538 if (ret == QLA_ERROR)
4539 break;
4540
981c982c
LC
4541 /* Ignore DDB if invalid state (unassigned) */
4542 if (state == DDB_DS_UNASSIGNED)
4543 goto continue_next_st;
4544
13483730
MC
4545 /* Check if ST, add to the list_st */
4546 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
4547 goto continue_next_st;
4548
4549 st_ddb_idx = vzalloc(fw_idx_size);
4550 if (!st_ddb_idx)
4551 break;
4552
4553 st_ddb_idx->fw_ddb_idx = idx;
4554
4a4bc2e9 4555 list_add_tail(&st_ddb_idx->list, list_st);
13483730
MC
4556continue_next_st:
4557 if (next_idx == 0)
4558 break;
4559 }
4560
4a4bc2e9
LC
4561exit_st_list:
4562 if (fw_ddb_entry)
4563 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4564}
4565
4566/**
4567 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
4568 * @ha: pointer to adapter structure
4569 * @list_ddb: List from which failed ddb to be removed
4570 *
4571 * Iterate over the list of DDBs and find and remove DDBs that are either in
4572 * no connection active state or failed state
4573 **/
4574static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
4575 struct list_head *list_ddb)
4576{
4577 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
4578 uint32_t next_idx = 0;
4579 uint32_t state = 0, conn_err = 0;
4580 int ret;
4581
4582 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
4583 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
4584 NULL, 0, NULL, &next_idx, &state,
4585 &conn_err, NULL, NULL);
4586 if (ret == QLA_ERROR)
4587 continue;
4588
4589 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
4590 state == DDB_DS_SESSION_FAILED) {
4591 list_del_init(&ddb_idx->list);
4592 vfree(ddb_idx);
4593 }
4594 }
4595}
4596
4597static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
4598 struct dev_db_entry *fw_ddb_entry,
4599 int is_reset)
4600{
4601 struct iscsi_cls_session *cls_sess;
4602 struct iscsi_session *sess;
4603 struct iscsi_cls_conn *cls_conn;
4604 struct iscsi_endpoint *ep;
4605 uint16_t cmds_max = 32;
4606 uint16_t conn_id = 0;
4607 uint32_t initial_cmdsn = 0;
4608 int ret = QLA_SUCCESS;
4609
4610 struct ddb_entry *ddb_entry = NULL;
4611
4612	/* Create the session object with INVALID_ENTRY;
4613	 * the target_id will get set when we issue the login.
13483730 4614 */
4a4bc2e9
LC
4615 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
4616 cmds_max, sizeof(struct ddb_entry),
4617 sizeof(struct ql4_task_data),
4618 initial_cmdsn, INVALID_ENTRY);
4619 if (!cls_sess) {
4620 ret = QLA_ERROR;
4621 goto exit_setup;
4622 }
13483730 4623
4a4bc2e9
LC
4624	/* Session setup takes a reference on this module; drop it here with
4625	 * module_put so these flash DDB sessions do not block driver unload
4626	 * (the matching try_module_get is done at teardown in
4627	 * qla4xxx_destroy_fw_ddb_session()). */
4628 module_put(qla4xxx_iscsi_transport.owner);
4629 sess = cls_sess->dd_data;
4630 ddb_entry = sess->dd_data;
4631 ddb_entry->sess = cls_sess;
4632
4633 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
4634 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
4635 sizeof(struct dev_db_entry));
4636
4637 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
4638
4639 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
4640
4641 if (!cls_conn) {
4642 ret = QLA_ERROR;
4643 goto exit_setup;
13483730
MC
4644 }
4645
4a4bc2e9 4646 ddb_entry->conn = cls_conn;
13483730 4647
4a4bc2e9
LC
4648 /* Setup ep, for displaying attributes in sysfs */
4649 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
4650 if (ep) {
4651 ep->conn = cls_conn;
4652 cls_conn->ep = ep;
4653 } else {
4654 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
4655 ret = QLA_ERROR;
4656 goto exit_setup;
4657 }
13483730 4658
4a4bc2e9
LC
4659 /* Update sess/conn params */
4660 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
13483730 4661
4a4bc2e9
LC
4662 if (is_reset == RESET_ADAPTER) {
4663 iscsi_block_session(cls_sess);
4664		/* Use the relogin path to discover new devices by
4665		 * short-circuiting the relogin-timer logic: instead of
4666		 * arming the timer, set the flags so that login is
4667		 * initiated right away.
4668 */
4669 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4670 set_bit(DF_RELOGIN, &ddb_entry->flags);
4671 }
4672
4673exit_setup:
4674 return ret;
4675}
4676
4677static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
4678 struct list_head *list_nt, int is_reset)
4679{
4680 struct dev_db_entry *fw_ddb_entry;
4681 dma_addr_t fw_ddb_dma;
4682 int max_ddbs;
4683 int fw_idx_size;
4684 int ret;
4685 uint32_t idx = 0, next_idx = 0;
4686 uint32_t state = 0, conn_err = 0;
4687 uint16_t conn_id = 0;
4688 struct qla_ddb_index *nt_ddb_idx;
4689
4690 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
4691 &fw_ddb_dma);
4692 if (fw_ddb_entry == NULL) {
4693 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
4694 goto exit_nt_list;
13483730 4695 }
4a4bc2e9
LC
4696 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
4697 MAX_DEV_DB_ENTRIES;
4698 fw_idx_size = sizeof(struct qla_ddb_index);
13483730
MC
4699
4700 for (idx = 0; idx < max_ddbs; idx = next_idx) {
4a4bc2e9
LC
4701 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
4702 NULL, &next_idx, &state,
4703 &conn_err, NULL, &conn_id);
13483730
MC
4704 if (ret == QLA_ERROR)
4705 break;
4706
4707 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
4708 goto continue_next_nt;
4709
4710		/* Check if NT, then add it to the list */
4711 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
4712 goto continue_next_nt;
4713
4a4bc2e9
LC
4714 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
4715 state == DDB_DS_SESSION_FAILED))
4716 goto continue_next_nt;
13483730 4717
4a4bc2e9
LC
4718 DEBUG2(ql4_printk(KERN_INFO, ha,
4719 "Adding DDB to session = 0x%x\n", idx));
4720 if (is_reset == INIT_ADAPTER) {
4721 nt_ddb_idx = vmalloc(fw_idx_size);
4722 if (!nt_ddb_idx)
4723 break;
13483730 4724
4a4bc2e9 4725 nt_ddb_idx->fw_ddb_idx = idx;
13483730 4726
4a4bc2e9 4727 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
13483730
MC
4728 sizeof(struct dev_db_entry));
4729
4a4bc2e9
LC
4730 if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
4731 fw_ddb_entry) == QLA_SUCCESS) {
4732 vfree(nt_ddb_idx);
4733 goto continue_next_nt;
13483730 4734 }
4a4bc2e9
LC
4735 list_add_tail(&nt_ddb_idx->list, list_nt);
4736 } else if (is_reset == RESET_ADAPTER) {
4737 if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
4738 QLA_SUCCESS)
4739 goto continue_next_nt;
13483730 4740 }
4a4bc2e9
LC
4741
4742 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
4743 if (ret == QLA_ERROR)
4744 goto exit_nt_list;
4745
13483730
MC
4746continue_next_nt:
4747 if (next_idx == 0)
4748 break;
4749 }
4a4bc2e9
LC
4750
4751exit_nt_list:
13483730
MC
4752 if (fw_ddb_entry)
4753 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
4a4bc2e9
LC
4754}
4755
4756/**
4757 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
4758 * @ha: pointer to adapter structure
4759 * @is_reset: Is this init path or reset path
4760 *
4761 * Create a list of sendtarget (ST) entries from the firmware DDBs, issue the
4762 * send-targets discovery using connection open, then create the list of normal
4763 * target (NT) entries from the firmware DDBs. Based on the NT list, set up
4764 * session and connection objects.
4765 **/
4766void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
4767{
4768 uint16_t tmo = 0;
4769 struct list_head list_st, list_nt;
4770 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
4771 unsigned long wtime;
4772
4773 if (!test_bit(AF_LINK_UP, &ha->flags)) {
4774 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
4775 ha->is_reset = is_reset;
4776 return;
4777 }
4778
4779 INIT_LIST_HEAD(&list_st);
4780 INIT_LIST_HEAD(&list_nt);
4781
4782 qla4xxx_build_st_list(ha, &list_st);
4783
4784 /* Before issuing conn open mbox, ensure all IPs states are configured
4785 * Note, conn open fails if IPs are not configured
4786 */
4787 qla4xxx_wait_for_ip_configuration(ha);
4788
4789 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
4790 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
4791 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
4792 }
4793
4794	/* Wait for all sendtargets to complete; wait a minimum of 12 sec */
c28eaaca
NJ
4795 tmo = ((ha->def_timeout > LOGIN_TOV) &&
4796 (ha->def_timeout < LOGIN_TOV * 10) ?
4797 ha->def_timeout : LOGIN_TOV);
4798
4a4bc2e9
LC
4799 DEBUG2(ql4_printk(KERN_INFO, ha,
4800 "Default time to wait for build ddb %d\n", tmo));
4801
4802 wtime = jiffies + (HZ * tmo);
4803 do {
f1f2e60e
NJ
4804 if (list_empty(&list_st))
4805 break;
4806
4a4bc2e9
LC
4807 qla4xxx_remove_failed_ddb(ha, &list_st);
4808 schedule_timeout_uninterruptible(HZ / 10);
4809 } while (time_after(wtime, jiffies));
4810
4811 /* Free up the sendtargets list */
4812 qla4xxx_free_ddb_list(&list_st);
4813
4814 qla4xxx_build_nt_list(ha, &list_nt, is_reset);
4815
4816 qla4xxx_free_ddb_list(&list_nt);
13483730
MC
4817
4818 qla4xxx_free_ddb_index(ha);
4819}
4820
afaf5a2d
DS
4821/**
4822 * qla4xxx_probe_adapter - callback function to probe HBA
4823 * @pdev: pointer to pci_dev structure
4824 * @ent: pointer to pci_device_id entry
4825 *
4826 * This routine probes for QLogic 4xxx iSCSI host adapters.
4827 * It returns zero if successful. It also initializes all data necessary for
4828 * the driver.
4829 **/
4830static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
4831 const struct pci_device_id *ent)
4832{
4833 int ret = -ENODEV, status;
4834 struct Scsi_Host *host;
4835 struct scsi_qla_host *ha;
afaf5a2d
DS
4836 uint8_t init_retry_count = 0;
4837 char buf[34];
f4f5df23 4838 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
f9880e76 4839 uint32_t dev_state;
afaf5a2d
DS
4840
4841 if (pci_enable_device(pdev))
4842 return -1;
4843
b3a271a9 4844 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
afaf5a2d
DS
4845 if (host == NULL) {
4846 printk(KERN_WARNING
4847 "qla4xxx: Couldn't allocate host from scsi layer!\n");
4848 goto probe_disable_device;
4849 }
4850
4851 /* Clear our data area */
b3a271a9 4852 ha = to_qla_host(host);
afaf5a2d
DS
4853 memset(ha, 0, sizeof(*ha));
4854
4855 /* Save the information from PCI BIOS. */
4856 ha->pdev = pdev;
4857 ha->host = host;
4858 ha->host_no = host->host_no;
4859
2232be0d
LC
4860 pci_enable_pcie_error_reporting(pdev);
4861
f4f5df23
VC
4862 /* Setup Runtime configurable options */
4863 if (is_qla8022(ha)) {
4864 ha->isp_ops = &qla4_8xxx_isp_ops;
4865 rwlock_init(&ha->hw_lock);
4866 ha->qdr_sn_window = -1;
4867 ha->ddr_mn_window = -1;
4868 ha->curr_window = 255;
4869 ha->func_num = PCI_FUNC(ha->pdev->devfn);
4870 nx_legacy_intr = &legacy_intr[ha->func_num];
4871 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
4872 ha->nx_legacy_intr.tgt_status_reg =
4873 nx_legacy_intr->tgt_status_reg;
4874 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
4875 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
4876 } else {
4877 ha->isp_ops = &qla4xxx_isp_ops;
4878 }
4879
2232be0d
LC
4880 /* Set EEH reset type to fundamental if required by hba */
4881 if (is_qla8022(ha))
4882 pdev->needs_freset = 1;
4883
afaf5a2d 4884 /* Configure PCI I/O space. */
f4f5df23 4885 ret = ha->isp_ops->iospace_config(ha);
afaf5a2d 4886 if (ret)
f4f5df23 4887 goto probe_failed_ioconfig;
afaf5a2d 4888
c2660df3 4889 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
afaf5a2d
DS
4890 pdev->device, pdev->irq, ha->reg);
4891
4892 qla4xxx_config_dma_addressing(ha);
4893
4894 /* Initialize lists and spinlocks. */
afaf5a2d
DS
4895 INIT_LIST_HEAD(&ha->free_srb_q);
4896
4897 mutex_init(&ha->mbox_sem);
4549415a 4898 mutex_init(&ha->chap_sem);
f4f5df23 4899 init_completion(&ha->mbx_intr_comp);
95d31262 4900 init_completion(&ha->disable_acb_comp);
afaf5a2d
DS
4901
4902 spin_lock_init(&ha->hardware_lock);
afaf5a2d 4903
ff884430
VC
4904 /* Initialize work list */
4905 INIT_LIST_HEAD(&ha->work_list);
4906
afaf5a2d
DS
4907 /* Allocate dma buffers */
4908 if (qla4xxx_mem_alloc(ha)) {
c2660df3
VC
4909 ql4_printk(KERN_WARNING, ha,
4910 "[ERROR] Failed to allocate memory for adapter\n");
afaf5a2d
DS
4911
4912 ret = -ENOMEM;
4913 goto probe_failed;
4914 }
4915
b3a271a9
MR
4916 host->cmd_per_lun = 3;
4917 host->max_channel = 0;
4918 host->max_lun = MAX_LUNS - 1;
4919 host->max_id = MAX_TARGETS;
4920 host->max_cmd_len = IOCB_MAX_CDB_LEN;
4921 host->can_queue = MAX_SRBS ;
4922 host->transportt = qla4xxx_scsi_transport;
4923
4924 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
4925 if (ret) {
4926 ql4_printk(KERN_WARNING, ha,
4927 "%s: scsi_init_shared_tag_map failed\n", __func__);
4928 goto probe_failed;
4929 }
4930
4931 pci_set_drvdata(pdev, ha);
4932
4933 ret = scsi_add_host(host, &pdev->dev);
4934 if (ret)
4935 goto probe_failed;
4936
f4f5df23
VC
4937 if (is_qla8022(ha))
4938 (void) qla4_8xxx_get_flash_info(ha);
4939
afaf5a2d
DS
4940 /*
4941 * Initialize the Host adapter request/response queues and
4942 * firmware
4943 * NOTE: interrupts enabled upon successful completion
4944 */
13483730 4945 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
f4f5df23
VC
4946 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
4947 init_retry_count++ < MAX_INIT_RETRIES) {
f9880e76
PM
4948
4949 if (is_qla8022(ha)) {
4950 qla4_8xxx_idc_lock(ha);
4951 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
4952 qla4_8xxx_idc_unlock(ha);
4953 if (dev_state == QLA82XX_DEV_FAILED) {
4954 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
4955 "initialize adapter. H/W is in failed state\n",
4956 __func__);
4957 break;
4958 }
4959 }
afaf5a2d
DS
4960 DEBUG2(printk("scsi: %s: retrying adapter initialization "
4961 "(%d)\n", __func__, init_retry_count));
f4f5df23
VC
4962
4963 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
4964 continue;
4965
13483730 4966 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
afaf5a2d 4967 }
f4f5df23
VC
4968
4969 if (!test_bit(AF_ONLINE, &ha->flags)) {
c2660df3 4970 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
afaf5a2d 4971
fe998527
LC
4972 if (is_qla8022(ha) && ql4xdontresethba) {
4973 /* Put the device in failed state. */
4974 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
4975 qla4_8xxx_idc_lock(ha);
4976 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4977 QLA82XX_DEV_FAILED);
4978 qla4_8xxx_idc_unlock(ha);
4979 }
afaf5a2d 4980 ret = -ENODEV;
b3a271a9 4981 goto remove_host;
afaf5a2d
DS
4982 }
4983
afaf5a2d
DS
4984 /* Startup the kernel thread for this host adapter. */
4985 DEBUG2(printk("scsi: %s: Starting kernel thread for "
4986 "qla4xxx_dpc\n", __func__));
4987 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
4988 ha->dpc_thread = create_singlethread_workqueue(buf);
4989 if (!ha->dpc_thread) {
c2660df3 4990 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
afaf5a2d 4991 ret = -ENODEV;
b3a271a9 4992 goto remove_host;
afaf5a2d 4993 }
c4028958 4994 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
afaf5a2d 4995
b3a271a9
MR
4996 sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
4997 ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
4998 if (!ha->task_wq) {
4999 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
5000 ret = -ENODEV;
5001 goto remove_host;
5002 }
5003
f4f5df23
VC
5004 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
5005 * (which is called indirectly by qla4xxx_initialize_adapter),
5006 * so that irqs will be registered after crbinit but before
5007 * mbx_intr_enable.
5008 */
5009 if (!is_qla8022(ha)) {
5010 ret = qla4xxx_request_irqs(ha);
5011 if (ret) {
5012 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
5013 "interrupt %d already in use.\n", pdev->irq);
b3a271a9 5014 goto remove_host;
f4f5df23 5015 }
afaf5a2d 5016 }
afaf5a2d 5017
2232be0d 5018 pci_save_state(ha->pdev);
f4f5df23 5019 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
5020
5021 /* Start timer thread. */
5022 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
5023
5024 set_bit(AF_INIT_DONE, &ha->flags);
5025
afaf5a2d
DS
5026 printk(KERN_INFO
5027 " QLogic iSCSI HBA Driver version: %s\n"
5028 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
5029 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
5030 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
5031 ha->patch_number, ha->build_number);
ed1086e0 5032
2a991c21 5033 if (qla4xxx_setup_boot_info(ha))
3573bfb2
VC
5034 ql4_printk(KERN_ERR, ha,
5035 "%s: No iSCSI boot target configured\n", __func__);
2a991c21 5036
13483730
MC
5037	/* Build the DDB list and log in to each entry */
5038 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
5039 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
5040
5041 qla4xxx_create_chap_list(ha);
5042
ed1086e0 5043 qla4xxx_create_ifaces(ha);
afaf5a2d
DS
5044 return 0;
5045
b3a271a9
MR
5046remove_host:
5047 scsi_remove_host(ha->host);
5048
afaf5a2d
DS
5049probe_failed:
5050 qla4xxx_free_adapter(ha);
f4f5df23
VC
5051
5052probe_failed_ioconfig:
2232be0d 5053 pci_disable_pcie_error_reporting(pdev);
afaf5a2d
DS
5054 scsi_host_put(ha->host);
5055
5056probe_disable_device:
5057 pci_disable_device(pdev);
5058
5059 return ret;
5060}
5061
7eece5a0
KH
5062/**
5063 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
5064 * @ha: pointer to adapter structure
5065 *
5066 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
5067 * so that the other port will not re-initialize while in the process of
5068 * removing the ha due to driver unload or hba hotplug.
5069 **/
5070static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
5071{
5072 struct scsi_qla_host *other_ha = NULL;
5073 struct pci_dev *other_pdev = NULL;
5074 int fn = ISP4XXX_PCI_FN_2;
5075
5076	/* iSCSI function numbers for ISP4xxx are 1 and 3 */
5077 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
5078 fn = ISP4XXX_PCI_FN_1;
5079
5080 other_pdev =
5081 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
5082 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
5083 fn));
5084
5085	/* Get other_ha if other_pdev is valid and enabled */
5086 if (other_pdev) {
5087 if (atomic_read(&other_pdev->enable_cnt)) {
5088 other_ha = pci_get_drvdata(other_pdev);
5089 if (other_ha) {
5090 set_bit(AF_HA_REMOVAL, &other_ha->flags);
5091 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
5092 "Prevent %s reinit\n", __func__,
5093 dev_name(&other_ha->pdev->dev)));
5094 }
5095 }
5096 pci_dev_put(other_pdev);
5097 }
5098}
5099
13483730
MC
5100static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
5101{
5102 struct ddb_entry *ddb_entry;
5103 int options;
5104 int idx;
5105
5106 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
5107
5108 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
5109 if ((ddb_entry != NULL) &&
5110 (ddb_entry->ddb_type == FLASH_DDB)) {
5111
5112 options = LOGOUT_OPTION_CLOSE_SESSION;
5113 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
5114 == QLA_ERROR)
5115 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
5116 __func__);
5117
5118 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
5119 /*
5120			 * The driver's module reference count was decremented
5121			 * when the session was set up so that driver unload
5122			 * would be seamless without actually destroying the
5123			 * session; take that reference back before teardown.
5124 **/
5125 try_module_get(qla4xxx_iscsi_transport.owner);
5126 iscsi_destroy_endpoint(ddb_entry->conn->ep);
5127 qla4xxx_free_ddb(ha, ddb_entry);
5128 iscsi_session_teardown(ddb_entry->sess);
5129 }
5130 }
5131}
afaf5a2d
DS
5132/**
5133 * qla4xxx_remove_adapter - callback function to remove adapter.
5134 * @pci_dev: PCI device pointer
5135 **/
5136static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
5137{
5138 struct scsi_qla_host *ha;
5139
5140 ha = pci_get_drvdata(pdev);
5141
7eece5a0
KH
5142 if (!is_qla8022(ha))
5143 qla4xxx_prevent_other_port_reinit(ha);
bee4fe8e 5144
ed1086e0
VC
5145 /* destroy iface from sysfs */
5146 qla4xxx_destroy_ifaces(ha);
5147
13483730 5148 if ((!ql4xdisablesysfsboot) && ha->boot_kset)
2a991c21
MR
5149 iscsi_boot_destroy_kset(ha->boot_kset);
5150
13483730
MC
5151 qla4xxx_destroy_fw_ddb_session(ha);
5152
afaf5a2d
DS
5153 scsi_remove_host(ha->host);
5154
5155 qla4xxx_free_adapter(ha);
5156
5157 scsi_host_put(ha->host);
5158
2232be0d 5159 pci_disable_pcie_error_reporting(pdev);
f4f5df23 5160 pci_disable_device(pdev);
afaf5a2d
DS
5161 pci_set_drvdata(pdev, NULL);
5162}
5163
5164/**
5165 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
5166 * @ha: HA context
5167 *
5168 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
5169 * supported addressing method.
5170 */
47975477 5171static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
afaf5a2d
DS
5172{
5173 int retval;
5174
5175 /* Update our PCI device dma_mask for full 64 bit mask */
6a35528a
YH
5176 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
5177 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
afaf5a2d
DS
5178 dev_dbg(&ha->pdev->dev,
5179 "Failed to set 64 bit PCI consistent mask; "
5180 "using 32 bit.\n");
5181 retval = pci_set_consistent_dma_mask(ha->pdev,
284901a9 5182 DMA_BIT_MASK(32));
afaf5a2d
DS
5183 }
5184 } else
284901a9 5185 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
afaf5a2d
DS
5186}
5187
5188static int qla4xxx_slave_alloc(struct scsi_device *sdev)
5189{
b3a271a9
MR
5190 struct iscsi_cls_session *cls_sess;
5191 struct iscsi_session *sess;
5192 struct ddb_entry *ddb;
8bb4033d 5193 int queue_depth = QL4_DEF_QDEPTH;
afaf5a2d 5194
b3a271a9
MR
5195 cls_sess = starget_to_session(sdev->sdev_target);
5196 sess = cls_sess->dd_data;
5197 ddb = sess->dd_data;
5198
afaf5a2d
DS
5199 sdev->hostdata = ddb;
5200 sdev->tagged_supported = 1;
8bb4033d
VC
5201
5202 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
5203 queue_depth = ql4xmaxqdepth;
5204
5205 scsi_activate_tcq(sdev, queue_depth);
afaf5a2d
DS
5206 return 0;
5207}
5208
5209static int qla4xxx_slave_configure(struct scsi_device *sdev)
5210{
5211 sdev->tagged_supported = 1;
5212 return 0;
5213}
5214
5215static void qla4xxx_slave_destroy(struct scsi_device *sdev)
5216{
5217 scsi_deactivate_tcq(sdev, 1);
5218}
5219
5220/**
5221 * qla4xxx_del_from_active_array - returns an active srb
5222 * @ha: Pointer to host adapter structure.
fd589a8f 5223 * @index: index into the active_array
afaf5a2d
DS
5224 *
5225 * This routine removes and returns the srb at the specified index
5226 **/
f4f5df23
VC
5227struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
5228 uint32_t index)
afaf5a2d
DS
5229{
5230 struct srb *srb = NULL;
5369887a 5231 struct scsi_cmnd *cmd = NULL;
afaf5a2d 5232
5369887a
VC
5233 cmd = scsi_host_find_tag(ha->host, index);
5234 if (!cmd)
afaf5a2d
DS
5235 return srb;
5236
5369887a
VC
5237 srb = (struct srb *)CMD_SP(cmd);
5238 if (!srb)
afaf5a2d
DS
5239 return srb;
5240
5241 /* Return the srb's request-queue entries and mark its command inactive */
5242 if (srb->flags & SRB_DMA_VALID) {
5243 ha->req_q_count += srb->iocb_cnt;
5244 ha->iocb_cnt -= srb->iocb_cnt;
5245 if (srb->cmd)
5369887a
VC
5246 srb->cmd->host_scribble =
5247 (unsigned char *)(unsigned long) MAX_SRBS;
afaf5a2d
DS
5248 }
5249 return srb;
5250}
5251
afaf5a2d
DS
5252/**
5253 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
09a0f719 5254 * @ha: Pointer to host adapter structure.
afaf5a2d
DS
5255 * @cmd: Scsi Command to wait on.
5256 *
5257 * This routine waits, up to a maximum timeout, for the command to be
5258 * returned by the firmware.
5259 **/
5260static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
5261 struct scsi_cmnd *cmd)
5262{
5263 int done = 0;
5264 struct srb *rp;
5265 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
2232be0d
LC
5266 int ret = SUCCESS;
5267
5268 /* Don't wait on the command if a PCI error is being handled
5269 * by the PCI AER driver
5270 */
5271 if (unlikely(pci_channel_offline(ha->pdev)) ||
5272 (test_bit(AF_EEH_BUSY, &ha->flags))) {
5273 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
5274 ha->host_no, __func__);
5275 return ret;
5276 }
afaf5a2d
DS
5277
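	/*
	 * Poll every two seconds, for at most EH_WAIT_CMD_TOV iterations,
	 * until the completion path clears CMD_SP(cmd), i.e. the firmware
	 * has handed the command back to the OS.
	 */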
5278 do {
5279 /* Check whether the command has been returned to the OS */
5369887a 5280 rp = (struct srb *) CMD_SP(cmd);
afaf5a2d
DS
5281 if (rp == NULL) {
5282 done++;
5283 break;
5284 }
5285
5286 msleep(2000);
5287 } while (max_wait_time--);
5288
5289 return done;
5290}
5291
5292/**
5293 * qla4xxx_wait_for_hba_online - waits for HBA to come online
5294 * @ha: Pointer to host adapter structure
5295 **/
5296static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
5297{
5298 unsigned long wait_online;
5299
f581a3f7 5300 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
afaf5a2d
DS
5301 while (time_before(jiffies, wait_online)) {
5302
5303 if (adapter_up(ha))
5304 return QLA_SUCCESS;
afaf5a2d
DS
5305
5306 msleep(2000);
5307 }
5308
5309 return QLA_ERROR;
5310}
5311
5312/**
ce545039 5313 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
fd589a8f 5314 * @ha: pointer to HBA
afaf5a2d
DS
5315 * @stgt: pointer to SCSI target to wait on
5316 * @sdev: pointer to SCSI device to wait on, or NULL for every device on @stgt
5317 *
5318 * This function waits for all outstanding commands to the target (or single
5319 * device) to complete.  It returns 0 if all pending commands are returned and 1 otherwise.
5320 **/
ce545039
MC
5321static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
5322 struct scsi_target *stgt,
5323 struct scsi_device *sdev)
afaf5a2d
DS
5324{
5325 int cnt;
5326 int status = 0;
5327 struct scsi_cmnd *cmd;
5328
5329 /*
ce545039
MC
5330 * Waiting for all commands for the designated target or dev
5331 * in the active array
afaf5a2d
DS
5332 */
5333 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
5334 cmd = scsi_host_find_tag(ha->host, cnt);
ce545039
MC
5335 if (cmd && stgt == scsi_target(cmd->device) &&
5336 (!sdev || sdev == cmd->device)) {
afaf5a2d
DS
5337 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5338 status++;
5339 break;
5340 }
5341 }
5342 }
5343 return status;
5344}
5345
09a0f719
VC
5346/**
5347 * qla4xxx_eh_abort - callback for abort task.
5348 * @cmd: Pointer to Linux's SCSI command structure
5349 *
5350 * This routine is called by the Linux OS to abort the specified
5351 * command.
5352 **/
5353static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
5354{
5355 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5356 unsigned int id = cmd->device->id;
5357 unsigned int lun = cmd->device->lun;
92b3e5bb 5358 unsigned long flags;
09a0f719
VC
5359 struct srb *srb = NULL;
5360 int ret = SUCCESS;
5361 int wait = 0;
5362
c2660df3 5363 ql4_printk(KERN_INFO, ha,
5cd049a5
CH
5364 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
5365 ha->host_no, id, lun, cmd);
09a0f719 5366
92b3e5bb 5367 spin_lock_irqsave(&ha->hardware_lock, flags);
09a0f719 5368 srb = (struct srb *) CMD_SP(cmd);
92b3e5bb
MC
5369 if (!srb) {
5370 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09a0f719 5371 return SUCCESS;
92b3e5bb 5372 }
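	/*
	 * Take a reference on the srb while still holding hardware_lock so
	 * that a racing completion cannot free it underneath the abort
	 * mailbox command below.
	 */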
09a0f719 5373 kref_get(&srb->srb_ref);
92b3e5bb 5374 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09a0f719
VC
5375
5376 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
5377 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
5378 ha->host_no, id, lun));
5379 ret = FAILED;
5380 } else {
5381 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
5382 ha->host_no, id, lun));
5383 wait = 1;
5384 }
5385
5386 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
5387
5388 /* Wait for command to complete */
5389 if (wait) {
5390 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
5391 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
5392 ha->host_no, id, lun));
5393 ret = FAILED;
5394 }
5395 }
5396
c2660df3 5397 ql4_printk(KERN_INFO, ha,
09a0f719 5398 "scsi%ld:%d:%d: Abort command - %s\n",
25985edc 5399 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
09a0f719
VC
5400
5401 return ret;
5402}
5403
afaf5a2d
DS
5404/**
5405 * qla4xxx_eh_device_reset - callback for device (LUN) reset.
5406 * @cmd: Pointer to Linux's SCSI command structure
5407 *
5408 * This routine is called by the Linux OS to reset the lun (device)
5409 * associated with the specified command.
5410 **/
5411static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
5412{
5413 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5414 struct ddb_entry *ddb_entry = cmd->device->hostdata;
afaf5a2d
DS
5415 int ret = FAILED, stat;
5416
612f7348 5417 if (!ddb_entry)
afaf5a2d
DS
5418 return ret;
5419
c01be6dc
MC
5420 ret = iscsi_block_scsi_eh(cmd);
5421 if (ret)
5422 return ret;
5423 ret = FAILED;
5424
c2660df3 5425 ql4_printk(KERN_INFO, ha,
afaf5a2d
DS
5426 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
5427 cmd->device->channel, cmd->device->id, cmd->device->lun);
5428
5429 DEBUG2(printk(KERN_INFO
5430 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
5431 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
242f9dcb 5432 cmd, jiffies, cmd->request->timeout / HZ,
afaf5a2d
DS
5433 ha->dpc_flags, cmd->result, cmd->allowed));
5434
5435 /* FIXME: wait for hba to go online */
5436 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
5437 if (stat != QLA_SUCCESS) {
c2660df3 5438 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
afaf5a2d
DS
5439 goto eh_dev_reset_done;
5440 }
5441
ce545039
MC
5442 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5443 cmd->device)) {
c2660df3 5444 ql4_printk(KERN_INFO, ha,
ce545039
MC
5445 "DEVICE RESET FAILED - waiting for "
5446 "commands.\n");
5447 goto eh_dev_reset_done;
afaf5a2d
DS
5448 }
5449
9d562913
DS
5450 /* Send marker. */
5451 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5452 MM_LUN_RESET) != QLA_SUCCESS)
5453 goto eh_dev_reset_done;
5454
c2660df3 5455 ql4_printk(KERN_INFO, ha,
afaf5a2d
DS
5456 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
5457 ha->host_no, cmd->device->channel, cmd->device->id,
5458 cmd->device->lun);
5459
5460 ret = SUCCESS;
5461
5462eh_dev_reset_done:
5463
5464 return ret;
5465}
5466
ce545039
MC
5467/**
5468 * qla4xxx_eh_target_reset - callback for target reset.
5469 * @cmd: Pointer to Linux's SCSI command structure
5470 *
5471 * This routine is called by the Linux OS to reset the target.
5472 **/
5473static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
5474{
5475 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
5476 struct ddb_entry *ddb_entry = cmd->device->hostdata;
c01be6dc 5477 int stat, ret;
ce545039
MC
5478
5479 if (!ddb_entry)
5480 return FAILED;
5481
c01be6dc
MC
5482 ret = iscsi_block_scsi_eh(cmd);
5483 if (ret)
5484 return ret;
5485
ce545039
MC
5486 starget_printk(KERN_INFO, scsi_target(cmd->device),
5487 "WARM TARGET RESET ISSUED.\n");
5488
5489 DEBUG2(printk(KERN_INFO
5490 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
5491 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
242f9dcb 5492 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
ce545039
MC
5493 ha->dpc_flags, cmd->result, cmd->allowed));
5494
5495 stat = qla4xxx_reset_target(ha, ddb_entry);
5496 if (stat != QLA_SUCCESS) {
5497 starget_printk(KERN_INFO, scsi_target(cmd->device),
5498 "WARM TARGET RESET FAILED.\n");
5499 return FAILED;
5500 }
5501
ce545039
MC
5502 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
5503 NULL)) {
5504 starget_printk(KERN_INFO, scsi_target(cmd->device),
5505 "WARM TARGET DEVICE RESET FAILED - "
5506 "waiting for commands.\n");
5507 return FAILED;
5508 }
5509
9d562913
DS
5510 /* Send marker. */
5511 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
5512 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
5513 starget_printk(KERN_INFO, scsi_target(cmd->device),
5514 "WARM TARGET DEVICE RESET FAILED - "
5515 "marker iocb failed.\n");
5516 return FAILED;
5517 }
5518
ce545039
MC
5519 starget_printk(KERN_INFO, scsi_target(cmd->device),
5520 "WARM TARGET RESET SUCCEEDED.\n");
5521 return SUCCESS;
5522}
5523
8a288960
SR
5524/**
5525 * qla4xxx_is_eh_active - check if error handler is running
5526 * @shost: Pointer to SCSI Host struct
5527 *
5528 * This routine determines whether the host reset was invoked from the
5529 * SCSI error-handler path or from an application such as sg_reset.
5530 **/
5531static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
5532{
5533 if (shost->shost_state == SHOST_RECOVERY)
5534 return 1;
5535 return 0;
5536}
5537
afaf5a2d
DS
5538/**
5539 * qla4xxx_eh_host_reset - kernel callback
5540 * @cmd: Pointer to Linux's SCSI command structure
5541 *
5542 * This routine is invoked by the Linux kernel to perform fatal error
5543 * recovery on the specified adapter.
5544 **/
5545static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
5546{
5547 int return_status = FAILED;
5548 struct scsi_qla_host *ha;
5549
b3a271a9 5550 ha = to_qla_host(cmd->device->host);
afaf5a2d 5551
f4f5df23
VC
5552 if (ql4xdontresethba) {
5553 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5554 ha->host_no, __func__));
8a288960
SR
5555
5556 /* Clear outstanding srb in queues */
5557 if (qla4xxx_is_eh_active(cmd->device->host))
5558 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
5559
f4f5df23
VC
5560 return FAILED;
5561 }
5562
c2660df3 5563 ql4_printk(KERN_INFO, ha,
dca05c4c 5564 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
afaf5a2d
DS
5565 cmd->device->channel, cmd->device->id, cmd->device->lun);
5566
5567 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
5568 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
5569 "DEAD.\n", ha->host_no, cmd->device->channel,
5570 __func__));
5571
5572 return FAILED;
5573 }
5574
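	/*
	 * Mark which kind of reset is required: ISP82xx parts recover by
	 * resetting the firmware context, all other ISPs take a full HBA
	 * reset; then run adapter recovery synchronously.
	 */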
f4f5df23
VC
5575 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5576 if (is_qla8022(ha))
5577 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5578 else
5579 set_bit(DPC_RESET_HA, &ha->dpc_flags);
5580 }
50a29aec 5581
f4f5df23 5582 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
afaf5a2d 5583 return_status = SUCCESS;
afaf5a2d 5584
c2660df3 5585 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
25985edc 5586 return_status == FAILED ? "FAILED" : "SUCCEEDED");
afaf5a2d
DS
5587
5588 return return_status;
5589}
5590
95d31262
VC
5591static int qla4xxx_context_reset(struct scsi_qla_host *ha)
5592{
5593 uint32_t mbox_cmd[MBOX_REG_COUNT];
5594 uint32_t mbox_sts[MBOX_REG_COUNT];
5595 struct addr_ctrl_blk_def *acb = NULL;
5596 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
5597 int rval = QLA_SUCCESS;
5598 dma_addr_t acb_dma;
5599
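	/*
	 * Reset the firmware context without a full chip reset: read back
	 * the primary ACB (address control block), disable it, wait for the
	 * disable-ACB completion, then program the saved ACB again.
	 */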
5600 acb = dma_alloc_coherent(&ha->pdev->dev,
5601 sizeof(struct addr_ctrl_blk_def),
5602 &acb_dma, GFP_KERNEL);
5603 if (!acb) {
5604 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
5605 __func__);
5606 rval = -ENOMEM;
5607 goto exit_port_reset;
5608 }
5609
5610 memset(acb, 0, acb_len);
5611
5612 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
5613 if (rval != QLA_SUCCESS) {
5614 rval = -EIO;
5615 goto exit_free_acb;
5616 }
5617
5618 rval = qla4xxx_disable_acb(ha);
5619 if (rval != QLA_SUCCESS) {
5620 rval = -EIO;
5621 goto exit_free_acb;
5622 }
5623
5624 wait_for_completion_timeout(&ha->disable_acb_comp,
5625 DISABLE_ACB_TOV * HZ);
5626
5627 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
5628 if (rval != QLA_SUCCESS) {
5629 rval = -EIO;
5630 goto exit_free_acb;
5631 }
5632
5633exit_free_acb:
5634 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
5635 acb, acb_dma);
5636exit_port_reset:
5637 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
5638 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
5639 return rval;
5640}
5641
5642static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
5643{
5644 struct scsi_qla_host *ha = to_qla_host(shost);
5645 int rval = QLA_SUCCESS;
5646
5647 if (ql4xdontresethba) {
5648 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
5649 __func__));
5650 rval = -EPERM;
5651 goto exit_host_reset;
5652 }
5653
5654 rval = qla4xxx_wait_for_hba_online(ha);
5655 if (rval != QLA_SUCCESS) {
5656 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
5657 "adapter\n", __func__));
5658 rval = -EIO;
5659 goto exit_host_reset;
5660 }
5661
5662 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
5663 goto recover_adapter;
5664
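	/*
	 * SCSI_ADAPTER_RESET always requests a full HBA reset.
	 * SCSI_FIRMWARE_RESET only resets the firmware context: via the
	 * DPC flag on ISP82xx, or by cycling the ACB on other ISPs.
	 */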
5665 switch (reset_type) {
5666 case SCSI_ADAPTER_RESET:
5667 set_bit(DPC_RESET_HA, &ha->dpc_flags);
5668 break;
5669 case SCSI_FIRMWARE_RESET:
5670 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5671 if (is_qla8022(ha))
5672 /* set firmware context reset */
5673 set_bit(DPC_RESET_HA_FW_CONTEXT,
5674 &ha->dpc_flags);
5675 else {
5676 rval = qla4xxx_context_reset(ha);
5677 goto exit_host_reset;
5678 }
5679 }
5680 break;
5681 }
5682
5683recover_adapter:
5684 rval = qla4xxx_recover_adapter(ha);
5685 if (rval != QLA_SUCCESS) {
5686 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
5687 __func__));
5688 rval = -EIO;
5689 }
5690
5691exit_host_reset:
5692 return rval;
5693}
5694
2232be0d
LC
5695/* The PCI AER driver recovers from all correctable errors without
5696 * driver intervention.  For uncorrectable errors it invokes the
5697 * following device-driver callbacks:
5698 *
5699 * - Fatal Errors - link_reset
5700 * - Non-Fatal Errors - driver's pci_error_detected() which
5701 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
5702 *
5703 * PCI AER driver calls
5704 * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
5705 * returns RECOVERED or NEED_RESET if fw_hung
5706 * NEED_RESET - driver's slot_reset()
5707 * DISCONNECT - device is dead & cannot recover
5708 * RECOVERED - driver's pci_resume()
5709 */
5710static pci_ers_result_t
5711qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5712{
5713 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5714
5715 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
5716 ha->host_no, __func__, state);
5717
5718 if (!is_aer_supported(ha))
5719 return PCI_ERS_RESULT_NONE;
5720
5721 switch (state) {
5722 case pci_channel_io_normal:
5723 clear_bit(AF_EEH_BUSY, &ha->flags);
5724 return PCI_ERS_RESULT_CAN_RECOVER;
5725 case pci_channel_io_frozen:
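		/*
		 * I/O to the device is frozen: flag EEH recovery in
		 * progress, complete any mailbox command being waited on,
		 * release the IRQs, and return outstanding commands with
		 * DID_RESET so the midlayer retries them once the slot has
		 * been reset.
		 */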
5726 set_bit(AF_EEH_BUSY, &ha->flags);
5727 qla4xxx_mailbox_premature_completion(ha);
5728 qla4xxx_free_irqs(ha);
5729 pci_disable_device(pdev);
7b3595df
VC
5730 /* Return back all IOs */
5731 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2232be0d
LC
5732 return PCI_ERS_RESULT_NEED_RESET;
5733 case pci_channel_io_perm_failure:
5734 set_bit(AF_EEH_BUSY, &ha->flags);
5735 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
5736 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
5737 return PCI_ERS_RESULT_DISCONNECT;
5738 }
5739 return PCI_ERS_RESULT_NEED_RESET;
5740}
5741
5742/**
5743 * qla4xxx_pci_mmio_enabled() gets called if
5744 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
5745 * and read/write to the device still works.
5746 **/
5747static pci_ers_result_t
5748qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
5749{
5750 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5751
5752 if (!is_aer_supported(ha))
5753 return PCI_ERS_RESULT_NONE;
5754
7b3595df 5755 return PCI_ERS_RESULT_RECOVERED;
2232be0d
LC
5756}
5757
7b3595df 5758static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2232be0d
LC
5759{
5760 uint32_t rval = QLA_ERROR;
7b3595df 5761 uint32_t ret = 0;
2232be0d
LC
5762 int fn;
5763 struct pci_dev *other_pdev = NULL;
5764
5765 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
5766
5767 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
5768
5769 if (test_bit(AF_ONLINE, &ha->flags)) {
5770 clear_bit(AF_ONLINE, &ha->flags);
b3a271a9
MR
5771 clear_bit(AF_LINK_UP, &ha->flags);
5772 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2232be0d 5773 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2232be0d
LC
5774 }
5775
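	/*
	 * Walk the lower-numbered PCI functions of this device.  If one of
	 * them is already enabled, that function owns the reset; if none is
	 * found, fn ends up at 0 and this function becomes the reset owner.
	 */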
5776 fn = PCI_FUNC(ha->pdev->devfn);
5777 while (fn > 0) {
5778 fn--;
5779 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
5780 "func %x\n", ha->host_no, __func__, fn);
5781 /* Get the pci device given the domain, bus,
5782 * slot/function number */
5783 other_pdev =
5784 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
5785 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
5786 fn));
5787
5788 if (!other_pdev)
5789 continue;
5790
5791 if (atomic_read(&other_pdev->enable_cnt)) {
5792 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
5793 "func in enabled state%x\n", ha->host_no,
5794 __func__, fn);
5795 pci_dev_put(other_pdev);
5796 break;
5797 }
5798 pci_dev_put(other_pdev);
5799 }
5800
5801 /* The first function on the card (the reset owner) starts and
5802 * initializes the firmware.  The other functions on the card
5803 * only reset their firmware context.
5804 */
5805 if (!fn) {
5806 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
5807 "0x%x is the owner\n", ha->host_no, __func__,
5808 ha->pdev->devfn);
5809
5810 qla4_8xxx_idc_lock(ha);
5811 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
5812 QLA82XX_DEV_COLD);
5813
5814 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
5815 QLA82XX_IDC_VERSION);
5816
5817 qla4_8xxx_idc_unlock(ha);
5818 clear_bit(AF_FW_RECOVERY, &ha->flags);
13483730 5819 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
2232be0d
LC
5820 qla4_8xxx_idc_lock(ha);
5821
5822 if (rval != QLA_SUCCESS) {
5823 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
5824 "FAILED\n", ha->host_no, __func__);
5825 qla4_8xxx_clear_drv_active(ha);
5826 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
5827 QLA82XX_DEV_FAILED);
5828 } else {
5829 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
5830 "READY\n", ha->host_no, __func__);
5831 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
5832 QLA82XX_DEV_READY);
5833 /* Clear driver state register */
5834 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
5835 qla4_8xxx_set_drv_active(ha);
7b3595df
VC
5836 ret = qla4xxx_request_irqs(ha);
5837 if (ret) {
5838 ql4_printk(KERN_WARNING, ha, "Failed to "
5839 "reserve interrupt %d already in use.\n",
5840 ha->pdev->irq);
5841 rval = QLA_ERROR;
5842 } else {
5843 ha->isp_ops->enable_intrs(ha);
5844 rval = QLA_SUCCESS;
5845 }
2232be0d
LC
5846 }
5847 qla4_8xxx_idc_unlock(ha);
5848 } else {
5849 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
5850 "the reset owner\n", ha->host_no, __func__,
5851 ha->pdev->devfn);
5852 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
5853 QLA82XX_DEV_READY)) {
5854 clear_bit(AF_FW_RECOVERY, &ha->flags);
13483730 5855 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
7b3595df
VC
5856 if (rval == QLA_SUCCESS) {
5857 ret = qla4xxx_request_irqs(ha);
5858 if (ret) {
5859 ql4_printk(KERN_WARNING, ha, "Failed to"
5860 " reserve interrupt %d already in"
5861 " use.\n", ha->pdev->irq);
5862 rval = QLA_ERROR;
5863 } else {
5864 ha->isp_ops->enable_intrs(ha);
5865 rval = QLA_SUCCESS;
5866 }
5867 }
2232be0d
LC
5868 qla4_8xxx_idc_lock(ha);
5869 qla4_8xxx_set_drv_active(ha);
5870 qla4_8xxx_idc_unlock(ha);
5871 }
5872 }
5873 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
5874 return rval;
5875}
5876
5877static pci_ers_result_t
5878qla4xxx_pci_slot_reset(struct pci_dev *pdev)
5879{
5880 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
5881 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5882 int rc;
5883
5884 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
5885 ha->host_no, __func__);
5886
5887 if (!is_aer_supported(ha))
5888 return PCI_ERS_RESULT_NONE;
5889
5890 /* Restore the saved state of PCIe device -
5891 * BAR registers, PCI Config space, PCIX, MSI,
5892 * IOV states
5893 */
5894 pci_restore_state(pdev);
5895
5896 /* pci_restore_state() clears the device's saved_state flag, so
5897 * save the state again here to set it for any future restore.
5898 */
5899 pci_save_state(pdev);
5900
5901 /* Initialize device or resume if in suspended state */
5902 rc = pci_enable_device(pdev);
5903 if (rc) {
25985edc 5904 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
2232be0d
LC
5905 "device after reset\n", ha->host_no, __func__);
5906 goto exit_slot_reset;
5907 }
5908
7b3595df 5909 ha->isp_ops->disable_intrs(ha);
2232be0d
LC
5910
5911 if (is_qla8022(ha)) {
5912 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
5913 ret = PCI_ERS_RESULT_RECOVERED;
5914 goto exit_slot_reset;
5915 } else
5916 goto exit_slot_reset;
5917 }
5918
5919exit_slot_reset:
5920 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
5921 ha->host_no, __func__, ret);
5922 return ret;
5923}
5924
5925static void
5926qla4xxx_pci_resume(struct pci_dev *pdev)
5927{
5928 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
5929 int ret;
5930
5931 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
5932 ha->host_no, __func__);
5933
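	/*
	 * Normal I/O may resume now: wait for the adapter to come back
	 * online, clear any logged uncorrectable AER status, and drop the
	 * EEH-busy flag so command processing restarts.
	 */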
5934 ret = qla4xxx_wait_for_hba_online(ha);
5935 if (ret != QLA_SUCCESS) {
5936 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
5937 "resume I/O from slot/link_reset\n", ha->host_no,
5938 __func__);
5939 }
5940
5941 pci_cleanup_aer_uncorrect_error_status(pdev);
5942 clear_bit(AF_EEH_BUSY, &ha->flags);
5943}
5944
5945static struct pci_error_handlers qla4xxx_err_handler = {
5946 .error_detected = qla4xxx_pci_error_detected,
5947 .mmio_enabled = qla4xxx_pci_mmio_enabled,
5948 .slot_reset = qla4xxx_pci_slot_reset,
5949 .resume = qla4xxx_pci_resume,
5950};
5951
afaf5a2d
DS
5952static struct pci_device_id qla4xxx_pci_tbl[] = {
5953 {
5954 .vendor = PCI_VENDOR_ID_QLOGIC,
5955 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
5956 .subvendor = PCI_ANY_ID,
5957 .subdevice = PCI_ANY_ID,
5958 },
5959 {
5960 .vendor = PCI_VENDOR_ID_QLOGIC,
5961 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
5962 .subvendor = PCI_ANY_ID,
5963 .subdevice = PCI_ANY_ID,
5964 },
d915058f
DS
5965 {
5966 .vendor = PCI_VENDOR_ID_QLOGIC,
5967 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
5968 .subvendor = PCI_ANY_ID,
5969 .subdevice = PCI_ANY_ID,
5970 },
f4f5df23
VC
5971 {
5972 .vendor = PCI_VENDOR_ID_QLOGIC,
5973 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
5974 .subvendor = PCI_ANY_ID,
5975 .subdevice = PCI_ANY_ID,
5976 },
afaf5a2d
DS
5977 {0, 0},
5978};
5979MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
5980
47975477 5981static struct pci_driver qla4xxx_pci_driver = {
afaf5a2d
DS
5982 .name = DRIVER_NAME,
5983 .id_table = qla4xxx_pci_tbl,
5984 .probe = qla4xxx_probe_adapter,
5985 .remove = qla4xxx_remove_adapter,
2232be0d 5986 .err_handler = &qla4xxx_err_handler,
afaf5a2d
DS
5987};
5988
5989static int __init qla4xxx_module_init(void)
5990{
5991 int ret;
5992
5993 /* Allocate cache for SRBs. */
5994 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
20c2df83 5995 SLAB_HWCACHE_ALIGN, NULL);
afaf5a2d
DS
5996 if (srb_cachep == NULL) {
5997 printk(KERN_ERR
5998 "%s: Unable to allocate SRB cache..."
5999 "Failing load!\n", DRIVER_NAME);
6000 ret = -ENOMEM;
6001 goto no_srp_cache;
6002 }
6003
6004 /* Derive version string. */
6005 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
11010fec 6006 if (ql4xextended_error_logging)
afaf5a2d
DS
6007 strcat(qla4xxx_version_str, "-debug");
6008
6009 qla4xxx_scsi_transport =
6010 iscsi_register_transport(&qla4xxx_iscsi_transport);
6011 if (!qla4xxx_scsi_transport){
6012 ret = -ENODEV;
6013 goto release_srb_cache;
6014 }
6015
afaf5a2d
DS
6016 ret = pci_register_driver(&qla4xxx_pci_driver);
6017 if (ret)
6018 goto unregister_transport;
6019
6020 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
6021 return 0;
5ae16db3 6022
afaf5a2d
DS
6023unregister_transport:
6024 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6025release_srb_cache:
6026 kmem_cache_destroy(srb_cachep);
6027no_srp_cache:
6028 return ret;
6029}
6030
6031static void __exit qla4xxx_module_exit(void)
6032{
6033 pci_unregister_driver(&qla4xxx_pci_driver);
6034 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
6035 kmem_cache_destroy(srb_cachep);
6036}
6037
6038module_init(qla4xxx_module_init);
6039module_exit(qla4xxx_module_exit);
6040
6041MODULE_AUTHOR("QLogic Corporation");
6042MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
6043MODULE_LICENSE("GPL");
6044MODULE_VERSION(QLA4XXX_DRIVER_VERSION);