/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>

#include "ql4_def.h"
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

/*
 * Driver version
 */
static char qla4xxx_version_str[40];

/*
 * SRB allocation cache
 */
static struct kmem_cache *srb_cachep;

/*
 * Module parameter information and variables
 */
int ql4xdontresethba = 0;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
                 "Don't reset the HBA for driver recovery\n"
                 " 0 - It will reset HBA (Default)\n"
                 " 1 - It will NOT reset HBA");

int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
                 "Option to enable extended error logging, "
                 "Default is 0 - no logging, 1 - debug logging");

int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
                 "Set to enable MSI or MSI-X interrupt mechanism.\n"
                 " 0 = enable INTx interrupt mechanism.\n"
                 " 1 = enable MSI-X interrupt mechanism (Default).\n"
                 " 2 = enable MSI interrupt mechanism.");

#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
                 "Maximum queue depth to report for target devices.\n"
                 " Default: 32.");

static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
                 "Target Session Recovery Timeout.\n"
                 " Default: 30 sec.");

static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
 * SCSI host template entry points
 */
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);

/*
 * iSCSI template entry points
 */
static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
                                  enum iscsi_param param, char *buf);
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
                                  enum iscsi_host_param param, char *buf);
static int qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data,
                                   int count);
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
                                   enum iscsi_param_type param_type,
                                   int param, char *buf);
static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
                                                 struct sockaddr *dst_addr,
                                                 int non_blocking);
static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
                                enum iscsi_param param, char *buf);
static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
                             struct iscsi_cls_conn *cls_conn,
                             uint64_t transport_fd, int is_leading);
static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
                       uint16_t qdepth, uint32_t initial_cmdsn);
static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
static void qla4xxx_task_work(struct work_struct *wdata);
static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
static int qla4xxx_task_xmit(struct iscsi_task *);
static void qla4xxx_task_cleanup(struct iscsi_task *);
static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
                                   struct iscsi_stats *stats);
/*
 * SCSI host template entry points
 */
static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
static mode_t ql4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);

static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
    QLA82XX_LEGACY_INTR_CONFIG;

static struct scsi_host_template qla4xxx_driver_template = {
    .module                  = THIS_MODULE,
    .name                    = DRIVER_NAME,
    .proc_name               = DRIVER_NAME,
    .queuecommand            = qla4xxx_queuecommand,

    .eh_abort_handler        = qla4xxx_eh_abort,
    .eh_device_reset_handler = qla4xxx_eh_device_reset,
    .eh_target_reset_handler = qla4xxx_eh_target_reset,
    .eh_host_reset_handler   = qla4xxx_eh_host_reset,
    .eh_timed_out            = qla4xxx_eh_cmd_timed_out,

    .slave_configure         = qla4xxx_slave_configure,
    .slave_alloc             = qla4xxx_slave_alloc,
    .slave_destroy           = qla4xxx_slave_destroy,

    .this_id                 = -1,
    .cmd_per_lun             = 3,
    .use_clustering          = ENABLE_CLUSTERING,
    .sg_tablesize            = SG_ALL,

    .max_sectors             = 0xFFFF,
    .shost_attrs             = qla4xxx_host_attrs,
    .host_reset              = qla4xxx_host_reset,
    .vendor_id               = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
};

static struct iscsi_transport qla4xxx_iscsi_transport = {
    .owner             = THIS_MODULE,
    .name              = DRIVER_NAME,
    .caps              = CAP_TEXT_NEGO |
                         CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
                         CAP_DATADGST | CAP_LOGIN_OFFLOAD |
                         CAP_MULTI_R2T,
    .attr_is_visible   = ql4_attr_is_visible,
    .create_session    = qla4xxx_session_create,
    .destroy_session   = qla4xxx_session_destroy,
    .start_conn        = qla4xxx_conn_start,
    .create_conn       = qla4xxx_conn_create,
    .bind_conn         = qla4xxx_conn_bind,
    .stop_conn         = iscsi_conn_stop,
    .destroy_conn      = qla4xxx_conn_destroy,
    .set_param         = iscsi_set_param,
    .get_conn_param    = qla4xxx_conn_get_param,
    .get_session_param = iscsi_session_get_param,
    .get_ep_param      = qla4xxx_get_ep_param,
    .ep_connect        = qla4xxx_ep_connect,
    .ep_poll           = qla4xxx_ep_poll,
    .ep_disconnect     = qla4xxx_ep_disconnect,
    .get_stats         = qla4xxx_conn_get_stats,
    .send_pdu          = iscsi_conn_send_pdu,
    .xmit_task         = qla4xxx_task_xmit,
    .cleanup_task      = qla4xxx_task_cleanup,
    .alloc_pdu         = qla4xxx_alloc_pdu,

    .get_host_param    = qla4xxx_host_get_param,
    .set_iface_param   = qla4xxx_iface_set_param,
    .get_iface_param   = qla4xxx_get_iface_param,
    .bsg_request       = qla4xxx_bsg_request,
};

static struct scsi_transport_template *qla4xxx_scsi_transport;

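/*
 * Tell the iSCSI transport class which host, session and network
 * parameters this driver exposes through sysfs; anything not listed
 * below stays hidden.
 */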
static mode_t ql4_attr_is_visible(int param_type, int param)
{
    switch (param_type) {
    case ISCSI_HOST_PARAM:
        switch (param) {
        case ISCSI_HOST_PARAM_HWADDRESS:
        case ISCSI_HOST_PARAM_IPADDRESS:
        case ISCSI_HOST_PARAM_INITIATOR_NAME:
            return S_IRUGO;
        default:
            return 0;
        }
    case ISCSI_PARAM:
        switch (param) {
        case ISCSI_PARAM_CONN_ADDRESS:
        case ISCSI_PARAM_CONN_PORT:
        case ISCSI_PARAM_TARGET_NAME:
        case ISCSI_PARAM_TPGT:
        case ISCSI_PARAM_TARGET_ALIAS:
        case ISCSI_PARAM_MAX_BURST:
        case ISCSI_PARAM_MAX_R2T:
        case ISCSI_PARAM_FIRST_BURST:
        case ISCSI_PARAM_MAX_RECV_DLENGTH:
        case ISCSI_PARAM_MAX_XMIT_DLENGTH:
        case ISCSI_PARAM_IFACE_NAME:
            return S_IRUGO;
        default:
            return 0;
        }
    case ISCSI_NET_PARAM:
        switch (param) {
        case ISCSI_NET_PARAM_IPV4_ADDR:
        case ISCSI_NET_PARAM_IPV4_SUBNET:
        case ISCSI_NET_PARAM_IPV4_GW:
        case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
        case ISCSI_NET_PARAM_IFACE_ENABLE:
        case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
        case ISCSI_NET_PARAM_IPV6_ADDR:
        case ISCSI_NET_PARAM_IPV6_ROUTER:
        case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
        case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
        case ISCSI_NET_PARAM_VLAN_ID:
        case ISCSI_NET_PARAM_VLAN_PRIORITY:
        case ISCSI_NET_PARAM_VLAN_ENABLED:
        case ISCSI_NET_PARAM_MTU:
        case ISCSI_NET_PARAM_PORT:
            return S_IRUGO;
        default:
            return 0;
        }
    }

    return 0;
}

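/*
 * Report a network-interface parameter (addresses, VLAN, MTU, port, ...)
 * from the adapter's cached IP configuration to the iSCSI transport class.
 */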
static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
                                   enum iscsi_param_type param_type,
                                   int param, char *buf)
{
    struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
    struct scsi_qla_host *ha = to_qla_host(shost);
    int len = -ENOSYS;

    if (param_type != ISCSI_NET_PARAM)
        return -ENOSYS;

    switch (param) {
    case ISCSI_NET_PARAM_IPV4_ADDR:
        len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
        break;
    case ISCSI_NET_PARAM_IPV4_SUBNET:
        len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
        break;
    case ISCSI_NET_PARAM_IPV4_GW:
        len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
        break;
    case ISCSI_NET_PARAM_IFACE_ENABLE:
        if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
            len = sprintf(buf, "%s\n",
                          (ha->ip_config.ipv4_options &
                           IPOPT_IPV4_PROTOCOL_ENABLE) ?
                          "enabled" : "disabled");
        else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
            len = sprintf(buf, "%s\n",
                          (ha->ip_config.ipv6_options &
                           IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
                          "enabled" : "disabled");
        break;
    case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
        len = sprintf(buf, "%s\n",
                      (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
                      "dhcp" : "static");
        break;
    case ISCSI_NET_PARAM_IPV6_ADDR:
        if (iface->iface_num == 0)
            len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
        if (iface->iface_num == 1)
            len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
        break;
    case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
        len = sprintf(buf, "%pI6\n",
                      &ha->ip_config.ipv6_link_local_addr);
        break;
    case ISCSI_NET_PARAM_IPV6_ROUTER:
        len = sprintf(buf, "%pI6\n",
                      &ha->ip_config.ipv6_default_router_addr);
        break;
    case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
        len = sprintf(buf, "%s\n",
                      (ha->ip_config.ipv6_addl_options &
                       IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
                      "nd" : "static");
        break;
    case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
        len = sprintf(buf, "%s\n",
                      (ha->ip_config.ipv6_addl_options &
                       IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
                      "auto" : "static");
        break;
    case ISCSI_NET_PARAM_VLAN_ID:
        if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
            len = sprintf(buf, "%d\n",
                          (ha->ip_config.ipv4_vlan_tag &
                           ISCSI_MAX_VLAN_ID));
        else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
            len = sprintf(buf, "%d\n",
                          (ha->ip_config.ipv6_vlan_tag &
                           ISCSI_MAX_VLAN_ID));
        break;
    case ISCSI_NET_PARAM_VLAN_PRIORITY:
        if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
            len = sprintf(buf, "%d\n",
                          ((ha->ip_config.ipv4_vlan_tag >> 13) &
                           ISCSI_MAX_VLAN_PRIORITY));
        else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
            len = sprintf(buf, "%d\n",
                          ((ha->ip_config.ipv6_vlan_tag >> 13) &
                           ISCSI_MAX_VLAN_PRIORITY));
        break;
    case ISCSI_NET_PARAM_VLAN_ENABLED:
        if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
            len = sprintf(buf, "%s\n",
                          (ha->ip_config.ipv4_options &
                           IPOPT_VLAN_TAGGING_ENABLE) ?
                          "enabled" : "disabled");
        else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
            len = sprintf(buf, "%s\n",
                          (ha->ip_config.ipv6_options &
                           IPV6_OPT_VLAN_TAGGING_ENABLE) ?
                          "enabled" : "disabled");
        break;
    case ISCSI_NET_PARAM_MTU:
        len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
        break;
    case ISCSI_NET_PARAM_PORT:
        if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
            len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
        else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
            len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
        break;
    default:
        len = -ENOSYS;
    }

    return len;
}

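/*
 * Allocate an iscsi_endpoint, record the destination address and owning
 * host; no connection is opened here -- the firmware logs in later when
 * the connection is started.
 */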
static struct iscsi_endpoint *
qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
                   int non_blocking)
{
    int ret;
    struct iscsi_endpoint *ep;
    struct qla_endpoint *qla_ep;
    struct scsi_qla_host *ha;
    struct sockaddr_in *addr;
    struct sockaddr_in6 *addr6;

    DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
    if (!shost) {
        ret = -ENXIO;
        printk(KERN_ERR "%s: shost is NULL\n", __func__);
        return ERR_PTR(ret);
    }

    ha = iscsi_host_priv(shost);

    ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
    if (!ep) {
        ret = -ENOMEM;
        return ERR_PTR(ret);
    }

    qla_ep = ep->dd_data;
    memset(qla_ep, 0, sizeof(struct qla_endpoint));
    if (dst_addr->sa_family == AF_INET) {
        memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
        addr = (struct sockaddr_in *)&qla_ep->dst_addr;
        DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
                          (char *)&addr->sin_addr));
    } else if (dst_addr->sa_family == AF_INET6) {
        memcpy(&qla_ep->dst_addr, dst_addr,
               sizeof(struct sockaddr_in6));
        addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
        DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
                          (char *)&addr6->sin6_addr));
    }

    qla_ep->host = shost;

    return ep;
}

static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
    struct qla_endpoint *qla_ep;
    struct scsi_qla_host *ha;
    int ret = 0;

    DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
    qla_ep = ep->dd_data;
    ha = to_qla_host(qla_ep->host);

    if (adapter_up(ha))
        ret = 1;

    return ret;
}

static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
{
    DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
    iscsi_destroy_endpoint(ep);
}

static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
                                enum iscsi_param param,
                                char *buf)
{
    struct qla_endpoint *qla_ep = ep->dd_data;
    struct sockaddr *dst_addr;

    DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

    switch (param) {
    case ISCSI_PARAM_CONN_PORT:
    case ISCSI_PARAM_CONN_ADDRESS:
        if (!qla_ep)
            return -ENOTCONN;

        dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
        if (!dst_addr)
            return -ENOTCONN;

        return iscsi_conn_get_addr_param((struct sockaddr_storage *)
                                         &qla_ep->dst_addr, param, buf);
    default:
        return -ENOSYS;
    }
}

static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
                                   struct iscsi_stats *stats)
{
    struct iscsi_session *sess;
    struct iscsi_cls_session *cls_sess;
    struct ddb_entry *ddb_entry;
    struct scsi_qla_host *ha;
    struct ql_iscsi_stats *ql_iscsi_stats;
    int stats_size;
    int ret;
    dma_addr_t iscsi_stats_dma;

    DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

    cls_sess = iscsi_conn_to_session(cls_conn);
    sess = cls_sess->dd_data;
    ddb_entry = sess->dd_data;
    ha = ddb_entry->ha;

    stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
    /* Allocate memory */
    ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
                                        &iscsi_stats_dma, GFP_KERNEL);
    if (!ql_iscsi_stats) {
        ql4_printk(KERN_ERR, ha,
                   "Unable to allocate memory for iscsi stats\n");
        goto exit_get_stats;
    }

    ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
                                iscsi_stats_dma);
    if (ret != QLA_SUCCESS) {
        ql4_printk(KERN_ERR, ha,
                   "Unable to retrieve iscsi stats\n");
        goto free_stats;
    }

    /* octets */
    stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
    stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
    /* xmit pdus */
    stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
    stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
    stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
    stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
    stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
    stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
    stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
    stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
    /* recv pdus */
    stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
    stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
    stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
    stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
    stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
    stats->logoutrsp_pdus =
        le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
    stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
    stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
    stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);

free_stats:
    dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
                      iscsi_stats_dma);
exit_get_stats:
    return;
}

static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
    struct iscsi_cls_session *session;
    struct iscsi_session *sess;
    unsigned long flags;
    enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;

    session = starget_to_session(scsi_target(sc->device));
    sess = session->dd_data;

    spin_lock_irqsave(&session->lock, flags);
    if (session->state == ISCSI_SESSION_FAILED)
        ret = BLK_EH_RESET_TIMER;
    spin_unlock_irqrestore(&session->lock, flags);

    return ret;
}

static int qla4xxx_host_get_param(struct Scsi_Host *shost,
                                  enum iscsi_host_param param, char *buf)
{
    struct scsi_qla_host *ha = to_qla_host(shost);
    int len;

    switch (param) {
    case ISCSI_HOST_PARAM_HWADDRESS:
        len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
        break;
    case ISCSI_HOST_PARAM_IPADDRESS:
        len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
        break;
    case ISCSI_HOST_PARAM_INITIATOR_NAME:
        len = sprintf(buf, "%s\n", ha->name_string);
        break;
    default:
        return -ENOSYS;
    }

    return len;
}

static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
{
    if (ha->iface_ipv4)
        return;

    /* IPv4 */
    ha->iface_ipv4 = iscsi_create_iface(ha->host,
                                        &qla4xxx_iscsi_transport,
                                        ISCSI_IFACE_TYPE_IPV4, 0, 0);
    if (!ha->iface_ipv4)
        ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
                   "iface0.\n");
}

static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
{
    if (!ha->iface_ipv6_0)
        /* IPv6 iface-0 */
        ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
                                              &qla4xxx_iscsi_transport,
                                              ISCSI_IFACE_TYPE_IPV6, 0,
                                              0);
    if (!ha->iface_ipv6_0)
        ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
                   "iface0.\n");

    if (!ha->iface_ipv6_1)
        /* IPv6 iface-1 */
        ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
                                              &qla4xxx_iscsi_transport,
                                              ISCSI_IFACE_TYPE_IPV6, 1,
                                              0);
    if (!ha->iface_ipv6_1)
        ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
                   "iface1.\n");
}

static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
{
    if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
        qla4xxx_create_ipv4_iface(ha);

    if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
        qla4xxx_create_ipv6_iface(ha);
}

static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
{
    if (ha->iface_ipv4) {
        iscsi_destroy_iface(ha->iface_ipv4);
        ha->iface_ipv4 = NULL;
    }
}

static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
{
    if (ha->iface_ipv6_0) {
        iscsi_destroy_iface(ha->iface_ipv6_0);
        ha->iface_ipv6_0 = NULL;
    }
    if (ha->iface_ipv6_1) {
        iscsi_destroy_iface(ha->iface_ipv6_1);
        ha->iface_ipv6_1 = NULL;
    }
}

static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
{
    qla4xxx_destroy_ipv4_iface(ha);
    qla4xxx_destroy_ipv6_iface(ha);
}

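/*
 * Apply a single IPv6 interface parameter handed down by the transport
 * class to the firmware's address control block (init_fw_cb).
 */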
static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
                             struct iscsi_iface_param_info *iface_param,
                             struct addr_ctrl_blk *init_fw_cb)
{
    /*
     * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
     * iface_num 1 is valid only for IPv6 Addr.
     */
    switch (iface_param->param) {
    case ISCSI_NET_PARAM_IPV6_ADDR:
        if (iface_param->iface_num & 0x1)
            /* IPv6 Addr 1 */
            memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
                   sizeof(init_fw_cb->ipv6_addr1));
        else
            /* IPv6 Addr 0 */
            memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
                   sizeof(init_fw_cb->ipv6_addr0));
        break;
    case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
        if (iface_param->iface_num & 0x1)
            break;
        memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
               sizeof(init_fw_cb->ipv6_if_id));
        break;
    case ISCSI_NET_PARAM_IPV6_ROUTER:
        if (iface_param->iface_num & 0x1)
            break;
        memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
               sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
        break;
    case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
        /* Autocfg applies to even interface */
        if (iface_param->iface_num & 0x1)
            break;

        if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
            init_fw_cb->ipv6_addtl_opts &=
                cpu_to_le16(
                    ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
        else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
            init_fw_cb->ipv6_addtl_opts |=
                cpu_to_le16(
                    IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
        else
            ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
                       "IPv6 addr\n");
        break;
    case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
        /* Autocfg applies to even interface */
        if (iface_param->iface_num & 0x1)
            break;

        if (iface_param->value[0] ==
            ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
            init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
                IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
        else if (iface_param->value[0] ==
                 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
            init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
                ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
        else
            ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
                       "IPv6 linklocal addr\n");
        break;
    case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
        /* Autocfg applies to even interface */
        if (iface_param->iface_num & 0x1)
            break;

        if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
            memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
                   sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
        break;
    case ISCSI_NET_PARAM_IFACE_ENABLE:
        if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
            init_fw_cb->ipv6_opts |=
                cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
            qla4xxx_create_ipv6_iface(ha);
        } else {
            init_fw_cb->ipv6_opts &=
                cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
                            0xFFFF);
            qla4xxx_destroy_ipv6_iface(ha);
        }
        break;
    case ISCSI_NET_PARAM_VLAN_ID:
        if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
            break;
        init_fw_cb->ipv6_vlan_tag =
            cpu_to_be16(*(uint16_t *)iface_param->value);
        break;
    case ISCSI_NET_PARAM_VLAN_ENABLED:
        if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
            init_fw_cb->ipv6_opts |=
                cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
        else
            init_fw_cb->ipv6_opts &=
                cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
        break;
    case ISCSI_NET_PARAM_MTU:
        init_fw_cb->eth_mtu_size =
            cpu_to_le16(*(uint16_t *)iface_param->value);
        break;
    case ISCSI_NET_PARAM_PORT:
        /* Autocfg applies to even interface */
        if (iface_param->iface_num & 0x1)
            break;

        init_fw_cb->ipv6_port =
            cpu_to_le16(*(uint16_t *)iface_param->value);
        break;
    default:
        ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
                   iface_param->param);
        break;
    }
}

static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
                             struct iscsi_iface_param_info *iface_param,
                             struct addr_ctrl_blk *init_fw_cb)
{
    switch (iface_param->param) {
    case ISCSI_NET_PARAM_IPV4_ADDR:
        memcpy(init_fw_cb->ipv4_addr, iface_param->value,
               sizeof(init_fw_cb->ipv4_addr));
        break;
    case ISCSI_NET_PARAM_IPV4_SUBNET:
        memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
               sizeof(init_fw_cb->ipv4_subnet));
        break;
    case ISCSI_NET_PARAM_IPV4_GW:
        memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
               sizeof(init_fw_cb->ipv4_gw_addr));
        break;
    case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
        if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
            init_fw_cb->ipv4_tcp_opts |=
                cpu_to_le16(TCPOPT_DHCP_ENABLE);
        else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
            init_fw_cb->ipv4_tcp_opts &=
                cpu_to_le16(~TCPOPT_DHCP_ENABLE);
        else
            ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
        break;
    case ISCSI_NET_PARAM_IFACE_ENABLE:
        if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
            init_fw_cb->ipv4_ip_opts |=
                cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
            qla4xxx_create_ipv4_iface(ha);
        } else {
            init_fw_cb->ipv4_ip_opts &=
                cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
                            0xFFFF);
            qla4xxx_destroy_ipv4_iface(ha);
        }
        break;
    case ISCSI_NET_PARAM_VLAN_ID:
        if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
            break;
        init_fw_cb->ipv4_vlan_tag =
            cpu_to_be16(*(uint16_t *)iface_param->value);
        break;
    case ISCSI_NET_PARAM_VLAN_ENABLED:
        if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
            init_fw_cb->ipv4_ip_opts |=
                cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
        else
            init_fw_cb->ipv4_ip_opts &=
                cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
        break;
    case ISCSI_NET_PARAM_MTU:
        init_fw_cb->eth_mtu_size =
            cpu_to_le16(*(uint16_t *)iface_param->value);
        break;
    case ISCSI_NET_PARAM_PORT:
        init_fw_cb->ipv4_port =
            cpu_to_le16(*(uint16_t *)iface_param->value);
        break;
    default:
        ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
                   iface_param->param);
        break;
    }
}

static void
qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
{
    struct addr_ctrl_blk_def *acb;

    acb = (struct addr_ctrl_blk_def *)init_fw_cb;
    memset(acb->reserved1, 0, sizeof(acb->reserved1));
    memset(acb->reserved2, 0, sizeof(acb->reserved2));
    memset(acb->reserved3, 0, sizeof(acb->reserved3));
    memset(acb->reserved4, 0, sizeof(acb->reserved4));
    memset(acb->reserved5, 0, sizeof(acb->reserved5));
    memset(acb->reserved6, 0, sizeof(acb->reserved6));
    memset(acb->reserved7, 0, sizeof(acb->reserved7));
    memset(acb->reserved8, 0, sizeof(acb->reserved8));
    memset(acb->reserved9, 0, sizeof(acb->reserved9));
    memset(acb->reserved10, 0, sizeof(acb->reserved10));
    memset(acb->reserved11, 0, sizeof(acb->reserved11));
    memset(acb->reserved12, 0, sizeof(acb->reserved12));
    memset(acb->reserved13, 0, sizeof(acb->reserved13));
    memset(acb->reserved14, 0, sizeof(acb->reserved14));
    memset(acb->reserved15, 0, sizeof(acb->reserved15));
}

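/*
 * Apply interface parameters handed down by the transport class: read the
 * current IFCB from the firmware, fold in each parameter, commit the block
 * to flash, then disable the ACB and program the updated one so the new
 * settings take effect.
 */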
static int
qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data, int count)
{
    struct scsi_qla_host *ha = to_qla_host(shost);
    int rval = 0;
    struct iscsi_iface_param_info *iface_param = NULL;
    struct addr_ctrl_blk *init_fw_cb = NULL;
    dma_addr_t init_fw_cb_dma;
    uint32_t mbox_cmd[MBOX_REG_COUNT];
    uint32_t mbox_sts[MBOX_REG_COUNT];
    uint32_t total_param_count;
    uint32_t length;

    init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
                                    sizeof(struct addr_ctrl_blk),
                                    &init_fw_cb_dma, GFP_KERNEL);
    if (!init_fw_cb) {
        ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
                   __func__);
        return -ENOMEM;
    }

    memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
    memset(&mbox_cmd, 0, sizeof(mbox_cmd));
    memset(&mbox_sts, 0, sizeof(mbox_sts));

    if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
        ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
        rval = -EIO;
        goto exit_init_fw_cb;
    }

    total_param_count = count;
    iface_param = (struct iscsi_iface_param_info *)data;

    for ( ; total_param_count != 0; total_param_count--) {
        length = iface_param->len;

        if (iface_param->param_type != ISCSI_NET_PARAM)
            continue;

        switch (iface_param->iface_type) {
        case ISCSI_IFACE_TYPE_IPV4:
            switch (iface_param->iface_num) {
            case 0:
                qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
                break;
            default:
                /* Cannot have more than one IPv4 interface */
                ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
                           "number = %d\n",
                           iface_param->iface_num);
                break;
            }
            break;
        case ISCSI_IFACE_TYPE_IPV6:
            switch (iface_param->iface_num) {
            case 0:
            case 1:
                qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
                break;
            default:
                /* Cannot have more than two IPv6 interfaces */
                ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
                           "number = %d\n",
                           iface_param->iface_num);
                break;
            }
            break;
        default:
            ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
            break;
        }

        iface_param = (struct iscsi_iface_param_info *)
                      ((uint8_t *)iface_param +
                       sizeof(struct iscsi_iface_param_info) + length);
    }

    init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);

    rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
                             sizeof(struct addr_ctrl_blk),
                             FLASH_OPT_RMW_COMMIT);
    if (rval != QLA_SUCCESS) {
        ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
                   __func__);
        rval = -EIO;
        goto exit_init_fw_cb;
    }

    qla4xxx_disable_acb(ha);

    qla4xxx_initcb_to_acb(init_fw_cb);

    rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
    if (rval != QLA_SUCCESS) {
        ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
                   __func__);
        rval = -EIO;
        goto exit_init_fw_cb;
    }

    memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
    qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
                              init_fw_cb_dma);

exit_init_fw_cb:
    dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
                      init_fw_cb, init_fw_cb_dma);

    return rval;
}

static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
                                  enum iscsi_param param, char *buf)
{
    struct iscsi_conn *conn;
    struct qla_conn *qla_conn;
    struct sockaddr *dst_addr;
    int len = 0;

    conn = cls_conn->dd_data;
    qla_conn = conn->dd_data;
    dst_addr = &qla_conn->qla_ep->dst_addr;

    switch (param) {
    case ISCSI_PARAM_CONN_PORT:
    case ISCSI_PARAM_CONN_ADDRESS:
        return iscsi_conn_get_addr_param((struct sockaddr_storage *)
                                         dst_addr, param, buf);
    default:
        return iscsi_conn_get_param(cls_conn, param, buf);
    }

    return len;
}

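/*
 * Reserve a free firmware DDB (device database) index for the new target
 * and build the iSCSI transport-class session around it.
 */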
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
                       uint16_t cmds_max, uint16_t qdepth,
                       uint32_t initial_cmdsn)
{
    struct iscsi_cls_session *cls_sess;
    struct scsi_qla_host *ha;
    struct qla_endpoint *qla_ep;
    struct ddb_entry *ddb_entry;
    uint32_t ddb_index;
    uint32_t mbx_sts = 0;
    struct iscsi_session *sess;
    struct sockaddr *dst_addr;
    int ret;

    DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
    if (!ep) {
        printk(KERN_ERR "qla4xxx: missing ep.\n");
        return NULL;
    }

    qla_ep = ep->dd_data;
    dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
    ha = to_qla_host(qla_ep->host);

get_ddb_index:
    ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);

    if (ddb_index >= MAX_DDB_ENTRIES) {
        DEBUG2(ql4_printk(KERN_INFO, ha,
                          "Free DDB index not available\n"));
        return NULL;
    }

    if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
        goto get_ddb_index;

    DEBUG2(ql4_printk(KERN_INFO, ha,
                      "Found a free DDB index at %d\n", ddb_index));
    ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
    if (ret == QLA_ERROR) {
        if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
            ql4_printk(KERN_INFO, ha,
                       "DDB index = %d not available trying next\n",
                       ddb_index);
            goto get_ddb_index;
        }
        DEBUG2(ql4_printk(KERN_INFO, ha,
                          "Free FW DDB not available\n"));
        return NULL;
    }

    cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
                                   cmds_max, sizeof(struct ddb_entry),
                                   sizeof(struct ql4_task_data),
                                   initial_cmdsn, ddb_index);
    if (!cls_sess)
        return NULL;

    sess = cls_sess->dd_data;
    ddb_entry = sess->dd_data;
    ddb_entry->fw_ddb_index = ddb_index;
    ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
    ddb_entry->ha = ha;
    ddb_entry->sess = cls_sess;
    cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
    ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
    ha->tot_ddbs++;

    return cls_sess;
}

static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
{
    struct iscsi_session *sess;
    struct ddb_entry *ddb_entry;
    struct scsi_qla_host *ha;
    unsigned long flags;

    DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
    sess = cls_sess->dd_data;
    ddb_entry = sess->dd_data;
    ha = ddb_entry->ha;

    qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);

    spin_lock_irqsave(&ha->hardware_lock, flags);
    qla4xxx_free_ddb(ha, ddb_entry);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    iscsi_session_teardown(cls_sess);
}

static struct iscsi_cls_conn *
qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
{
    struct iscsi_cls_conn *cls_conn;
    struct iscsi_session *sess;
    struct ddb_entry *ddb_entry;

    DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
    cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
                                conn_idx);
    sess = cls_sess->dd_data;
    ddb_entry = sess->dd_data;
    ddb_entry->conn = cls_conn;

    return cls_conn;
}

static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
                             struct iscsi_cls_conn *cls_conn,
                             uint64_t transport_fd, int is_leading)
{
    struct iscsi_conn *conn;
    struct qla_conn *qla_conn;
    struct iscsi_endpoint *ep;

    DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

    if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
        return -EINVAL;
    ep = iscsi_lookup_endpoint(transport_fd);
    conn = cls_conn->dd_data;
    qla_conn = conn->dd_data;
    qla_conn->qla_ep = ep->dd_data;
    return 0;
}

static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
{
    struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
    struct iscsi_session *sess;
    struct ddb_entry *ddb_entry;
    struct scsi_qla_host *ha;
    struct dev_db_entry *fw_ddb_entry;
    dma_addr_t fw_ddb_entry_dma;
    uint32_t mbx_sts = 0;
    int ret = 0;
    int status = QLA_SUCCESS;

    DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
    sess = cls_sess->dd_data;
    ddb_entry = sess->dd_data;
    ha = ddb_entry->ha;

    fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                      &fw_ddb_entry_dma, GFP_KERNEL);
    if (!fw_ddb_entry) {
        ql4_printk(KERN_ERR, ha,
                   "%s: Unable to allocate dma buffer\n", __func__);
        return -ENOMEM;
    }

    ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
    if (ret) {
        /*
         * If iscsid is stopped and started then no need to do
         * set param again since ddb state will be already
         * active and FW does not allow set ddb to an
         * active session.
         */
        if (mbx_sts)
            if (ddb_entry->fw_ddb_device_state ==
                DDB_DS_SESSION_ACTIVE) {
                iscsi_conn_start(ddb_entry->conn);
                iscsi_conn_login_event(ddb_entry->conn,
                                       ISCSI_CONN_STATE_LOGGED_IN);
                goto exit_set_param;
            }

        ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
                   __func__, ddb_entry->fw_ddb_index);
        goto exit_conn_start;
    }

    status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
    if (status == QLA_ERROR) {
        ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
                   sess->targetname);
        ret = -EINVAL;
        goto exit_conn_start;
    }

    if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
        ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;

    DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
                  ddb_entry->fw_ddb_device_state));

exit_set_param:
    ret = 0;

exit_conn_start:
    dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                      fw_ddb_entry, fw_ddb_entry_dma);
    return ret;
}

static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
    struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
    struct iscsi_session *sess;
    struct scsi_qla_host *ha;
    struct ddb_entry *ddb_entry;
    int options;

    DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
    sess = cls_sess->dd_data;
    ddb_entry = sess->dd_data;
    ha = ddb_entry->ha;

    options = LOGOUT_OPTION_CLOSE_SESSION;
    if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
        ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
}

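/*
 * Deferred-work handler that completes a passthrough iSCSI PDU: on success
 * the firmware's response buffer is handed back to libiscsi via
 * iscsi_complete_pdu().
 */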
static void qla4xxx_task_work(struct work_struct *wdata)
{
    struct ql4_task_data *task_data;
    struct scsi_qla_host *ha;
    struct passthru_status *sts;
    struct iscsi_task *task;
    struct iscsi_hdr *hdr;
    uint8_t *data;
    uint32_t data_len;
    struct iscsi_conn *conn;
    int hdr_len;
    itt_t itt;

    task_data = container_of(wdata, struct ql4_task_data, task_work);
    ha = task_data->ha;
    task = task_data->task;
    sts = &task_data->sts;
    hdr_len = sizeof(struct iscsi_hdr);

    DEBUG3(printk(KERN_INFO "Status returned\n"));
    DEBUG3(qla4xxx_dump_buffer(sts, 64));
    DEBUG3(printk(KERN_INFO "Response buffer"));
    DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));

    conn = task->conn;

    switch (sts->completionStatus) {
    case PASSTHRU_STATUS_COMPLETE:
        hdr = (struct iscsi_hdr *)task_data->resp_buffer;
        /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
        itt = sts->handle;
        hdr->itt = itt;
        data = task_data->resp_buffer + hdr_len;
        data_len = task_data->resp_len - hdr_len;
        iscsi_complete_pdu(conn, hdr, data, data_len);
        break;
    default:
        ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
                   sts->completionStatus);
        break;
    }
    return;
}

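/*
 * Allocate the DMA request/response buffers used to pass a non-SCSI iSCSI
 * PDU (login, text, nop-out, ...) through the firmware; SCSI commands are
 * not handled on this path.
 */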
static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
    struct ql4_task_data *task_data;
    struct iscsi_session *sess;
    struct ddb_entry *ddb_entry;
    struct scsi_qla_host *ha;
    int hdr_len;

    sess = task->conn->session;
    ddb_entry = sess->dd_data;
    ha = ddb_entry->ha;
    task_data = task->dd_data;
    memset(task_data, 0, sizeof(struct ql4_task_data));

    if (task->sc) {
        ql4_printk(KERN_INFO, ha,
                   "%s: SCSI Commands not implemented\n", __func__);
        return -EINVAL;
    }

    hdr_len = sizeof(struct iscsi_hdr);
    task_data->ha = ha;
    task_data->task = task;

    if (task->data_count) {
        task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
                                             task->data_count,
                                             PCI_DMA_TODEVICE);
    }

    DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
                      __func__, task->conn->max_recv_dlength, hdr_len));

    task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
    task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
                                                task_data->resp_len,
                                                &task_data->resp_dma,
                                                GFP_ATOMIC);
    if (!task_data->resp_buffer)
        goto exit_alloc_pdu;

    task_data->req_len = task->data_count + hdr_len;
    task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
                                               task_data->req_len,
                                               &task_data->req_dma,
                                               GFP_ATOMIC);
    if (!task_data->req_buffer)
        goto exit_alloc_pdu;

    task->hdr = task_data->req_buffer;

    INIT_WORK(&task_data->task_work, qla4xxx_task_work);

    return 0;

exit_alloc_pdu:
    if (task_data->resp_buffer)
        dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
                          task_data->resp_buffer, task_data->resp_dma);

    if (task_data->req_buffer)
        dma_free_coherent(&ha->pdev->dev, task_data->req_len,
                          task_data->req_buffer, task_data->req_dma);
    return -ENOMEM;
}

static void qla4xxx_task_cleanup(struct iscsi_task *task)
{
    struct ql4_task_data *task_data;
    struct iscsi_session *sess;
    struct ddb_entry *ddb_entry;
    struct scsi_qla_host *ha;
    int hdr_len;

    hdr_len = sizeof(struct iscsi_hdr);
    sess = task->conn->session;
    ddb_entry = sess->dd_data;
    ha = ddb_entry->ha;
    task_data = task->dd_data;

    if (task->data_count) {
        dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
                         task->data_count, PCI_DMA_TODEVICE);
    }

    DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
                      __func__, task->conn->max_recv_dlength, hdr_len));

    dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
                      task_data->resp_buffer, task_data->resp_dma);
    dma_free_coherent(&ha->pdev->dev, task_data->req_len,
                      task_data->req_buffer, task_data->req_dma);
    return;
}

static int qla4xxx_task_xmit(struct iscsi_task *task)
{
    struct scsi_cmnd *sc = task->sc;
    struct iscsi_session *sess = task->conn->session;
    struct ddb_entry *ddb_entry = sess->dd_data;
    struct scsi_qla_host *ha = ddb_entry->ha;

    if (!sc)
        return qla4xxx_send_passthru0(task);

    ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
               __func__);
    return -ENOSYS;
}

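/*
 * Refresh the libiscsi session and connection parameters from the current
 * firmware DDB entry for this target.
 */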
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                       struct ddb_entry *ddb_entry)
{
    struct iscsi_cls_session *cls_sess;
    struct iscsi_cls_conn *cls_conn;
    struct iscsi_session *sess;
    struct iscsi_conn *conn;
    uint32_t ddb_state;
    dma_addr_t fw_ddb_entry_dma;
    struct dev_db_entry *fw_ddb_entry;

    fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                      &fw_ddb_entry_dma, GFP_KERNEL);
    if (!fw_ddb_entry) {
        ql4_printk(KERN_ERR, ha,
                   "%s: Unable to allocate dma buffer\n", __func__);
        return;
    }

    if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
                                fw_ddb_entry_dma, NULL, NULL, &ddb_state,
                                NULL, NULL, NULL) == QLA_ERROR) {
        DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
                          "get_ddb_entry for fw_ddb_index %d\n",
                          ha->host_no, __func__,
                          ddb_entry->fw_ddb_index));
        return;
    }

    cls_sess = ddb_entry->sess;
    sess = cls_sess->dd_data;

    cls_conn = ddb_entry->conn;
    conn = cls_conn->dd_data;

    /* Update params */
    conn->max_recv_dlength = BYTE_UNITS *
        le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);

    conn->max_xmit_dlength = BYTE_UNITS *
        le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);

    sess->initial_r2t_en =
        (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));

    sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);

    sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));

    sess->first_burst = BYTE_UNITS *
        le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);

    sess->max_burst = BYTE_UNITS *
        le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);

    sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);

    sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);

    sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);

    memcpy(sess->initiatorname, ha->name_string,
           min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
}

/*
 * Timer routines
 */

static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
                                unsigned long interval)
{
    DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
                 __func__, ha->host->host_no));
    init_timer(&ha->timer);
    ha->timer.expires = jiffies + interval * HZ;
    ha->timer.data = (unsigned long)ha;
    ha->timer.function = (void (*)(unsigned long))func;
    add_timer(&ha->timer);
    ha->timer_active = 1;
}

static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
{
    del_timer_sync(&ha->timer);
    ha->timer_active = 0;
}

/**
 * qla4xxx_mark_device_missing - blocks the session
 * @cls_session: Pointer to the session to be blocked
 *
 * This routine marks a device missing and closes the connection.
 **/
void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
{
    iscsi_block_session(cls_session);
}

/**
 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
 * @ha: Pointer to host adapter structure.
 *
 * This routine marks every device on the adapter missing by blocking
 * each of its sessions.
 **/
void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
{
    iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
}

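/*
 * Allocate and initialize an SRB (SCSI request block) from the driver's
 * mempool and link it to the SCSI command.
 */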
afaf5a2d
DS
1463static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
1464 struct ddb_entry *ddb_entry,
8f0722ca 1465 struct scsi_cmnd *cmd)
afaf5a2d
DS
1466{
1467 struct srb *srb;
1468
1469 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
1470 if (!srb)
1471 return srb;
1472
09a0f719 1473 kref_init(&srb->srb_ref);
afaf5a2d
DS
1474 srb->ha = ha;
1475 srb->ddb = ddb_entry;
1476 srb->cmd = cmd;
1477 srb->flags = 0;
5369887a 1478 CMD_SP(cmd) = (void *)srb;
afaf5a2d
DS
1479
1480 return srb;
1481}
1482
1483static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
1484{
1485 struct scsi_cmnd *cmd = srb->cmd;
1486
1487 if (srb->flags & SRB_DMA_VALID) {
5f7186c8 1488 scsi_dma_unmap(cmd);
afaf5a2d
DS
1489 srb->flags &= ~SRB_DMA_VALID;
1490 }
5369887a 1491 CMD_SP(cmd) = NULL;
afaf5a2d
DS
1492}
1493
09a0f719 1494void qla4xxx_srb_compl(struct kref *ref)
afaf5a2d 1495{
09a0f719 1496 struct srb *srb = container_of(ref, struct srb, srb_ref);
afaf5a2d 1497 struct scsi_cmnd *cmd = srb->cmd;
09a0f719 1498 struct scsi_qla_host *ha = srb->ha;
afaf5a2d
DS
1499
1500 qla4xxx_srb_free_dma(ha, srb);
1501
1502 mempool_free(srb, ha->srb_mempool);
1503
1504 cmd->scsi_done(cmd);
1505}
1506
1507/**
1508 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
8f0722ca 1509 * @host: scsi host
afaf5a2d 1510 * @cmd: Pointer to Linux's SCSI command structure
afaf5a2d
DS
1511 *
1512 * Remarks:
1513 * This routine is invoked by Linux to send a SCSI command to the driver.
1514 * The mid-level driver tries to ensure that queuecommand never gets
1515 * invoked concurrently with itself or the interrupt handler (although
1516 * the interrupt handler may call this routine as part of request-
1517 * completion handling). Unfortunely, it sometimes calls the scheduler
1518 * in interrupt context which is a big NO! NO!.
1519 **/
8f0722ca 1520static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
afaf5a2d 1521{
8f0722ca 1522 struct scsi_qla_host *ha = to_qla_host(host);
afaf5a2d 1523 struct ddb_entry *ddb_entry = cmd->device->hostdata;
7fb1921b 1524 struct iscsi_cls_session *sess = ddb_entry->sess;
afaf5a2d
DS
1525 struct srb *srb;
1526 int rval;
1527
2232be0d
LC
1528 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1529 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
1530 cmd->result = DID_NO_CONNECT << 16;
1531 else
1532 cmd->result = DID_REQUEUE << 16;
1533 goto qc_fail_command;
1534 }
1535
7fb1921b
MC
1536 if (!sess) {
1537 cmd->result = DID_IMM_RETRY << 16;
1538 goto qc_fail_command;
1539 }
1540
1541 rval = iscsi_session_chkready(sess);
1542 if (rval) {
1543 cmd->result = rval;
1544 goto qc_fail_command;
1545 }
1546
f4f5df23
VC
1547 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1548 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1549 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1550 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1551 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1552 !test_bit(AF_ONLINE, &ha->flags) ||
b3a271a9 1553 !test_bit(AF_LINK_UP, &ha->flags) ||
f4f5df23 1554 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
477ffb9d
DS
1555 goto qc_host_busy;
1556
8f0722ca 1557 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
afaf5a2d 1558 if (!srb)
8f0722ca 1559 goto qc_host_busy;
afaf5a2d
DS
1560
1561 rval = qla4xxx_send_command_to_isp(ha, srb);
1562 if (rval != QLA_SUCCESS)
1563 goto qc_host_busy_free_sp;
1564
afaf5a2d
DS
1565 return 0;
1566
1567qc_host_busy_free_sp:
1568 qla4xxx_srb_free_dma(ha, srb);
1569 mempool_free(srb, ha->srb_mempool);
1570
afaf5a2d
DS
1571qc_host_busy:
1572 return SCSI_MLQUEUE_HOST_BUSY;
1573
1574qc_fail_command:
8f0722ca 1575 cmd->scsi_done(cmd);
afaf5a2d
DS
1576
1577 return 0;
1578}
1579
1580/**
1581 * qla4xxx_mem_free - frees memory allocated to adapter
1582 * @ha: Pointer to host adapter structure.
1583 *
1584 * Frees memory previously allocated by qla4xxx_mem_alloc
1585 **/
1586static void qla4xxx_mem_free(struct scsi_qla_host *ha)
1587{
1588 if (ha->queues)
1589 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
1590 ha->queues_dma);
1591
1592 ha->queues_len = 0;
1593 ha->queues = NULL;
1594 ha->queues_dma = 0;
1595 ha->request_ring = NULL;
1596 ha->request_dma = 0;
1597 ha->response_ring = NULL;
1598 ha->response_dma = 0;
1599 ha->shadow_regs = NULL;
1600 ha->shadow_regs_dma = 0;
1601
1602 /* Free srb pool. */
1603 if (ha->srb_mempool)
1604 mempool_destroy(ha->srb_mempool);
1605
1606 ha->srb_mempool = NULL;
1607
b3a271a9
MR
1608 if (ha->chap_dma_pool)
1609 dma_pool_destroy(ha->chap_dma_pool);
1610
4549415a
LC
1611 if (ha->chap_list)
1612 vfree(ha->chap_list);
1613 ha->chap_list = NULL;
1614
afaf5a2d 1615 /* release io space registers */
f4f5df23
VC
1616 if (is_qla8022(ha)) {
1617 if (ha->nx_pcibase)
1618 iounmap(
1619 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
f4f5df23 1620 } else if (ha->reg)
afaf5a2d
DS
1621 iounmap(ha->reg);
1622 pci_release_regions(ha->pdev);
1623}
1624
1625/**
1626 * qla4xxx_mem_alloc - allocates memory for use by adapter.
1627 * @ha: Pointer to host adapter structure
1628 *
1629 * Allocates DMA memory for request and response queues. Also allocates memory
1630 * for srbs.
1631 **/
1632static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
1633{
1634 unsigned long align;
1635
1636 /* Allocate contiguous block of DMA memory for queues. */
1637 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1638 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
1639 sizeof(struct shadow_regs) +
1640 MEM_ALIGN_VALUE +
1641 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
1642 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
1643 &ha->queues_dma, GFP_KERNEL);
1644 if (ha->queues == NULL) {
c2660df3
VC
1645 ql4_printk(KERN_WARNING, ha,
1646 "Memory Allocation failed - queues.\n");
afaf5a2d
DS
1647
1648 goto mem_alloc_error_exit;
1649 }
1650 memset(ha->queues, 0, ha->queues_len);
1651
1652 /*
1653 * As per RISC alignment requirements -- the bus-address must be a
1654 * multiple of the request-ring size (in bytes).
1655 */
1656 align = 0;
1657 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
1658 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
1659 (MEM_ALIGN_VALUE - 1));
1660
1661 /* Update request and response queue pointers. */
1662 ha->request_dma = ha->queues_dma + align;
1663 ha->request_ring = (struct queue_entry *) (ha->queues + align);
1664 ha->response_dma = ha->queues_dma + align +
1665 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
1666 ha->response_ring = (struct queue_entry *) (ha->queues + align +
1667 (REQUEST_QUEUE_DEPTH *
1668 QUEUE_SIZE));
1669 ha->shadow_regs_dma = ha->queues_dma + align +
1670 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1671 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
1672 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
1673 (REQUEST_QUEUE_DEPTH *
1674 QUEUE_SIZE) +
1675 (RESPONSE_QUEUE_DEPTH *
1676 QUEUE_SIZE));
1677
1678 /* Allocate memory for srb pool. */
1679 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
1680 mempool_free_slab, srb_cachep);
1681 if (ha->srb_mempool == NULL) {
c2660df3
VC
1682 ql4_printk(KERN_WARNING, ha,
1683 "Memory Allocation failed - SRB Pool.\n");
afaf5a2d
DS
1684
1685 goto mem_alloc_error_exit;
1686 }
1687
b3a271a9
MR
1688 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
1689 CHAP_DMA_BLOCK_SIZE, 8, 0);
1690
1691 if (ha->chap_dma_pool == NULL) {
1692 ql4_printk(KERN_WARNING, ha,
1693 "%s: chap_dma_pool allocation failed..\n", __func__);
1694 goto mem_alloc_error_exit;
1695 }
1696
afaf5a2d
DS
1697 return QLA_SUCCESS;
1698
1699mem_alloc_error_exit:
1700 qla4xxx_mem_free(ha);
1701 return QLA_ERROR;
1702}
1703
f4f5df23
VC
1704/**
1705 * qla4_8xxx_check_fw_alive - Check firmware health
1706 * @ha: Pointer to host adapter structure.
1707 *
1708 * Context: Interrupt
1709 **/
1710static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
1711{
1712 uint32_t fw_heartbeat_counter, halt_status;
1713
1714 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2232be0d
LC
1715 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
1716 if (fw_heartbeat_counter == 0xffffffff) {
1717 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
1718 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
1719 ha->host_no, __func__));
1720 return;
1721 }
f4f5df23
VC
1722
1723 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
1724 ha->seconds_since_last_heartbeat++;
1725 /* FW not alive after 2 seconds */
1726 if (ha->seconds_since_last_heartbeat == 2) {
1727 ha->seconds_since_last_heartbeat = 0;
1728 halt_status = qla4_8xxx_rd_32(ha,
68d92ebf
VC
1729 QLA82XX_PEG_HALT_STATUS1);
1730
1731 ql4_printk(KERN_INFO, ha,
1732 "scsi(%ld): %s, Dumping hw/fw registers:\n "
1733 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
1734 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
1735 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
1736 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
1737 ha->host_no, __func__, halt_status,
1738 qla4_8xxx_rd_32(ha,
1739 QLA82XX_PEG_HALT_STATUS2),
1740 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
1741 0x3c),
1742 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
1743 0x3c),
1744 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
1745 0x3c),
1746 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
1747 0x3c),
1748 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
1749 0x3c));
21033639 1750
f4f5df23
VC
1751 /* Since we cannot change dev_state in interrupt
 1752 * context, set the appropriate DPC flag then wake up
 1753 * the DPC */

1754 if (halt_status & HALT_STATUS_UNRECOVERABLE)
1755 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
1756 else {
1757 printk("scsi%ld: %s: detect abort needed!\n",
1758 ha->host_no, __func__);
1759 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1760 }
1761 qla4xxx_wake_dpc(ha);
21033639 1762 qla4xxx_mailbox_premature_completion(ha);
f4f5df23 1763 }
99457d75
LC
1764 } else
1765 ha->seconds_since_last_heartbeat = 0;
1766
f4f5df23
VC
1767 ha->fw_heartbeat_counter = fw_heartbeat_counter;
1768}
1769
1770/**
1771 * qla4_8xxx_watchdog - Poll dev state
1772 * @ha: Pointer to host adapter structure.
1773 *
1774 * Context: Interrupt
1775 **/
1776void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
1777{
1778 uint32_t dev_state;
1779
1780 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1781
1782 /* don't poll if reset is going on */
d56a1f7b
LC
1783 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1784 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
977f46a4 1785 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
f4f5df23
VC
1786 if (dev_state == QLA82XX_DEV_NEED_RESET &&
1787 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3930b8c1
VC
1788 if (!ql4xdontresethba) {
1789 ql4_printk(KERN_INFO, ha, "%s: HW State: "
1790 "NEED RESET!\n", __func__);
1791 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1792 qla4xxx_wake_dpc(ha);
1793 qla4xxx_mailbox_premature_completion(ha);
1794 }
f4f5df23
VC
1795 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
1796 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3930b8c1
VC
1797 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
1798 __func__);
f4f5df23
VC
1799 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
1800 qla4xxx_wake_dpc(ha);
1801 } else {
1802 /* Check firmware health */
1803 qla4_8xxx_check_fw_alive(ha);
1804 }
1805 }
1806}
1807
afaf5a2d
DS
1808/**
1809 * qla4xxx_timer - checks every second for work to do.
1810 * @ha: Pointer to host adapter structure.
1811 **/
1812static void qla4xxx_timer(struct scsi_qla_host *ha)
1813{
afaf5a2d 1814 int start_dpc = 0;
2232be0d
LC
1815 uint16_t w;
1816
1817 /* If we are in the middle of AER/EEH processing
1818 * skip any processing and reschedule the timer
1819 */
1820 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1821 mod_timer(&ha->timer, jiffies + HZ);
1822 return;
1823 }
1824
1825 /* Hardware read to trigger an EEH error during mailbox waits. */
1826 if (!pci_channel_offline(ha->pdev))
1827 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
afaf5a2d 1828
f4f5df23
VC
1829 if (is_qla8022(ha)) {
1830 qla4_8xxx_watchdog(ha);
1831 }
1832
f4f5df23
VC
1833 if (!is_qla8022(ha)) {
1834 /* Check for heartbeat interval. */
1835 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
1836 ha->heartbeat_interval != 0) {
1837 ha->seconds_since_last_heartbeat++;
1838 if (ha->seconds_since_last_heartbeat >
1839 ha->heartbeat_interval + 2)
1840 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1841 }
afaf5a2d
DS
1842 }
1843
afaf5a2d 1844 /* Wakeup the dpc routine for this adapter, if needed. */
1b46807e 1845 if (start_dpc ||
afaf5a2d
DS
1846 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1847 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
1848 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
f4f5df23 1849 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
afaf5a2d
DS
1850 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1851 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
065aa1b4 1852 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
f4f5df23
VC
1853 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1854 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1b46807e 1855 test_bit(DPC_AEN, &ha->dpc_flags)) {
afaf5a2d
DS
1856 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
1857 " - dpc flags = 0x%lx\n",
1858 ha->host_no, __func__, ha->dpc_flags));
f4f5df23 1859 qla4xxx_wake_dpc(ha);
afaf5a2d
DS
1860 }
1861
1862 /* Reschedule timer thread to call us back in one second */
1863 mod_timer(&ha->timer, jiffies + HZ);
1864
1865 DEBUG2(ha->seconds_since_last_intr++);
1866}
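/*
 * Editorial note on the heartbeat check above (non-ISP82xx path): the
 * counter is bumped once per timer tick, so with a firmware
 * heartbeat_interval of, say, 10 seconds, DPC_RESET_HA is scheduled once
 * roughly 13 ticks pass without the counter being cleared by an incoming
 * heartbeat (the "+ 2" adds a small tolerance). The 10-second figure is
 * only an illustration, not a driver default.
 */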
1867
1868/**
1869 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
1870 * @ha: Pointer to host adapter structure.
1871 *
1872 * This routine stalls the driver until all outstanding commands are returned.
1873 * Caller must release the Hardware Lock prior to calling this routine.
1874 **/
1875static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
1876{
1877 uint32_t index = 0;
afaf5a2d
DS
1878 unsigned long flags;
1879 struct scsi_cmnd *cmd;
afaf5a2d 1880
f4f5df23
VC
1881 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
1882
1883 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
1884 "complete\n", WAIT_CMD_TOV));
1885
1886 while (!time_after_eq(jiffies, wtime)) {
afaf5a2d
DS
1887 spin_lock_irqsave(&ha->hardware_lock, flags);
1888 /* Find a command that hasn't completed. */
1889 for (index = 0; index < ha->host->can_queue; index++) {
1890 cmd = scsi_host_find_tag(ha->host, index);
a1e0063d
MC
1891 /*
1892 * We cannot just check if the index is valid,
 1893 * because if we are run from the scsi eh, then
1894 * the scsi/block layer is going to prevent
1895 * the tag from being released.
1896 */
1897 if (cmd != NULL && CMD_SP(cmd))
afaf5a2d
DS
1898 break;
1899 }
1900 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1901
1902 /* If No Commands are pending, wait is complete */
f4f5df23
VC
1903 if (index == ha->host->can_queue)
1904 return QLA_SUCCESS;
afaf5a2d 1905
f4f5df23
VC
1906 msleep(1000);
1907 }
1908 /* If we timed out on waiting for commands to come back
1909 * return ERROR. */
1910 return QLA_ERROR;
afaf5a2d
DS
1911}
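/*
 * Editorial note: qla4xxx_cmd_wait() above is an instance of the usual
 * "deadline plus one-second poll" shape. A minimal sketch of that pattern,
 * with wait_condition_met() as a hypothetical stand-in for the active
 * command scan, would look like:
 *
 *	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
 *
 *	while (!time_after_eq(jiffies, wtime)) {
 *		if (wait_condition_met(ha))	// hypothetical helper
 *			return QLA_SUCCESS;
 *		msleep(1000);
 *	}
 *	return QLA_ERROR;
 */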
1912
f4f5df23 1913int qla4xxx_hw_reset(struct scsi_qla_host *ha)
afaf5a2d 1914{
afaf5a2d 1915 uint32_t ctrl_status;
477ffb9d
DS
1916 unsigned long flags = 0;
1917
1918 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
afaf5a2d 1919
f4f5df23
VC
1920 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
1921 return QLA_ERROR;
1922
afaf5a2d
DS
1923 spin_lock_irqsave(&ha->hardware_lock, flags);
1924
1925 /*
1926 * If the SCSI Reset Interrupt bit is set, clear it.
1927 * Otherwise, the Soft Reset won't work.
1928 */
1929 ctrl_status = readw(&ha->reg->ctrl_status);
1930 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
1931 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
1932
1933 /* Issue Soft Reset */
1934 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
1935 readl(&ha->reg->ctrl_status);
1936
1937 spin_unlock_irqrestore(&ha->hardware_lock, flags);
f4f5df23 1938 return QLA_SUCCESS;
477ffb9d
DS
1939}
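/*
 * Editorial note: the writel()/readl() pair in qla4xxx_hw_reset() (and the
 * similar pairs in qla4xxx_soft_reset() below) follow the usual MMIO
 * posted-write flush idiom: reading ctrl_status back after the write makes
 * sure the soft-reset request has actually reached the chip before the
 * hardware lock is released.
 */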
1940
1941/**
1942 * qla4xxx_soft_reset - performs soft reset.
1943 * @ha: Pointer to host adapter structure.
1944 **/
1945int qla4xxx_soft_reset(struct scsi_qla_host *ha)
1946{
1947 uint32_t max_wait_time;
1948 unsigned long flags = 0;
f931c534 1949 int status;
477ffb9d
DS
1950 uint32_t ctrl_status;
1951
f931c534
VC
1952 status = qla4xxx_hw_reset(ha);
1953 if (status != QLA_SUCCESS)
1954 return status;
afaf5a2d 1955
f931c534 1956 status = QLA_ERROR;
afaf5a2d
DS
1957 /* Wait until the Network Reset Intr bit is cleared */
1958 max_wait_time = RESET_INTR_TOV;
1959 do {
1960 spin_lock_irqsave(&ha->hardware_lock, flags);
1961 ctrl_status = readw(&ha->reg->ctrl_status);
1962 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1963
1964 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
1965 break;
1966
1967 msleep(1000);
1968 } while ((--max_wait_time));
1969
1970 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
1971 DEBUG2(printk(KERN_WARNING
1972 "scsi%ld: Network Reset Intr not cleared by "
1973 "Network function, clearing it now!\n",
1974 ha->host_no));
1975 spin_lock_irqsave(&ha->hardware_lock, flags);
1976 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
1977 readl(&ha->reg->ctrl_status);
1978 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1979 }
1980
1981 /* Wait until the firmware tells us the Soft Reset is done */
1982 max_wait_time = SOFT_RESET_TOV;
1983 do {
1984 spin_lock_irqsave(&ha->hardware_lock, flags);
1985 ctrl_status = readw(&ha->reg->ctrl_status);
1986 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1987
1988 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
1989 status = QLA_SUCCESS;
1990 break;
1991 }
1992
1993 msleep(1000);
1994 } while ((--max_wait_time));
1995
1996 /*
1997 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
1998 * after the soft reset has taken place.
1999 */
2000 spin_lock_irqsave(&ha->hardware_lock, flags);
2001 ctrl_status = readw(&ha->reg->ctrl_status);
2002 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
2003 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2004 readl(&ha->reg->ctrl_status);
2005 }
2006 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2007
 2008 /* If the soft reset fails, the BIOS on the other
 2009 * function is most probably also enabled.
 2010 * Since initialization is sequential, the other function
 2011 * won't be able to acknowledge the soft reset.
 2012 * Issue a force soft reset to work around this scenario.
2013 */
2014 if (max_wait_time == 0) {
2015 /* Issue Force Soft Reset */
2016 spin_lock_irqsave(&ha->hardware_lock, flags);
2017 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2018 readl(&ha->reg->ctrl_status);
2019 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2020 /* Wait until the firmware tells us the Soft Reset is done */
2021 max_wait_time = SOFT_RESET_TOV;
2022 do {
2023 spin_lock_irqsave(&ha->hardware_lock, flags);
2024 ctrl_status = readw(&ha->reg->ctrl_status);
2025 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2026
2027 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2028 status = QLA_SUCCESS;
2029 break;
2030 }
2031
2032 msleep(1000);
2033 } while ((--max_wait_time));
2034 }
2035
2036 return status;
2037}
2038
afaf5a2d 2039/**
f4f5df23 2040 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
afaf5a2d 2041 * @ha: Pointer to host adapter structure.
f4f5df23 2042 * @res: returned scsi status
afaf5a2d
DS
2043 *
2044 * This routine is called just prior to a HARD RESET to return all
2045 * outstanding commands back to the Operating System.
2046 * Caller should make sure that the following locks are released
 2047 * before calling this routine: hardware lock and io_request_lock.
2048 **/
f4f5df23 2049static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
afaf5a2d
DS
2050{
2051 struct srb *srb;
2052 int i;
2053 unsigned long flags;
2054
2055 spin_lock_irqsave(&ha->hardware_lock, flags);
2056 for (i = 0; i < ha->host->can_queue; i++) {
2057 srb = qla4xxx_del_from_active_array(ha, i);
2058 if (srb != NULL) {
f4f5df23 2059 srb->cmd->result = res;
09a0f719 2060 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
afaf5a2d
DS
2061 }
2062 }
2063 spin_unlock_irqrestore(&ha->hardware_lock, flags);
afaf5a2d
DS
2064}
2065
f4f5df23
VC
2066void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2067{
2068 clear_bit(AF_ONLINE, &ha->flags);
2069
2070 /* Disable the board */
2071 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
f4f5df23
VC
2072
2073 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2074 qla4xxx_mark_all_devices_missing(ha);
2075 clear_bit(AF_INIT_DONE, &ha->flags);
2076}
2077
b3a271a9
MR
2078static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2079{
2080 struct iscsi_session *sess;
2081 struct ddb_entry *ddb_entry;
2082
2083 sess = cls_session->dd_data;
2084 ddb_entry = sess->dd_data;
2085 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2086 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
2087}
2088
afaf5a2d
DS
2089/**
2090 * qla4xxx_recover_adapter - recovers adapter after a fatal error
2091 * @ha: Pointer to host adapter structure.
afaf5a2d 2092 **/
f4f5df23 2093static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
afaf5a2d 2094{
f4f5df23
VC
2095 int status = QLA_ERROR;
2096 uint8_t reset_chip = 0;
afaf5a2d
DS
2097
2098 /* Stall incoming I/O until we are done */
f4f5df23 2099 scsi_block_requests(ha->host);
afaf5a2d 2100 clear_bit(AF_ONLINE, &ha->flags);
b3a271a9 2101 clear_bit(AF_LINK_UP, &ha->flags);
50a29aec 2102
f4f5df23 2103 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
afaf5a2d 2104
f4f5df23 2105 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
afaf5a2d 2106
b3a271a9
MR
2107 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2108
f4f5df23
VC
2109 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2110 reset_chip = 1;
afaf5a2d 2111
f4f5df23
VC
2112 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2113 * do not reset adapter, jump to initialize_adapter */
2114 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2115 status = QLA_SUCCESS;
2116 goto recover_ha_init_adapter;
2117 }
afaf5a2d 2118
f4f5df23
VC
2119 /* For the ISP-82xx adapter, issue a stop_firmware if invoked
2120 * from eh_host_reset or ioctl module */
2121 if (is_qla8022(ha) && !reset_chip &&
2122 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2123
2124 DEBUG2(ql4_printk(KERN_INFO, ha,
2125 "scsi%ld: %s - Performing stop_firmware...\n",
2126 ha->host_no, __func__));
2127 status = ha->isp_ops->reset_firmware(ha);
2128 if (status == QLA_SUCCESS) {
2bd1e2be
NJ
2129 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2130 qla4xxx_cmd_wait(ha);
f4f5df23
VC
2131 ha->isp_ops->disable_intrs(ha);
2132 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2133 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2134 } else {
2135 /* If the stop_firmware fails then
2136 * reset the entire chip */
2137 reset_chip = 1;
2138 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2139 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2140 }
2141 }
dca05c4c 2142
f4f5df23
VC
2143 /* Issue full chip reset if recovering from a catastrophic error,
2144 * or if stop_firmware fails for ISP-82xx.
2145 * This is the default case for ISP-4xxx */
2146 if (!is_qla8022(ha) || reset_chip) {
2bd1e2be
NJ
2147 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2148 qla4xxx_cmd_wait(ha);
f4f5df23
VC
2149 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2150 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2151 DEBUG2(ql4_printk(KERN_INFO, ha,
2152 "scsi%ld: %s - Performing chip reset..\n",
2153 ha->host_no, __func__));
2154 status = ha->isp_ops->reset_chip(ha);
2155 }
afaf5a2d
DS
2156
2157 /* Flush any pending ddb changed AENs */
2158 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2159
f4f5df23
VC
2160recover_ha_init_adapter:
2161 /* Upon successful firmware/chip reset, re-initialize the adapter */
afaf5a2d 2162 if (status == QLA_SUCCESS) {
f4f5df23
VC
2163 /* For ISP-4xxx, force function 1 to always initialize
 2164 * before function 3 to prevent both functions from
 2165 * stepping on top of each other */
2166 if (!is_qla8022(ha) && (ha->mac_index == 3))
2167 ssleep(6);
2168
2169 /* NOTE: AF_ONLINE flag set upon successful completion of
2170 * qla4xxx_initialize_adapter */
0e7e8501 2171 status = qla4xxx_initialize_adapter(ha);
afaf5a2d
DS
2172 }
2173
f4f5df23
VC
2174 /* Retry failed adapter initialization, if necessary
2175 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
2176 * case to prevent ping-pong resets between functions */
2177 if (!test_bit(AF_ONLINE, &ha->flags) &&
2178 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 2179 /* Adapter initialization failed, see if we can retry
f4f5df23
VC
2180 * resetting the ha.
2181 * Since we don't want to block the DPC for too long
2182 * with multiple resets in the same thread,
2183 * utilize DPC to retry */
afaf5a2d
DS
2184 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
2185 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
2186 DEBUG2(printk("scsi%ld: recover adapter - retrying "
2187 "(%d) more times\n", ha->host_no,
2188 ha->retry_reset_ha_cnt));
2189 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2190 status = QLA_ERROR;
2191 } else {
2192 if (ha->retry_reset_ha_cnt > 0) {
2193 /* Schedule another Reset HA--DPC will retry */
2194 ha->retry_reset_ha_cnt--;
2195 DEBUG2(printk("scsi%ld: recover adapter - "
2196 "retry remaining %d\n",
2197 ha->host_no,
2198 ha->retry_reset_ha_cnt));
2199 status = QLA_ERROR;
2200 }
2201
2202 if (ha->retry_reset_ha_cnt == 0) {
2203 /* Recover adapter retries have been exhausted.
2204 * Adapter DEAD */
2205 DEBUG2(printk("scsi%ld: recover adapter "
2206 "failed - board disabled\n",
2207 ha->host_no));
f4f5df23 2208 qla4xxx_dead_adapter_cleanup(ha);
afaf5a2d
DS
2209 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2210 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 2211 clear_bit(DPC_RESET_HA_FW_CONTEXT,
afaf5a2d
DS
2212 &ha->dpc_flags);
2213 status = QLA_ERROR;
2214 }
2215 }
2216 } else {
2217 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 2218 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
afaf5a2d
DS
2219 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2220 }
2221
2222 ha->adapter_error_count++;
2223
f4f5df23
VC
2224 if (test_bit(AF_ONLINE, &ha->flags))
2225 ha->isp_ops->enable_intrs(ha);
2226
2227 scsi_unblock_requests(ha->host);
2228
2229 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2230 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
25985edc 2231 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
afaf5a2d 2232
afaf5a2d
DS
2233 return status;
2234}
2235
b3a271a9 2236static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
2d7924e6 2237{
b3a271a9
MR
2238 struct iscsi_session *sess;
2239 struct ddb_entry *ddb_entry;
2240 struct scsi_qla_host *ha;
2d7924e6 2241
b3a271a9
MR
2242 sess = cls_session->dd_data;
2243 ddb_entry = sess->dd_data;
2244 ha = ddb_entry->ha;
2245 if (!iscsi_is_session_online(cls_session)) {
2246 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
2247 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
2248 " unblock session\n", ha->host_no, __func__,
2249 ddb_entry->fw_ddb_index);
2250 iscsi_unblock_session(ddb_entry->sess);
2251 } else {
2252 /* Trigger relogin */
2253 iscsi_session_failure(cls_session->dd_data,
2254 ISCSI_ERR_CONN_FAILED);
2d7924e6
VC
2255 }
2256 }
2257}
2258
b3a271a9
MR
2259static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
2260{
2261 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
2262}
2263
f4f5df23
VC
2264void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
2265{
1b46807e 2266 if (ha->dpc_thread)
f4f5df23 2267 queue_work(ha->dpc_thread, &ha->dpc_work);
f4f5df23
VC
2268}
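/*
 * Editorial note: the expected calling pattern for qla4xxx_wake_dpc(), as
 * used throughout this file, is to set one or more DPC_* flags first and
 * then wake the worker, e.g.:
 *
 *	set_bit(DPC_RESET_HA, &ha->dpc_flags);
 *	qla4xxx_wake_dpc(ha);
 *
 * qla4xxx_do_dpc() below then inspects ha->dpc_flags to decide what work
 * to perform.
 */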
2269
afaf5a2d
DS
2270/**
2271 * qla4xxx_do_dpc - dpc routine
 2272 * @work: pointer to the dpc_work member embedded in the adapter structure
 2273 *
 2274 * This routine is a task that is scheduled by the interrupt handler
 2275 * to perform the background processing for interrupts. We put it
 2276 * on a task queue that is consumed whenever the scheduler runs; that
 2277 * way it may do anything (e.g. put the process to sleep). In fact,
 2278 * the mid-level tries to sleep when it reaches the driver threshold
 2279 * "host->can_queue", which would panic if done from interrupt context.
2280 **/
c4028958 2281static void qla4xxx_do_dpc(struct work_struct *work)
afaf5a2d 2282{
c4028958
DH
2283 struct scsi_qla_host *ha =
2284 container_of(work, struct scsi_qla_host, dpc_work);
477ffb9d 2285 int status = QLA_ERROR;
afaf5a2d 2286
f26b9044 2287 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
f4f5df23
VC
2288 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
2289 ha->host_no, __func__, ha->flags, ha->dpc_flags))
afaf5a2d
DS
2290
2291 /* Initialization not yet finished. Don't do anything yet. */
2292 if (!test_bit(AF_INIT_DONE, &ha->flags))
1b46807e 2293 return;
afaf5a2d 2294
2232be0d
LC
2295 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2296 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
2297 ha->host_no, __func__, ha->flags));
1b46807e 2298 return;
2232be0d
LC
2299 }
2300
f4f5df23
VC
2301 if (is_qla8022(ha)) {
2302 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
2303 qla4_8xxx_idc_lock(ha);
2304 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2305 QLA82XX_DEV_FAILED);
2306 qla4_8xxx_idc_unlock(ha);
2307 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
2308 qla4_8xxx_device_state_handler(ha);
2309 }
2310 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2311 qla4_8xxx_need_qsnt_handler(ha);
2312 }
2313 }
2314
2315 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
2316 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
afaf5a2d 2317 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
f4f5df23
VC
2318 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
2319 if (ql4xdontresethba) {
2320 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
2321 ha->host_no, __func__));
2322 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2323 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2324 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2325 goto dpc_post_reset_ha;
2326 }
2327 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
2328 test_bit(DPC_RESET_HA, &ha->dpc_flags))
2329 qla4xxx_recover_adapter(ha);
afaf5a2d 2330
477ffb9d 2331 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 2332 uint8_t wait_time = RESET_INTR_TOV;
afaf5a2d 2333
afaf5a2d
DS
2334 while ((readw(&ha->reg->ctrl_status) &
2335 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
2336 if (--wait_time == 0)
2337 break;
afaf5a2d 2338 msleep(1000);
afaf5a2d 2339 }
afaf5a2d
DS
2340 if (wait_time == 0)
2341 DEBUG2(printk("scsi%ld: %s: SR|FSR "
2342 "bit not cleared-- resetting\n",
2343 ha->host_no, __func__));
f4f5df23 2344 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
477ffb9d
DS
2345 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
2346 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
f4f5df23 2347 status = qla4xxx_recover_adapter(ha);
477ffb9d
DS
2348 }
2349 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2350 if (status == QLA_SUCCESS)
f4f5df23 2351 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
2352 }
2353 }
2354
f4f5df23 2355dpc_post_reset_ha:
afaf5a2d
DS
2356 /* ---- process AEN? --- */
2357 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
2358 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
2359
2360 /* ---- Get DHCP IP Address? --- */
2361 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
2362 qla4xxx_get_dhcp_ip_address(ha);
2363
065aa1b4
VC
2364 /* ---- link change? --- */
2365 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
2366 if (!test_bit(AF_LINK_UP, &ha->flags)) {
2367 /* ---- link down? --- */
2d7924e6 2368 qla4xxx_mark_all_devices_missing(ha);
065aa1b4
VC
2369 } else {
2370 /* ---- link up? --- *
2371 * F/W will auto login to all devices ONLY ONCE after
2372 * link up during driver initialization and runtime
2373 * fatal error recovery. Therefore, the driver must
2374 * manually relogin to devices when recovering from
2375 * connection failures, logouts, expired KATO, etc. */
2376
2d7924e6 2377 qla4xxx_relogin_all_devices(ha);
065aa1b4
VC
2378 }
2379 }
afaf5a2d
DS
2380}
2381
2382/**
2383 * qla4xxx_free_adapter - release the adapter
2384 * @ha: pointer to adapter structure
2385 **/
2386static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
2387{
2388
2389 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
2390 /* Turn-off interrupts on the card. */
f4f5df23 2391 ha->isp_ops->disable_intrs(ha);
afaf5a2d
DS
2392 }
2393
f4f5df23
VC
2394 /* Remove timer thread, if present */
2395 if (ha->timer_active)
2396 qla4xxx_stop_timer(ha);
2397
afaf5a2d
DS
2398 /* Kill the kernel thread for this host */
2399 if (ha->dpc_thread)
2400 destroy_workqueue(ha->dpc_thread);
2401
b3a271a9
MR
 2402 /* Destroy the task work queue for this host */
2403 if (ha->task_wq)
2404 destroy_workqueue(ha->task_wq);
2405
f4f5df23
VC
2406 /* Put firmware in known state */
2407 ha->isp_ops->reset_firmware(ha);
afaf5a2d 2408
f4f5df23
VC
2409 if (is_qla8022(ha)) {
2410 qla4_8xxx_idc_lock(ha);
2411 qla4_8xxx_clear_drv_active(ha);
2412 qla4_8xxx_idc_unlock(ha);
2413 }
afaf5a2d 2414
afaf5a2d
DS
2415 /* Detach interrupts */
2416 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
f4f5df23 2417 qla4xxx_free_irqs(ha);
afaf5a2d 2418
bee4fe8e
DS
2419 /* free extra memory */
2420 qla4xxx_mem_free(ha);
f4f5df23
VC
2421}
2422
2423int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
2424{
2425 int status = 0;
2426 uint8_t revision_id;
2427 unsigned long mem_base, mem_len, db_base, db_len;
2428 struct pci_dev *pdev = ha->pdev;
2429
2430 status = pci_request_regions(pdev, DRIVER_NAME);
2431 if (status) {
2432 printk(KERN_WARNING
2433 "scsi(%ld) Failed to reserve PIO regions (%s) "
2434 "status=%d\n", ha->host_no, pci_name(pdev), status);
2435 goto iospace_error_exit;
2436 }
2437
2438 pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
2439 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
2440 __func__, revision_id));
2441 ha->revision_id = revision_id;
bee4fe8e 2442
f4f5df23
VC
2443 /* remap phys address */
2444 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
2445 mem_len = pci_resource_len(pdev, 0);
2446 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
2447 __func__, mem_base, mem_len));
afaf5a2d 2448
f4f5df23
VC
2449 /* mapping of pcibase pointer */
2450 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
2451 if (!ha->nx_pcibase) {
2452 printk(KERN_ERR
2453 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
2454 pci_release_regions(ha->pdev);
2455 goto iospace_error_exit;
2456 }
2457
2458 /* Mapping of IO base pointer, door bell read and write pointer */
2459
2460 /* mapping of IO base pointer */
2461 ha->qla4_8xxx_reg =
2462 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
2463 0xbc000 + (ha->pdev->devfn << 11));
2464
2465 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
2466 db_len = pci_resource_len(pdev, 4);
2467
2657c800
SS
2468 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
2469 QLA82XX_CAM_RAM_DB2);
f4f5df23 2470
2657c800 2471 return 0;
f4f5df23
VC
2472iospace_error_exit:
2473 return -ENOMEM;
afaf5a2d
DS
2474}
2475
 2476/**
2477 * qla4xxx_iospace_config - maps registers
2478 * @ha: pointer to adapter structure
2479 *
 2480 * This routine maps the HBA's registers from the PCI address space
2481 * into the kernel virtual address space for memory mapped i/o.
2482 **/
f4f5df23 2483int qla4xxx_iospace_config(struct scsi_qla_host *ha)
afaf5a2d
DS
2484{
2485 unsigned long pio, pio_len, pio_flags;
2486 unsigned long mmio, mmio_len, mmio_flags;
2487
2488 pio = pci_resource_start(ha->pdev, 0);
2489 pio_len = pci_resource_len(ha->pdev, 0);
2490 pio_flags = pci_resource_flags(ha->pdev, 0);
2491 if (pio_flags & IORESOURCE_IO) {
2492 if (pio_len < MIN_IOBASE_LEN) {
c2660df3 2493 ql4_printk(KERN_WARNING, ha,
afaf5a2d
DS
2494 "Invalid PCI I/O region size\n");
2495 pio = 0;
2496 }
2497 } else {
c2660df3 2498 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
afaf5a2d
DS
2499 pio = 0;
2500 }
2501
2502 /* Use MMIO operations for all accesses. */
2503 mmio = pci_resource_start(ha->pdev, 1);
2504 mmio_len = pci_resource_len(ha->pdev, 1);
2505 mmio_flags = pci_resource_flags(ha->pdev, 1);
2506
2507 if (!(mmio_flags & IORESOURCE_MEM)) {
c2660df3
VC
2508 ql4_printk(KERN_ERR, ha,
2509 "region #0 not an MMIO resource, aborting\n");
afaf5a2d
DS
2510
2511 goto iospace_error_exit;
2512 }
c2660df3 2513
afaf5a2d 2514 if (mmio_len < MIN_IOBASE_LEN) {
c2660df3
VC
2515 ql4_printk(KERN_ERR, ha,
2516 "Invalid PCI mem region size, aborting\n");
afaf5a2d
DS
2517 goto iospace_error_exit;
2518 }
2519
2520 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
c2660df3
VC
2521 ql4_printk(KERN_WARNING, ha,
2522 "Failed to reserve PIO/MMIO regions\n");
afaf5a2d
DS
2523
2524 goto iospace_error_exit;
2525 }
2526
2527 ha->pio_address = pio;
2528 ha->pio_length = pio_len;
2529 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
2530 if (!ha->reg) {
c2660df3
VC
2531 ql4_printk(KERN_ERR, ha,
2532 "cannot remap MMIO, aborting\n");
afaf5a2d
DS
2533
2534 goto iospace_error_exit;
2535 }
2536
2537 return 0;
2538
2539iospace_error_exit:
2540 return -ENOMEM;
2541}
2542
f4f5df23
VC
2543static struct isp_operations qla4xxx_isp_ops = {
2544 .iospace_config = qla4xxx_iospace_config,
2545 .pci_config = qla4xxx_pci_config,
2546 .disable_intrs = qla4xxx_disable_intrs,
2547 .enable_intrs = qla4xxx_enable_intrs,
2548 .start_firmware = qla4xxx_start_firmware,
2549 .intr_handler = qla4xxx_intr_handler,
2550 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
2551 .reset_chip = qla4xxx_soft_reset,
2552 .reset_firmware = qla4xxx_hw_reset,
2553 .queue_iocb = qla4xxx_queue_iocb,
2554 .complete_iocb = qla4xxx_complete_iocb,
2555 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
2556 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
2557 .get_sys_info = qla4xxx_get_sys_info,
2558};
2559
2560static struct isp_operations qla4_8xxx_isp_ops = {
2561 .iospace_config = qla4_8xxx_iospace_config,
2562 .pci_config = qla4_8xxx_pci_config,
2563 .disable_intrs = qla4_8xxx_disable_intrs,
2564 .enable_intrs = qla4_8xxx_enable_intrs,
2565 .start_firmware = qla4_8xxx_load_risc,
2566 .intr_handler = qla4_8xxx_intr_handler,
2567 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
2568 .reset_chip = qla4_8xxx_isp_reset,
2569 .reset_firmware = qla4_8xxx_stop_firmware,
2570 .queue_iocb = qla4_8xxx_queue_iocb,
2571 .complete_iocb = qla4_8xxx_complete_iocb,
2572 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
2573 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
2574 .get_sys_info = qla4_8xxx_get_sys_info,
2575};
2576
2577uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2578{
2579 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
2580}
2581
2582uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2583{
2584 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
2585}
2586
2587uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2588{
2589 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
2590}
2591
2592uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2593{
2594 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
2595}
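/*
 * Editorial note: the four shadow-register readers above differ only in
 * where the firmware publishes the queue indices: ISP4xxx parts update the
 * DMA-shared struct shadow_regs (hence the plain structure reads), while
 * ISP82xx parts expose the same values as memory-mapped registers (hence
 * the readl() accesses).
 */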
2596
2a991c21
MR
2597static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
2598{
2599 struct scsi_qla_host *ha = data;
2600 char *str = buf;
2601 int rc;
2602
2603 switch (type) {
2604 case ISCSI_BOOT_ETH_FLAGS:
2605 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2606 break;
2607 case ISCSI_BOOT_ETH_INDEX:
2608 rc = sprintf(str, "0\n");
2609 break;
2610 case ISCSI_BOOT_ETH_MAC:
2611 rc = sysfs_format_mac(str, ha->my_mac,
2612 MAC_ADDR_LEN);
2613 break;
2614 default:
2615 rc = -ENOSYS;
2616 break;
2617 }
2618 return rc;
2619}
2620
2621static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
2622{
2623 int rc;
2624
2625 switch (type) {
2626 case ISCSI_BOOT_ETH_FLAGS:
2627 case ISCSI_BOOT_ETH_MAC:
2628 case ISCSI_BOOT_ETH_INDEX:
2629 rc = S_IRUGO;
2630 break;
2631 default:
2632 rc = 0;
2633 break;
2634 }
2635 return rc;
2636}
2637
2638static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
2639{
2640 struct scsi_qla_host *ha = data;
2641 char *str = buf;
2642 int rc;
2643
2644 switch (type) {
2645 case ISCSI_BOOT_INI_INITIATOR_NAME:
2646 rc = sprintf(str, "%s\n", ha->name_string);
2647 break;
2648 default:
2649 rc = -ENOSYS;
2650 break;
2651 }
2652 return rc;
2653}
2654
2655static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
2656{
2657 int rc;
2658
2659 switch (type) {
2660 case ISCSI_BOOT_INI_INITIATOR_NAME:
2661 rc = S_IRUGO;
2662 break;
2663 default:
2664 rc = 0;
2665 break;
2666 }
2667 return rc;
2668}
2669
2670static ssize_t
2671qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
2672 char *buf)
2673{
2674 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2675 char *str = buf;
2676 int rc;
2677
2678 switch (type) {
2679 case ISCSI_BOOT_TGT_NAME:
2680 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
2681 break;
2682 case ISCSI_BOOT_TGT_IP_ADDR:
2683 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
2684 rc = sprintf(buf, "%pI4\n",
2685 &boot_conn->dest_ipaddr.ip_address);
2686 else
2687 rc = sprintf(str, "%pI6\n",
2688 &boot_conn->dest_ipaddr.ip_address);
2689 break;
2690 case ISCSI_BOOT_TGT_PORT:
2691 rc = sprintf(str, "%d\n", boot_conn->dest_port);
2692 break;
2693 case ISCSI_BOOT_TGT_CHAP_NAME:
2694 rc = sprintf(str, "%.*s\n",
2695 boot_conn->chap.target_chap_name_length,
2696 (char *)&boot_conn->chap.target_chap_name);
2697 break;
2698 case ISCSI_BOOT_TGT_CHAP_SECRET:
2699 rc = sprintf(str, "%.*s\n",
2700 boot_conn->chap.target_secret_length,
2701 (char *)&boot_conn->chap.target_secret);
2702 break;
2703 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2704 rc = sprintf(str, "%.*s\n",
2705 boot_conn->chap.intr_chap_name_length,
2706 (char *)&boot_conn->chap.intr_chap_name);
2707 break;
2708 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2709 rc = sprintf(str, "%.*s\n",
2710 boot_conn->chap.intr_secret_length,
2711 (char *)&boot_conn->chap.intr_secret);
2712 break;
2713 case ISCSI_BOOT_TGT_FLAGS:
2714 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2715 break;
2716 case ISCSI_BOOT_TGT_NIC_ASSOC:
2717 rc = sprintf(str, "0\n");
2718 break;
2719 default:
2720 rc = -ENOSYS;
2721 break;
2722 }
2723 return rc;
2724}
2725
2726static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
2727{
2728 struct scsi_qla_host *ha = data;
2729 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
2730
2731 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2732}
2733
2734static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
2735{
2736 struct scsi_qla_host *ha = data;
2737 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
2738
2739 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2740}
2741
2742static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
2743{
2744 int rc;
2745
2746 switch (type) {
2747 case ISCSI_BOOT_TGT_NAME:
2748 case ISCSI_BOOT_TGT_IP_ADDR:
2749 case ISCSI_BOOT_TGT_PORT:
2750 case ISCSI_BOOT_TGT_CHAP_NAME:
2751 case ISCSI_BOOT_TGT_CHAP_SECRET:
2752 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2753 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2754 case ISCSI_BOOT_TGT_NIC_ASSOC:
2755 case ISCSI_BOOT_TGT_FLAGS:
2756 rc = S_IRUGO;
2757 break;
2758 default:
2759 rc = 0;
2760 break;
2761 }
2762 return rc;
2763}
2764
2765static void qla4xxx_boot_release(void *data)
2766{
2767 struct scsi_qla_host *ha = data;
2768
2769 scsi_host_put(ha->host);
2770}
2771
2772static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
2773{
2774 dma_addr_t buf_dma;
2775 uint32_t addr, pri_addr, sec_addr;
2776 uint32_t offset;
2777 uint16_t func_num;
2778 uint8_t val;
2779 uint8_t *buf = NULL;
2780 size_t size = 13 * sizeof(uint8_t);
2781 int ret = QLA_SUCCESS;
2782
2783 func_num = PCI_FUNC(ha->pdev->devfn);
2784
2785 DEBUG2(ql4_printk(KERN_INFO, ha,
2786 "%s: Get FW boot info for 0x%x func %d\n", __func__,
2787 (is_qla4032(ha) ? PCI_DEVICE_ID_QLOGIC_ISP4032 :
2788 PCI_DEVICE_ID_QLOGIC_ISP8022), func_num));
2789
2790 if (is_qla4032(ha)) {
2791 if (func_num == 1) {
2792 addr = NVRAM_PORT0_BOOT_MODE;
2793 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
2794 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
2795 } else if (func_num == 3) {
2796 addr = NVRAM_PORT1_BOOT_MODE;
2797 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
2798 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
2799 } else {
2800 ret = QLA_ERROR;
2801 goto exit_boot_info;
2802 }
2803
2804 /* Check Boot Mode */
2805 val = rd_nvram_byte(ha, addr);
2806 if (!(val & 0x07)) {
2807 DEBUG2(ql4_printk(KERN_ERR, ha,
2808 "%s: Failed Boot options : 0x%x\n",
2809 __func__, val));
2810 ret = QLA_ERROR;
2811 goto exit_boot_info;
2812 }
2813
2814 /* get primary valid target index */
2815 val = rd_nvram_byte(ha, pri_addr);
2816 if (val & BIT_7)
2817 ddb_index[0] = (val & 0x7f);
2a991c21
MR
2818
2819 /* get secondary valid target index */
2820 val = rd_nvram_byte(ha, sec_addr);
2821 if (val & BIT_7)
2822 ddb_index[1] = (val & 0x7f);
2a991c21
MR
2823
2824 } else if (is_qla8022(ha)) {
2825 buf = dma_alloc_coherent(&ha->pdev->dev, size,
2826 &buf_dma, GFP_KERNEL);
2827 if (!buf) {
2828 DEBUG2(ql4_printk(KERN_ERR, ha,
2829 "%s: Unable to allocate dma buffer\n",
2830 __func__));
2831 ret = QLA_ERROR;
2832 goto exit_boot_info;
2833 }
2834
2835 if (ha->port_num == 0)
2836 offset = BOOT_PARAM_OFFSET_PORT0;
2837 else if (ha->port_num == 1)
2838 offset = BOOT_PARAM_OFFSET_PORT1;
2839 else {
2840 ret = QLA_ERROR;
2841 goto exit_boot_info_free;
2842 }
2843 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
2844 offset;
2845 if (qla4xxx_get_flash(ha, buf_dma, addr,
2846 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
 2847 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash "
2848 "failed\n", ha->host_no, __func__));
2849 ret = QLA_ERROR;
2850 goto exit_boot_info_free;
2851 }
2852 /* Check Boot Mode */
2853 if (!(buf[1] & 0x07)) {
2854 DEBUG2(ql4_printk(KERN_INFO, ha,
2855 "Failed: Boot options : 0x%x\n",
2856 buf[1]));
2857 ret = QLA_ERROR;
2858 goto exit_boot_info_free;
2859 }
2860
2861 /* get primary valid target index */
2862 if (buf[2] & BIT_7)
2863 ddb_index[0] = buf[2] & 0x7f;
2a991c21
MR
2864
2865 /* get secondary valid target index */
2866 if (buf[11] & BIT_7)
2867 ddb_index[1] = buf[11] & 0x7f;
2a991c21
MR
2868 } else {
2869 ret = QLA_ERROR;
2870 goto exit_boot_info;
2871 }
2872
2873 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
2874 " target ID %d\n", __func__, ddb_index[0],
2875 ddb_index[1]));
2876
2877exit_boot_info_free:
2878 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
2879exit_boot_info:
2880 return ret;
2881}
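/*
 * Editorial note: in the NVRAM/flash boot-parameter bytes parsed above,
 * BIT_7 marks a boot target entry as valid and the low seven bits carry
 * the DDB index. For example, a raw value of 0x85 would mean "valid, boot
 * target at DDB index 5"; a value with BIT_7 clear leaves the caller's
 * ddb_index[] slot at its 0xffff "not set" default.
 */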
2882
28deb45c
LC
2883/**
2884 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
2885 * @ha: pointer to adapter structure
2886 * @username: CHAP username to be returned
2887 * @password: CHAP password to be returned
2888 *
2889 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
2890 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
 2891 * So, from the CHAP cache, find the first BIDI CHAP entry and set it
 2892 * in the boot record in sysfs.
2893 **/
2894static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
2895 char *password)
2896{
2897 int i, ret = -EINVAL;
2898 int max_chap_entries = 0;
2899 struct ql4_chap_table *chap_table;
2900
2901 if (is_qla8022(ha))
2902 max_chap_entries = (ha->hw.flt_chap_size / 2) /
2903 sizeof(struct ql4_chap_table);
2904 else
2905 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
2906
2907 if (!ha->chap_list) {
2908 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
2909 return ret;
2910 }
2911
2912 mutex_lock(&ha->chap_sem);
2913 for (i = 0; i < max_chap_entries; i++) {
2914 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
2915 if (chap_table->cookie !=
2916 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
2917 continue;
2918 }
2919
2920 if (chap_table->flags & BIT_7) /* local */
2921 continue;
2922
2923 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
2924 continue;
2925
2926 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
2927 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
2928 ret = 0;
2929 break;
2930 }
2931 mutex_unlock(&ha->chap_sem);
2932
2933 return ret;
2934}
2935
2936
2a991c21
MR
2937static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
2938 struct ql4_boot_session_info *boot_sess,
2939 uint16_t ddb_index)
2940{
2941 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2942 struct dev_db_entry *fw_ddb_entry;
2943 dma_addr_t fw_ddb_entry_dma;
2944 uint16_t idx;
2945 uint16_t options;
2946 int ret = QLA_SUCCESS;
2947
2948 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2949 &fw_ddb_entry_dma, GFP_KERNEL);
2950 if (!fw_ddb_entry) {
2951 DEBUG2(ql4_printk(KERN_ERR, ha,
2952 "%s: Unable to allocate dma buffer.\n",
2953 __func__));
2954 ret = QLA_ERROR;
2955 return ret;
2956 }
2957
2958 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
2959 fw_ddb_entry_dma, ddb_index)) {
2960 DEBUG2(ql4_printk(KERN_ERR, ha,
2961 "%s: Flash DDB read Failed\n", __func__));
2962 ret = QLA_ERROR;
2963 goto exit_boot_target;
2964 }
2965
2966 /* Update target name and IP from DDB */
2967 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
2968 min(sizeof(boot_sess->target_name),
2969 sizeof(fw_ddb_entry->iscsi_name)));
2970
2971 options = le16_to_cpu(fw_ddb_entry->options);
2972 if (options & DDB_OPT_IPV6_DEVICE) {
2973 memcpy(&boot_conn->dest_ipaddr.ip_address,
2974 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
2975 } else {
2976 boot_conn->dest_ipaddr.ip_type = 0x1;
2977 memcpy(&boot_conn->dest_ipaddr.ip_address,
2978 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
2979 }
2980
2981 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
2982
2983 /* update chap information */
2984 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2985
2986 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
2987
2988 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
2989
2990 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
2991 target_chap_name,
2992 (char *)&boot_conn->chap.target_secret,
2993 idx);
2994 if (ret) {
2995 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
2996 ret = QLA_ERROR;
2997 goto exit_boot_target;
2998 }
2999
3000 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
3001 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
3002 }
3003
3004 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
3005
3006 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
3007
28deb45c
LC
3008 ret = qla4xxx_get_bidi_chap(ha,
3009 (char *)&boot_conn->chap.intr_chap_name,
3010 (char *)&boot_conn->chap.intr_secret);
3011
2a991c21
MR
3012 if (ret) {
3013 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
3014 ret = QLA_ERROR;
3015 goto exit_boot_target;
3016 }
3017
3018 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
3019 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
3020 }
3021
3022exit_boot_target:
3023 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3024 fw_ddb_entry, fw_ddb_entry_dma);
3025 return ret;
3026}
3027
3028static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
3029{
3030 uint16_t ddb_index[2];
8de5b958
LC
3031 int ret = QLA_ERROR;
3032 int rval;
2a991c21
MR
3033
3034 memset(ddb_index, 0, sizeof(ddb_index));
8de5b958
LC
3035 ddb_index[0] = 0xffff;
3036 ddb_index[1] = 0xffff;
2a991c21
MR
3037 ret = get_fw_boot_info(ha, ddb_index);
3038 if (ret != QLA_SUCCESS) {
3039 DEBUG2(ql4_printk(KERN_ERR, ha,
3040 "%s: Failed to set boot info.\n", __func__));
3041 return ret;
3042 }
3043
8de5b958
LC
3044 if (ddb_index[0] == 0xffff)
3045 goto sec_target;
3046
3047 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
2a991c21 3048 ddb_index[0]);
8de5b958 3049 if (rval != QLA_SUCCESS) {
2a991c21
MR
3050 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3051 "primary target\n", __func__));
8de5b958
LC
3052 } else
3053 ret = QLA_SUCCESS;
2a991c21 3054
8de5b958
LC
3055sec_target:
3056 if (ddb_index[1] == 0xffff)
3057 goto exit_get_boot_info;
3058
3059 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
2a991c21 3060 ddb_index[1]);
8de5b958 3061 if (rval != QLA_SUCCESS) {
2a991c21
MR
3062 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3063 "secondary target\n", __func__));
8de5b958
LC
3064 } else
3065 ret = QLA_SUCCESS;
3066
3067exit_get_boot_info:
2a991c21
MR
3068 return ret;
3069}
3070
3071static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
3072{
3073 struct iscsi_boot_kobj *boot_kobj;
3074
3075 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
3076 return 0;
3077
3078 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
3079 if (!ha->boot_kset)
3080 goto kset_free;
3081
3082 if (!scsi_host_get(ha->host))
3083 goto kset_free;
3084 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
3085 qla4xxx_show_boot_tgt_pri_info,
3086 qla4xxx_tgt_get_attr_visibility,
3087 qla4xxx_boot_release);
3088 if (!boot_kobj)
3089 goto put_host;
3090
3091 if (!scsi_host_get(ha->host))
3092 goto kset_free;
3093 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
3094 qla4xxx_show_boot_tgt_sec_info,
3095 qla4xxx_tgt_get_attr_visibility,
3096 qla4xxx_boot_release);
3097 if (!boot_kobj)
3098 goto put_host;
3099
3100 if (!scsi_host_get(ha->host))
3101 goto kset_free;
3102 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
3103 qla4xxx_show_boot_ini_info,
3104 qla4xxx_ini_get_attr_visibility,
3105 qla4xxx_boot_release);
3106 if (!boot_kobj)
3107 goto put_host;
3108
3109 if (!scsi_host_get(ha->host))
3110 goto kset_free;
3111 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
3112 qla4xxx_show_boot_eth_info,
3113 qla4xxx_eth_get_attr_visibility,
3114 qla4xxx_boot_release);
3115 if (!boot_kobj)
3116 goto put_host;
3117
3118 return 0;
3119
3120put_host:
3121 scsi_host_put(ha->host);
3122kset_free:
3123 iscsi_boot_destroy_kset(ha->boot_kset);
3124 return -ENOMEM;
3125}
3126
4549415a
LC
3127
3128/**
 3129 * qla4xxx_create_chap_list - Create CHAP list from FLASH
3130 * @ha: pointer to adapter structure
3131 *
 3132 * Read flash and make a list of CHAP entries. During login, when a CHAP entry
 3133 * is received, it is checked against this list. If the entry exists, the CHAP
 3134 * entry index is set in the DDB. If the CHAP entry does not exist in this list,
 3135 * a new entry is added to the CHAP table in FLASH and the index obtained is
 3136 * used in the DDB.
3137 **/
3138static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
3139{
3140 int rval = 0;
3141 uint8_t *chap_flash_data = NULL;
3142 uint32_t offset;
3143 dma_addr_t chap_dma;
3144 uint32_t chap_size = 0;
3145
3146 if (is_qla40XX(ha))
3147 chap_size = MAX_CHAP_ENTRIES_40XX *
3148 sizeof(struct ql4_chap_table);
3149 else /* Single region contains CHAP info for both
3150 * ports which is divided into half for each port.
3151 */
3152 chap_size = ha->hw.flt_chap_size / 2;
3153
3154 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
3155 &chap_dma, GFP_KERNEL);
3156 if (!chap_flash_data) {
3157 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
3158 return;
3159 }
3160 if (is_qla40XX(ha))
3161 offset = FLASH_CHAP_OFFSET;
3162 else {
3163 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
3164 if (ha->port_num == 1)
3165 offset += chap_size;
3166 }
3167
3168 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
3169 if (rval != QLA_SUCCESS)
3170 goto exit_chap_list;
3171
3172 if (ha->chap_list == NULL)
3173 ha->chap_list = vmalloc(chap_size);
3174 if (ha->chap_list == NULL) {
3175 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
3176 goto exit_chap_list;
3177 }
3178
3179 memcpy(ha->chap_list, chap_flash_data, chap_size);
3180
3181exit_chap_list:
3182 dma_free_coherent(&ha->pdev->dev, chap_size,
3183 chap_flash_data, chap_dma);
3184 return;
3185}
3186
afaf5a2d
DS
3187/**
3188 * qla4xxx_probe_adapter - callback function to probe HBA
3189 * @pdev: pointer to pci_dev structure
 3190 * @ent: pointer to the matching pci_device_id entry
 3191 *
 3192 * This routine will probe for QLogic 4xxx iSCSI host adapters.
3193 * It returns zero if successful. It also initializes all data necessary for
3194 * the driver.
3195 **/
3196static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
3197 const struct pci_device_id *ent)
3198{
3199 int ret = -ENODEV, status;
3200 struct Scsi_Host *host;
3201 struct scsi_qla_host *ha;
afaf5a2d
DS
3202 uint8_t init_retry_count = 0;
3203 char buf[34];
f4f5df23 3204 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
f9880e76 3205 uint32_t dev_state;
afaf5a2d
DS
3206
3207 if (pci_enable_device(pdev))
3208 return -1;
3209
b3a271a9 3210 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
afaf5a2d
DS
3211 if (host == NULL) {
3212 printk(KERN_WARNING
3213 "qla4xxx: Couldn't allocate host from scsi layer!\n");
3214 goto probe_disable_device;
3215 }
3216
3217 /* Clear our data area */
b3a271a9 3218 ha = to_qla_host(host);
afaf5a2d
DS
3219 memset(ha, 0, sizeof(*ha));
3220
3221 /* Save the information from PCI BIOS. */
3222 ha->pdev = pdev;
3223 ha->host = host;
3224 ha->host_no = host->host_no;
3225
2232be0d
LC
3226 pci_enable_pcie_error_reporting(pdev);
3227
f4f5df23
VC
3228 /* Setup Runtime configurable options */
3229 if (is_qla8022(ha)) {
3230 ha->isp_ops = &qla4_8xxx_isp_ops;
3231 rwlock_init(&ha->hw_lock);
3232 ha->qdr_sn_window = -1;
3233 ha->ddr_mn_window = -1;
3234 ha->curr_window = 255;
3235 ha->func_num = PCI_FUNC(ha->pdev->devfn);
3236 nx_legacy_intr = &legacy_intr[ha->func_num];
3237 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
3238 ha->nx_legacy_intr.tgt_status_reg =
3239 nx_legacy_intr->tgt_status_reg;
3240 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
3241 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
3242 } else {
3243 ha->isp_ops = &qla4xxx_isp_ops;
3244 }
3245
2232be0d
LC
3246 /* Set EEH reset type to fundamental if required by hba */
3247 if (is_qla8022(ha))
3248 pdev->needs_freset = 1;
3249
afaf5a2d 3250 /* Configure PCI I/O space. */
f4f5df23 3251 ret = ha->isp_ops->iospace_config(ha);
afaf5a2d 3252 if (ret)
f4f5df23 3253 goto probe_failed_ioconfig;
afaf5a2d 3254
c2660df3 3255 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
afaf5a2d
DS
3256 pdev->device, pdev->irq, ha->reg);
3257
3258 qla4xxx_config_dma_addressing(ha);
3259
3260 /* Initialize lists and spinlocks. */
afaf5a2d
DS
3261 INIT_LIST_HEAD(&ha->free_srb_q);
3262
3263 mutex_init(&ha->mbox_sem);
4549415a 3264 mutex_init(&ha->chap_sem);
f4f5df23 3265 init_completion(&ha->mbx_intr_comp);
95d31262 3266 init_completion(&ha->disable_acb_comp);
afaf5a2d
DS
3267
3268 spin_lock_init(&ha->hardware_lock);
afaf5a2d
DS
3269
3270 /* Allocate dma buffers */
3271 if (qla4xxx_mem_alloc(ha)) {
c2660df3
VC
3272 ql4_printk(KERN_WARNING, ha,
3273 "[ERROR] Failed to allocate memory for adapter\n");
afaf5a2d
DS
3274
3275 ret = -ENOMEM;
3276 goto probe_failed;
3277 }
3278
b3a271a9
MR
3279 host->cmd_per_lun = 3;
3280 host->max_channel = 0;
3281 host->max_lun = MAX_LUNS - 1;
3282 host->max_id = MAX_TARGETS;
3283 host->max_cmd_len = IOCB_MAX_CDB_LEN;
3284 host->can_queue = MAX_SRBS ;
3285 host->transportt = qla4xxx_scsi_transport;
3286
3287 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
3288 if (ret) {
3289 ql4_printk(KERN_WARNING, ha,
3290 "%s: scsi_init_shared_tag_map failed\n", __func__);
3291 goto probe_failed;
3292 }
3293
3294 pci_set_drvdata(pdev, ha);
3295
3296 ret = scsi_add_host(host, &pdev->dev);
3297 if (ret)
3298 goto probe_failed;
3299
f4f5df23
VC
3300 if (is_qla8022(ha))
3301 (void) qla4_8xxx_get_flash_info(ha);
3302
afaf5a2d
DS
3303 /*
3304 * Initialize the Host adapter request/response queues and
3305 * firmware
3306 * NOTE: interrupts enabled upon successful completion
3307 */
0e7e8501 3308 status = qla4xxx_initialize_adapter(ha);
f4f5df23
VC
3309 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
3310 init_retry_count++ < MAX_INIT_RETRIES) {
f9880e76
PM
3311
3312 if (is_qla8022(ha)) {
3313 qla4_8xxx_idc_lock(ha);
3314 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3315 qla4_8xxx_idc_unlock(ha);
3316 if (dev_state == QLA82XX_DEV_FAILED) {
3317 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
3318 "initialize adapter. H/W is in failed state\n",
3319 __func__);
3320 break;
3321 }
3322 }
afaf5a2d
DS
3323 DEBUG2(printk("scsi: %s: retrying adapter initialization "
3324 "(%d)\n", __func__, init_retry_count));
f4f5df23
VC
3325
3326 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
3327 continue;
3328
0e7e8501 3329 status = qla4xxx_initialize_adapter(ha);
afaf5a2d 3330 }
f4f5df23
VC
3331
3332 if (!test_bit(AF_ONLINE, &ha->flags)) {
c2660df3 3333 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
afaf5a2d 3334
fe998527
LC
3335 if (is_qla8022(ha) && ql4xdontresethba) {
3336 /* Put the device in failed state. */
3337 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
3338 qla4_8xxx_idc_lock(ha);
3339 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3340 QLA82XX_DEV_FAILED);
3341 qla4_8xxx_idc_unlock(ha);
3342 }
afaf5a2d 3343 ret = -ENODEV;
b3a271a9 3344 goto remove_host;
afaf5a2d
DS
3345 }
3346
afaf5a2d
DS
3347 /* Startup the kernel thread for this host adapter. */
3348 DEBUG2(printk("scsi: %s: Starting kernel thread for "
3349 "qla4xxx_dpc\n", __func__));
3350 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
3351 ha->dpc_thread = create_singlethread_workqueue(buf);
3352 if (!ha->dpc_thread) {
c2660df3 3353 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
afaf5a2d 3354 ret = -ENODEV;
b3a271a9 3355 goto remove_host;
afaf5a2d 3356 }
c4028958 3357 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
afaf5a2d 3358
b3a271a9
MR
3359 sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
3360 ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
3361 if (!ha->task_wq) {
3362 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
3363 ret = -ENODEV;
3364 goto remove_host;
3365 }
3366
f4f5df23
VC
3367 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
3368 * (which is called indirectly by qla4xxx_initialize_adapter),
3369 * so that irqs will be registered after crbinit but before
3370 * mbx_intr_enable.
3371 */
3372 if (!is_qla8022(ha)) {
3373 ret = qla4xxx_request_irqs(ha);
3374 if (ret) {
3375 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
3376 "interrupt %d already in use.\n", pdev->irq);
b3a271a9 3377 goto remove_host;
f4f5df23 3378 }
afaf5a2d 3379 }
afaf5a2d 3380
2232be0d 3381 pci_save_state(ha->pdev);
f4f5df23 3382 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
3383
3384 /* Start timer thread. */
3385 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
3386
3387 set_bit(AF_INIT_DONE, &ha->flags);
3388
afaf5a2d
DS
3389 printk(KERN_INFO
3390 " QLogic iSCSI HBA Driver version: %s\n"
3391 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
3392 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
3393 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
3394 ha->patch_number, ha->build_number);
ed1086e0 3395
4549415a
LC
3396 qla4xxx_create_chap_list(ha);
3397
2a991c21
MR
3398 if (qla4xxx_setup_boot_info(ha))
3399 ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
3400 __func__);
3401
ed1086e0 3402 qla4xxx_create_ifaces(ha);
afaf5a2d
DS
3403 return 0;
3404
b3a271a9
MR
3405remove_host:
3406 scsi_remove_host(ha->host);
3407
afaf5a2d
DS
3408probe_failed:
3409 qla4xxx_free_adapter(ha);
f4f5df23
VC
3410
3411probe_failed_ioconfig:
2232be0d 3412 pci_disable_pcie_error_reporting(pdev);
afaf5a2d
DS
3413 scsi_host_put(ha->host);
3414
3415probe_disable_device:
3416 pci_disable_device(pdev);
3417
3418 return ret;
3419}
3420
7eece5a0
KH
3421/**
 3422 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initializing
3423 * @ha: pointer to adapter structure
3424 *
3425 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
3426 * so that the other port will not re-initialize while in the process of
3427 * removing the ha due to driver unload or hba hotplug.
3428 **/
3429static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
3430{
3431 struct scsi_qla_host *other_ha = NULL;
3432 struct pci_dev *other_pdev = NULL;
3433 int fn = ISP4XXX_PCI_FN_2;
3434
 3435 /* iSCSI function numbers for ISP4xxx are 1 and 3 */
3436 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
3437 fn = ISP4XXX_PCI_FN_1;
3438
3439 other_pdev =
3440 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3441 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
3442 fn));
3443
3444 /* Get other_ha if other_pdev is valid and state is enable*/
3445 if (other_pdev) {
3446 if (atomic_read(&other_pdev->enable_cnt)) {
3447 other_ha = pci_get_drvdata(other_pdev);
3448 if (other_ha) {
3449 set_bit(AF_HA_REMOVAL, &other_ha->flags);
3450 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
3451 "Prevent %s reinit\n", __func__,
3452 dev_name(&other_ha->pdev->dev)));
3453 }
3454 }
3455 pci_dev_put(other_pdev);
3456 }
3457}
3458
afaf5a2d
DS
3459/**
 3460 * qla4xxx_remove_adapter - callback function to remove adapter.
 3461 * @pdev: PCI device pointer
3462 **/
3463static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
3464{
3465 struct scsi_qla_host *ha;
3466
3467 ha = pci_get_drvdata(pdev);
3468
7eece5a0
KH
3469 if (!is_qla8022(ha))
3470 qla4xxx_prevent_other_port_reinit(ha);
bee4fe8e 3471
ed1086e0
VC
3472 /* destroy iface from sysfs */
3473 qla4xxx_destroy_ifaces(ha);
3474
2a991c21
MR
3475 if (ha->boot_kset)
3476 iscsi_boot_destroy_kset(ha->boot_kset);
3477
afaf5a2d
DS
3478 scsi_remove_host(ha->host);
3479
3480 qla4xxx_free_adapter(ha);
3481
3482 scsi_host_put(ha->host);
3483
2232be0d 3484 pci_disable_pcie_error_reporting(pdev);
f4f5df23 3485 pci_disable_device(pdev);
afaf5a2d
DS
3486 pci_set_drvdata(pdev, NULL);
3487}
3488
3489/**
3490 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
3491 * @ha: HA context
3492 *
3493 * At exit, the @ha's flags.enable_64bit_addressing is set to indicate the
3494 * supported addressing method.
3495 */
47975477 3496static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
afaf5a2d
DS
3497{
3498 int retval;
3499
3500 /* Update our PCI device dma_mask for full 64 bit mask */
6a35528a
YH
3501 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
3502 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
afaf5a2d
DS
3503 dev_dbg(&ha->pdev->dev,
3504 "Failed to set 64 bit PCI consistent mask; "
3505 "using 32 bit.\n");
3506 retval = pci_set_consistent_dma_mask(ha->pdev,
284901a9 3507 DMA_BIT_MASK(32));
afaf5a2d
DS
3508 }
3509 } else
284901a9 3510 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
afaf5a2d
DS
3511}
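
For comparison, the same 64-bit-with-32-bit-fallback policy can be written with the combined DMA-mask helper available in later kernels; this is a hedged sketch rather than the driver's code, and it assumes dma_set_mask_and_coherent() is available:

/* Illustrative sketch only: combined streaming/coherent mask setup. */
static void ql4_config_dma_addressing_sketch(struct scsi_qla_host *ha)
{
	/* Try full 64-bit addressing first, fall back to 32-bit on failure */
	if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64)))
		dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
}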
3512
3513static int qla4xxx_slave_alloc(struct scsi_device *sdev)
3514{
b3a271a9
MR
3515 struct iscsi_cls_session *cls_sess;
3516 struct iscsi_session *sess;
3517 struct ddb_entry *ddb;
8bb4033d 3518 int queue_depth = QL4_DEF_QDEPTH;
afaf5a2d 3519
b3a271a9
MR
3520 cls_sess = starget_to_session(sdev->sdev_target);
3521 sess = cls_sess->dd_data;
3522 ddb = sess->dd_data;
3523
afaf5a2d
DS
3524 sdev->hostdata = ddb;
3525 sdev->tagged_supported = 1;
8bb4033d
VC
3526
3527 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
3528 queue_depth = ql4xmaxqdepth;
3529
3530 scsi_activate_tcq(sdev, queue_depth);
afaf5a2d
DS
3531 return 0;
3532}
3533
3534static int qla4xxx_slave_configure(struct scsi_device *sdev)
3535{
3536 sdev->tagged_supported = 1;
3537 return 0;
3538}
3539
3540static void qla4xxx_slave_destroy(struct scsi_device *sdev)
3541{
3542 scsi_deactivate_tcq(sdev, 1);
3543}
3544
3545/**
3546 * qla4xxx_del_from_active_array - returns an active srb
3547 * @ha: Pointer to host adapter structure.
fd589a8f 3548 * @index: index into the active_array
afaf5a2d
DS
3549 *
3550 * This routine removes and returns the srb at the specified index
3551 **/
f4f5df23
VC
3552struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
3553 uint32_t index)
afaf5a2d
DS
3554{
3555 struct srb *srb = NULL;
5369887a 3556 struct scsi_cmnd *cmd = NULL;
afaf5a2d 3557
5369887a
VC
3558 cmd = scsi_host_find_tag(ha->host, index);
3559 if (!cmd)
afaf5a2d
DS
3560 return srb;
3561
5369887a
VC
3562 srb = (struct srb *)CMD_SP(cmd);
3563 if (!srb)
afaf5a2d
DS
3564 return srb;
3565
3566 /* update counters */
3567 if (srb->flags & SRB_DMA_VALID) {
3568 ha->req_q_count += srb->iocb_cnt;
3569 ha->iocb_cnt -= srb->iocb_cnt;
3570 if (srb->cmd)
5369887a
VC
3571 srb->cmd->host_scribble =
3572 (unsigned char *)(unsigned long) MAX_SRBS;
afaf5a2d
DS
3573 }
3574 return srb;
3575}
3576
afaf5a2d
DS
3577/**
3578 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
09a0f719 3579 * @ha: Pointer to host adapter structure.
afaf5a2d
DS
3580 * @cmd: Scsi Command to wait on.
3581 *
3582 * This routine waits for the command to be returned by the firmware
3583 * for a bounded amount of time (EH_WAIT_CMD_TOV).
3584 **/
3585static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
3586 struct scsi_cmnd *cmd)
3587{
3588 int done = 0;
3589 struct srb *rp;
3590 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
2232be0d
LC
3591 int ret = SUCCESS;
3592
3593	/* Don't wait on the command if a PCI error is being handled
3594	 * by the PCI AER driver
3595 */
3596 if (unlikely(pci_channel_offline(ha->pdev)) ||
3597 (test_bit(AF_EEH_BUSY, &ha->flags))) {
3598 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
3599 ha->host_no, __func__);
3600 return ret;
3601 }
afaf5a2d
DS
3602
3603 do {
3604		/* Check to see if it has been returned to the OS */
5369887a 3605 rp = (struct srb *) CMD_SP(cmd);
afaf5a2d
DS
3606 if (rp == NULL) {
3607 done++;
3608 break;
3609 }
3610
3611 msleep(2000);
3612 } while (max_wait_time--);
3613
3614 return done;
3615}
3616
3617/**
3618 * qla4xxx_wait_for_hba_online - waits for HBA to come online
3619 * @ha: Pointer to host adapter structure
3620 **/
3621static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
3622{
3623 unsigned long wait_online;
3624
f581a3f7 3625 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
afaf5a2d
DS
3626 while (time_before(jiffies, wait_online)) {
3627
3628 if (adapter_up(ha))
3629 return QLA_SUCCESS;
afaf5a2d
DS
3630
3631 msleep(2000);
3632 }
3633
3634 return QLA_ERROR;
3635}
3636
3637/**
ce545039 3638 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
fd589a8f 3639 * @ha: pointer to HBA
afaf5a2d
DS
3640 * @stgt: SCSI target to wait on
3641 * @sdev: SCSI device to wait on (NULL means all devices on @stgt)
3642 *
3643 * This function waits for all outstanding commands to the target or device to
3644 * complete. It returns 0 if all pending commands are returned and 1 otherwise.
3645 **/
ce545039
MC
3646static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
3647 struct scsi_target *stgt,
3648 struct scsi_device *sdev)
afaf5a2d
DS
3649{
3650 int cnt;
3651 int status = 0;
3652 struct scsi_cmnd *cmd;
3653
3654 /*
ce545039
MC
3655 * Waiting for all commands for the designated target or dev
3656 * in the active array
afaf5a2d
DS
3657 */
3658 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
3659 cmd = scsi_host_find_tag(ha->host, cnt);
ce545039
MC
3660 if (cmd && stgt == scsi_target(cmd->device) &&
3661 (!sdev || sdev == cmd->device)) {
afaf5a2d
DS
3662 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3663 status++;
3664 break;
3665 }
3666 }
3667 }
3668 return status;
3669}
3670
09a0f719
VC
3671/**
3672 * qla4xxx_eh_abort - callback for abort task.
3673 * @cmd: Pointer to Linux's SCSI command structure
3674 *
3675 * This routine is called by the Linux OS to abort the specified
3676 * command.
3677 **/
3678static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
3679{
3680 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3681 unsigned int id = cmd->device->id;
3682 unsigned int lun = cmd->device->lun;
92b3e5bb 3683 unsigned long flags;
09a0f719
VC
3684 struct srb *srb = NULL;
3685 int ret = SUCCESS;
3686 int wait = 0;
3687
c2660df3 3688 ql4_printk(KERN_INFO, ha,
5cd049a5
CH
3689 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
3690 ha->host_no, id, lun, cmd);
09a0f719 3691
92b3e5bb 3692 spin_lock_irqsave(&ha->hardware_lock, flags);
09a0f719 3693 srb = (struct srb *) CMD_SP(cmd);
92b3e5bb
MC
3694 if (!srb) {
3695 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09a0f719 3696 return SUCCESS;
92b3e5bb 3697 }
09a0f719 3698 kref_get(&srb->srb_ref);
92b3e5bb 3699 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09a0f719
VC
3700
3701 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
3702 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
3703 ha->host_no, id, lun));
3704 ret = FAILED;
3705 } else {
3706 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
3707 ha->host_no, id, lun));
3708 wait = 1;
3709 }
3710
3711 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3712
3713 /* Wait for command to complete */
3714 if (wait) {
3715 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3716 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
3717 ha->host_no, id, lun));
3718 ret = FAILED;
3719 }
3720 }
3721
c2660df3 3722 ql4_printk(KERN_INFO, ha,
09a0f719 3723 "scsi%ld:%d:%d: Abort command - %s\n",
25985edc 3724 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
09a0f719
VC
3725
3726 return ret;
3727}
3728
afaf5a2d
DS
3729/**
3730 * qla4xxx_eh_device_reset - callback for device (lun) reset.
3731 * @cmd: Pointer to Linux's SCSI command structure
3732 *
3733 * This routine is called by the Linux OS to reset the lun addressed by
3734 * the specified command.
3735 **/
3736static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
3737{
3738 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3739 struct ddb_entry *ddb_entry = cmd->device->hostdata;
afaf5a2d
DS
3740 int ret = FAILED, stat;
3741
612f7348 3742 if (!ddb_entry)
afaf5a2d
DS
3743 return ret;
3744
c01be6dc
MC
3745 ret = iscsi_block_scsi_eh(cmd);
3746 if (ret)
3747 return ret;
3748 ret = FAILED;
3749
c2660df3 3750 ql4_printk(KERN_INFO, ha,
afaf5a2d
DS
3751 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
3752 cmd->device->channel, cmd->device->id, cmd->device->lun);
3753
3754 DEBUG2(printk(KERN_INFO
3755 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
3756 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
242f9dcb 3757 cmd, jiffies, cmd->request->timeout / HZ,
afaf5a2d
DS
3758 ha->dpc_flags, cmd->result, cmd->allowed));
3759
3760 /* FIXME: wait for hba to go online */
3761 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
3762 if (stat != QLA_SUCCESS) {
c2660df3 3763 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
afaf5a2d
DS
3764 goto eh_dev_reset_done;
3765 }
3766
ce545039
MC
3767 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3768 cmd->device)) {
c2660df3 3769 ql4_printk(KERN_INFO, ha,
ce545039
MC
3770 "DEVICE RESET FAILED - waiting for "
3771 "commands.\n");
3772 goto eh_dev_reset_done;
afaf5a2d
DS
3773 }
3774
9d562913
DS
3775 /* Send marker. */
3776 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3777 MM_LUN_RESET) != QLA_SUCCESS)
3778 goto eh_dev_reset_done;
3779
c2660df3 3780 ql4_printk(KERN_INFO, ha,
afaf5a2d
DS
3781 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
3782 ha->host_no, cmd->device->channel, cmd->device->id,
3783 cmd->device->lun);
3784
3785 ret = SUCCESS;
3786
3787eh_dev_reset_done:
3788
3789 return ret;
3790}
3791
ce545039
MC
3792/**
3793 * qla4xxx_eh_target_reset - callback for target reset.
3794 * @cmd: Pointer to Linux's SCSI command structure
3795 *
3796 * This routine is called by the Linux OS to reset the target.
3797 **/
3798static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
3799{
3800 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3801 struct ddb_entry *ddb_entry = cmd->device->hostdata;
c01be6dc 3802 int stat, ret;
ce545039
MC
3803
3804 if (!ddb_entry)
3805 return FAILED;
3806
c01be6dc
MC
3807 ret = iscsi_block_scsi_eh(cmd);
3808 if (ret)
3809 return ret;
3810
ce545039
MC
3811 starget_printk(KERN_INFO, scsi_target(cmd->device),
3812 "WARM TARGET RESET ISSUED.\n");
3813
3814 DEBUG2(printk(KERN_INFO
3815 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
3816 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
242f9dcb 3817 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
ce545039
MC
3818 ha->dpc_flags, cmd->result, cmd->allowed));
3819
3820 stat = qla4xxx_reset_target(ha, ddb_entry);
3821 if (stat != QLA_SUCCESS) {
3822 starget_printk(KERN_INFO, scsi_target(cmd->device),
3823 "WARM TARGET RESET FAILED.\n");
3824 return FAILED;
3825 }
3826
ce545039
MC
3827 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3828 NULL)) {
3829 starget_printk(KERN_INFO, scsi_target(cmd->device),
3830 "WARM TARGET DEVICE RESET FAILED - "
3831 "waiting for commands.\n");
3832 return FAILED;
3833 }
3834
9d562913
DS
3835 /* Send marker. */
3836 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3837 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
3838 starget_printk(KERN_INFO, scsi_target(cmd->device),
3839 "WARM TARGET DEVICE RESET FAILED - "
3840 "marker iocb failed.\n");
3841 return FAILED;
3842 }
3843
ce545039
MC
3844 starget_printk(KERN_INFO, scsi_target(cmd->device),
3845 "WARM TARGET RESET SUCCEEDED.\n");
3846 return SUCCESS;
3847}
3848
afaf5a2d
DS
3849/**
3850 * qla4xxx_eh_host_reset - kernel callback
3851 * @cmd: Pointer to Linux's SCSI command structure
3852 *
3853 * This routine is invoked by the Linux kernel to perform fatal error
3854 * recovery on the specified adapter.
3855 **/
3856static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
3857{
3858 int return_status = FAILED;
3859 struct scsi_qla_host *ha;
3860
b3a271a9 3861 ha = to_qla_host(cmd->device->host);
afaf5a2d 3862
f4f5df23
VC
3863 if (ql4xdontresethba) {
3864 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3865 ha->host_no, __func__));
3866 return FAILED;
3867 }
3868
c2660df3 3869 ql4_printk(KERN_INFO, ha,
dca05c4c 3870 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
afaf5a2d
DS
3871 cmd->device->channel, cmd->device->id, cmd->device->lun);
3872
3873 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
3874 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
3875 "DEAD.\n", ha->host_no, cmd->device->channel,
3876 __func__));
3877
3878 return FAILED;
3879 }
3880
f4f5df23
VC
3881 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3882 if (is_qla8022(ha))
3883 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3884 else
3885 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3886 }
50a29aec 3887
f4f5df23 3888 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
afaf5a2d 3889 return_status = SUCCESS;
afaf5a2d 3890
c2660df3 3891 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
25985edc 3892 return_status == FAILED ? "FAILED" : "SUCCEEDED");
afaf5a2d
DS
3893
3894 return return_status;
3895}
3896
95d31262
VC
3897static int qla4xxx_context_reset(struct scsi_qla_host *ha)
3898{
3899 uint32_t mbox_cmd[MBOX_REG_COUNT];
3900 uint32_t mbox_sts[MBOX_REG_COUNT];
3901 struct addr_ctrl_blk_def *acb = NULL;
3902 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
3903 int rval = QLA_SUCCESS;
3904 dma_addr_t acb_dma;
3905
3906 acb = dma_alloc_coherent(&ha->pdev->dev,
3907 sizeof(struct addr_ctrl_blk_def),
3908 &acb_dma, GFP_KERNEL);
3909 if (!acb) {
3910 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
3911 __func__);
3912 rval = -ENOMEM;
3913 goto exit_port_reset;
3914 }
3915
3916 memset(acb, 0, acb_len);
3917
3918 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
3919 if (rval != QLA_SUCCESS) {
3920 rval = -EIO;
3921 goto exit_free_acb;
3922 }
3923
3924 rval = qla4xxx_disable_acb(ha);
3925 if (rval != QLA_SUCCESS) {
3926 rval = -EIO;
3927 goto exit_free_acb;
3928 }
3929
3930 wait_for_completion_timeout(&ha->disable_acb_comp,
3931 DISABLE_ACB_TOV * HZ);
3932
3933 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
3934 if (rval != QLA_SUCCESS) {
3935 rval = -EIO;
3936 goto exit_free_acb;
3937 }
3938
3939exit_free_acb:
3940 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
3941 acb, acb_dma);
3942exit_port_reset:
3943 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
3944 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
3945 return rval;
3946}
3947
3948static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
3949{
3950 struct scsi_qla_host *ha = to_qla_host(shost);
3951 int rval = QLA_SUCCESS;
3952
3953 if (ql4xdontresethba) {
3954 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
3955 __func__));
3956 rval = -EPERM;
3957 goto exit_host_reset;
3958 }
3959
3960 rval = qla4xxx_wait_for_hba_online(ha);
3961 if (rval != QLA_SUCCESS) {
3962 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
3963 "adapter\n", __func__));
3964 rval = -EIO;
3965 goto exit_host_reset;
3966 }
3967
3968 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3969 goto recover_adapter;
3970
3971 switch (reset_type) {
3972 case SCSI_ADAPTER_RESET:
3973 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3974 break;
3975 case SCSI_FIRMWARE_RESET:
3976 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3977 if (is_qla8022(ha))
3978 /* set firmware context reset */
3979 set_bit(DPC_RESET_HA_FW_CONTEXT,
3980 &ha->dpc_flags);
3981 else {
3982 rval = qla4xxx_context_reset(ha);
3983 goto exit_host_reset;
3984 }
3985 }
3986 break;
3987 }
3988
3989recover_adapter:
3990 rval = qla4xxx_recover_adapter(ha);
3991 if (rval != QLA_SUCCESS) {
3992 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
3993 __func__));
3994 rval = -EIO;
3995 }
3996
3997exit_host_reset:
3998 return rval;
3999}
4000
2232be0d
LC
4001 /* The PCI AER driver recovers from all correctable errors without
4002 * driver intervention. For uncorrectable errors the PCI AER driver
4003 * calls the following device driver callbacks:
4004 *
4005 * - Fatal Errors - link_reset
4006 * - Non-Fatal Errors - driver's pci_error_detected(), which
4007 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
4008 *
4009 * The PCI AER driver then calls:
4010 * CAN_RECOVER - driver's pci_mmio_enabled(); mmio_enabled
4011 * returns RECOVERED, or NEED_RESET if the firmware is hung
4012 * NEED_RESET - driver's slot_reset()
4013 * DISCONNECT - device is dead and cannot recover
4014 * RECOVERED - driver's pci_resume()
4015 */
4016static pci_ers_result_t
4017qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
4018{
4019 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4020
4021 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
4022 ha->host_no, __func__, state);
4023
4024 if (!is_aer_supported(ha))
4025 return PCI_ERS_RESULT_NONE;
4026
4027 switch (state) {
4028 case pci_channel_io_normal:
4029 clear_bit(AF_EEH_BUSY, &ha->flags);
4030 return PCI_ERS_RESULT_CAN_RECOVER;
4031 case pci_channel_io_frozen:
4032 set_bit(AF_EEH_BUSY, &ha->flags);
4033 qla4xxx_mailbox_premature_completion(ha);
4034 qla4xxx_free_irqs(ha);
4035 pci_disable_device(pdev);
7b3595df
VC
4036 /* Return back all IOs */
4037 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2232be0d
LC
4038 return PCI_ERS_RESULT_NEED_RESET;
4039 case pci_channel_io_perm_failure:
4040 set_bit(AF_EEH_BUSY, &ha->flags);
4041 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
4042 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
4043 return PCI_ERS_RESULT_DISCONNECT;
4044 }
4045 return PCI_ERS_RESULT_NEED_RESET;
4046}
4047
4048/**
4049 * qla4xxx_pci_mmio_enabled() gets called if
4050 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
4051 * and reads/writes to the device still work.
4052 **/
4053static pci_ers_result_t
4054qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
4055{
4056 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4057
4058 if (!is_aer_supported(ha))
4059 return PCI_ERS_RESULT_NONE;
4060
7b3595df 4061 return PCI_ERS_RESULT_RECOVERED;
2232be0d
LC
4062}
4063
7b3595df 4064static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2232be0d
LC
4065{
4066 uint32_t rval = QLA_ERROR;
7b3595df 4067 uint32_t ret = 0;
2232be0d
LC
4068 int fn;
4069 struct pci_dev *other_pdev = NULL;
4070
4071 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
4072
4073 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4074
4075 if (test_bit(AF_ONLINE, &ha->flags)) {
4076 clear_bit(AF_ONLINE, &ha->flags);
b3a271a9
MR
4077 clear_bit(AF_LINK_UP, &ha->flags);
4078 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2232be0d 4079 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2232be0d
LC
4080 }
4081
4082 fn = PCI_FUNC(ha->pdev->devfn);
4083 while (fn > 0) {
4084 fn--;
4085 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
4086 "func %x\n", ha->host_no, __func__, fn);
4087 /* Get the pci device given the domain, bus,
4088 * slot/function number */
4089 other_pdev =
4090 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
4091 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
4092 fn));
4093
4094 if (!other_pdev)
4095 continue;
4096
4097 if (atomic_read(&other_pdev->enable_cnt)) {
4098 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
4099 "func in enabled state%x\n", ha->host_no,
4100 __func__, fn);
4101 pci_dev_put(other_pdev);
4102 break;
4103 }
4104 pci_dev_put(other_pdev);
4105 }
4106
4107	/* The first function on the card, the reset owner, will
4108	 * start and initialize the firmware. The other functions
4109	 * on the card will only reset their firmware context.
4110 */
4111 if (!fn) {
4112 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
4113 "0x%x is the owner\n", ha->host_no, __func__,
4114 ha->pdev->devfn);
4115
4116 qla4_8xxx_idc_lock(ha);
4117 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4118 QLA82XX_DEV_COLD);
4119
4120 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
4121 QLA82XX_IDC_VERSION);
4122
4123 qla4_8xxx_idc_unlock(ha);
4124 clear_bit(AF_FW_RECOVERY, &ha->flags);
0e7e8501 4125 rval = qla4xxx_initialize_adapter(ha);
2232be0d
LC
4126 qla4_8xxx_idc_lock(ha);
4127
4128 if (rval != QLA_SUCCESS) {
4129 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
4130 "FAILED\n", ha->host_no, __func__);
4131 qla4_8xxx_clear_drv_active(ha);
4132 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4133 QLA82XX_DEV_FAILED);
4134 } else {
4135 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
4136 "READY\n", ha->host_no, __func__);
4137 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4138 QLA82XX_DEV_READY);
4139 /* Clear driver state register */
4140 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
4141 qla4_8xxx_set_drv_active(ha);
7b3595df
VC
4142 ret = qla4xxx_request_irqs(ha);
4143 if (ret) {
4144 ql4_printk(KERN_WARNING, ha, "Failed to "
4145 "reserve interrupt %d already in use.\n",
4146 ha->pdev->irq);
4147 rval = QLA_ERROR;
4148 } else {
4149 ha->isp_ops->enable_intrs(ha);
4150 rval = QLA_SUCCESS;
4151 }
2232be0d
LC
4152 }
4153 qla4_8xxx_idc_unlock(ha);
4154 } else {
4155 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
4156 "the reset owner\n", ha->host_no, __func__,
4157 ha->pdev->devfn);
4158 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4159 QLA82XX_DEV_READY)) {
4160 clear_bit(AF_FW_RECOVERY, &ha->flags);
0e7e8501 4161 rval = qla4xxx_initialize_adapter(ha);
7b3595df
VC
4162 if (rval == QLA_SUCCESS) {
4163 ret = qla4xxx_request_irqs(ha);
4164 if (ret) {
4165 ql4_printk(KERN_WARNING, ha, "Failed to"
4166 " reserve interrupt %d already in"
4167 " use.\n", ha->pdev->irq);
4168 rval = QLA_ERROR;
4169 } else {
4170 ha->isp_ops->enable_intrs(ha);
4171 rval = QLA_SUCCESS;
4172 }
4173 }
2232be0d
LC
4174 qla4_8xxx_idc_lock(ha);
4175 qla4_8xxx_set_drv_active(ha);
4176 qla4_8xxx_idc_unlock(ha);
4177 }
4178 }
4179 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4180 return rval;
4181}
4182
4183static pci_ers_result_t
4184qla4xxx_pci_slot_reset(struct pci_dev *pdev)
4185{
4186 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
4187 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4188 int rc;
4189
4190 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
4191 ha->host_no, __func__);
4192
4193 if (!is_aer_supported(ha))
4194 return PCI_ERS_RESULT_NONE;
4195
4196 /* Restore the saved state of PCIe device -
4197 * BAR registers, PCI Config space, PCIX, MSI,
4198 * IOV states
4199 */
4200 pci_restore_state(pdev);
4201
4202	/* pci_restore_state() clears the device's saved_state flag, so
4203	 * save the state again to set the saved_state flag back.
4204 */
4205 pci_save_state(pdev);
4206
4207 /* Initialize device or resume if in suspended state */
4208 rc = pci_enable_device(pdev);
4209 if (rc) {
25985edc 4210 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
2232be0d
LC
4211 "device after reset\n", ha->host_no, __func__);
4212 goto exit_slot_reset;
4213 }
4214
7b3595df 4215 ha->isp_ops->disable_intrs(ha);
2232be0d
LC
4216
4217 if (is_qla8022(ha)) {
4218 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
4219 ret = PCI_ERS_RESULT_RECOVERED;
4220 goto exit_slot_reset;
4221 } else
4222 goto exit_slot_reset;
4223 }
4224
4225exit_slot_reset:
4226	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
4227		   ha->host_no, __func__, ret);
4228 return ret;
4229}
4230
4231static void
4232qla4xxx_pci_resume(struct pci_dev *pdev)
4233{
4234 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4235 int ret;
4236
4237 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
4238 ha->host_no, __func__);
4239
4240 ret = qla4xxx_wait_for_hba_online(ha);
4241 if (ret != QLA_SUCCESS) {
4242 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
4243 "resume I/O from slot/link_reset\n", ha->host_no,
4244 __func__);
4245 }
4246
4247 pci_cleanup_aer_uncorrect_error_status(pdev);
4248 clear_bit(AF_EEH_BUSY, &ha->flags);
4249}
4250
4251static struct pci_error_handlers qla4xxx_err_handler = {
4252 .error_detected = qla4xxx_pci_error_detected,
4253 .mmio_enabled = qla4xxx_pci_mmio_enabled,
4254 .slot_reset = qla4xxx_pci_slot_reset,
4255 .resume = qla4xxx_pci_resume,
4256};
4257
afaf5a2d
DS
4258static struct pci_device_id qla4xxx_pci_tbl[] = {
4259 {
4260 .vendor = PCI_VENDOR_ID_QLOGIC,
4261 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
4262 .subvendor = PCI_ANY_ID,
4263 .subdevice = PCI_ANY_ID,
4264 },
4265 {
4266 .vendor = PCI_VENDOR_ID_QLOGIC,
4267 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
4268 .subvendor = PCI_ANY_ID,
4269 .subdevice = PCI_ANY_ID,
4270 },
d915058f
DS
4271 {
4272 .vendor = PCI_VENDOR_ID_QLOGIC,
4273 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
4274 .subvendor = PCI_ANY_ID,
4275 .subdevice = PCI_ANY_ID,
4276 },
f4f5df23
VC
4277 {
4278 .vendor = PCI_VENDOR_ID_QLOGIC,
4279 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
4280 .subvendor = PCI_ANY_ID,
4281 .subdevice = PCI_ANY_ID,
4282 },
afaf5a2d
DS
4283 {0, 0},
4284};
4285MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
4286
47975477 4287static struct pci_driver qla4xxx_pci_driver = {
afaf5a2d
DS
4288 .name = DRIVER_NAME,
4289 .id_table = qla4xxx_pci_tbl,
4290 .probe = qla4xxx_probe_adapter,
4291 .remove = qla4xxx_remove_adapter,
2232be0d 4292 .err_handler = &qla4xxx_err_handler,
afaf5a2d
DS
4293};
4294
4295static int __init qla4xxx_module_init(void)
4296{
4297 int ret;
4298
4299 /* Allocate cache for SRBs. */
4300 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
20c2df83 4301 SLAB_HWCACHE_ALIGN, NULL);
afaf5a2d
DS
4302 if (srb_cachep == NULL) {
4303 printk(KERN_ERR
4304 "%s: Unable to allocate SRB cache..."
4305 "Failing load!\n", DRIVER_NAME);
4306 ret = -ENOMEM;
4307 goto no_srp_cache;
4308 }
4309
4310 /* Derive version string. */
4311 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
11010fec 4312 if (ql4xextended_error_logging)
afaf5a2d
DS
4313 strcat(qla4xxx_version_str, "-debug");
4314
4315 qla4xxx_scsi_transport =
4316 iscsi_register_transport(&qla4xxx_iscsi_transport);
4317 if (!qla4xxx_scsi_transport){
4318 ret = -ENODEV;
4319 goto release_srb_cache;
4320 }
4321
afaf5a2d
DS
4322 ret = pci_register_driver(&qla4xxx_pci_driver);
4323 if (ret)
4324 goto unregister_transport;
4325
4326 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
4327 return 0;
5ae16db3 4328
afaf5a2d
DS
4329unregister_transport:
4330 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
4331release_srb_cache:
4332 kmem_cache_destroy(srb_cachep);
4333no_srp_cache:
4334 return ret;
4335}
4336
4337static void __exit qla4xxx_module_exit(void)
4338{
4339 pci_unregister_driver(&qla4xxx_pci_driver);
4340 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
4341 kmem_cache_destroy(srb_cachep);
4342}
4343
4344module_init(qla4xxx_module_init);
4345module_exit(qla4xxx_module_exit);
4346
4347MODULE_AUTHOR("QLogic Corporation");
4348MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
4349MODULE_LICENSE("GPL");
4350MODULE_VERSION(QLA4XXX_DRIVER_VERSION);