drivers/scsi/qla4xxx/ql4_os.c
1/*
2 * QLogic iSCSI HBA Driver
7d01d069 3 * Copyright (c) 2003-2010 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7#include <linux/moduleparam.h>
5a0e3ad6 8#include <linux/slab.h>
9#include <linux/blkdev.h>
10#include <linux/iscsi_boot_sysfs.h>
11
12#include <scsi/scsi_tcq.h>
13#include <scsi/scsicam.h>
14
15#include "ql4_def.h"
16#include "ql4_version.h"
17#include "ql4_glbl.h"
18#include "ql4_dbg.h"
19#include "ql4_inline.h"
20
21/*
22 * Driver version
23 */
47975477 24static char qla4xxx_version_str[40];
25
26/*
27 * SRB allocation cache
28 */
e18b890b 29static struct kmem_cache *srb_cachep;
30
31/*
32 * Module parameter information and variables
33 */
afaf5a2d 34int ql4xdontresethba = 0;
f4f5df23 35module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
afaf5a2d 36MODULE_PARM_DESC(ql4xdontresethba,
37 "Don't reset the HBA for driver recovery \n"
38 " 0 - It will reset HBA (Default)\n"
39 " 1 - It will NOT reset HBA");
afaf5a2d 40
11010fec 41int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
f4f5df23 42module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
11010fec 43MODULE_PARM_DESC(ql4xextended_error_logging,
44 "Option to enable extended error logging, "
45 "Default is 0 - no logging, 1 - debug logging");
46
47int ql4xenablemsix = 1;
48module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
49MODULE_PARM_DESC(ql4xenablemsix,
50 "Set to enable MSI or MSI-X interrupt mechanism.\n"
51 " 0 = enable INTx interrupt mechanism.\n"
52 " 1 = enable MSI-X interrupt mechanism (Default).\n"
53 " 2 = enable MSI interrupt mechanism.");
477ffb9d 54
d510d965 55#define QL4_DEF_QDEPTH 32
56static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
57module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
58MODULE_PARM_DESC(ql4xmaxqdepth,
59 "Maximum queue depth to report for target devices.\n"
60 " Default: 32.");
d510d965 61
62static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
63module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
64MODULE_PARM_DESC(ql4xsess_recovery_tmo,
65 "Target Session Recovery Timeout.\n"
66 " Default: 30 sec.");
67
b3a271a9 68static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
69/*
70 * SCSI host template entry points
71 */
47975477 72static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
73
74/*
75 * iSCSI template entry points
76 */
77static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
78 enum iscsi_param param, char *buf);
79static int qla4xxx_host_get_param(struct Scsi_Host *shost,
80 enum iscsi_host_param param, char *buf);
81static int qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data,
82 int count);
83static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
84 enum iscsi_param_type param_type,
85 int param, char *buf);
5c656af7 86static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
87static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
88 struct sockaddr *dst_addr,
89 int non_blocking);
90static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
91static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
92static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
93 enum iscsi_param param, char *buf);
94static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
95static struct iscsi_cls_conn *
96qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
97static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
98 struct iscsi_cls_conn *cls_conn,
99 uint64_t transport_fd, int is_leading);
100static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
101static struct iscsi_cls_session *
102qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
103 uint16_t qdepth, uint32_t initial_cmdsn);
104static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
105static void qla4xxx_task_work(struct work_struct *wdata);
106static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
107static int qla4xxx_task_xmit(struct iscsi_task *);
108static void qla4xxx_task_cleanup(struct iscsi_task *);
109static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
110static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
111 struct iscsi_stats *stats);
112/*
113 * SCSI host template entry points
114 */
f281233d 115static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
09a0f719 116static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
afaf5a2d 117static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
ce545039 118static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
119static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
120static int qla4xxx_slave_alloc(struct scsi_device *device);
121static int qla4xxx_slave_configure(struct scsi_device *device);
122static void qla4xxx_slave_destroy(struct scsi_device *sdev);
3128c6c7 123static mode_t ql4_attr_is_visible(int param_type, int param);
afaf5a2d 124
125static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
126 QLA82XX_LEGACY_INTR_CONFIG;
127
128static struct scsi_host_template qla4xxx_driver_template = {
129 .module = THIS_MODULE,
130 .name = DRIVER_NAME,
131 .proc_name = DRIVER_NAME,
132 .queuecommand = qla4xxx_queuecommand,
133
09a0f719 134 .eh_abort_handler = qla4xxx_eh_abort,
afaf5a2d 135 .eh_device_reset_handler = qla4xxx_eh_device_reset,
ce545039 136 .eh_target_reset_handler = qla4xxx_eh_target_reset,
afaf5a2d 137 .eh_host_reset_handler = qla4xxx_eh_host_reset,
5c656af7 138 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
139
140 .slave_configure = qla4xxx_slave_configure,
141 .slave_alloc = qla4xxx_slave_alloc,
142 .slave_destroy = qla4xxx_slave_destroy,
143
144 .this_id = -1,
145 .cmd_per_lun = 3,
146 .use_clustering = ENABLE_CLUSTERING,
147 .sg_tablesize = SG_ALL,
148
149 .max_sectors = 0xFFFF,
7ad633c0 150 .shost_attrs = qla4xxx_host_attrs,
a355943c 151 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
152};
153
154static struct iscsi_transport qla4xxx_iscsi_transport = {
155 .owner = THIS_MODULE,
156 .name = DRIVER_NAME,
157 .caps = CAP_TEXT_NEGO |
158 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
159 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
160 CAP_MULTI_R2T,
3128c6c7 161 .attr_is_visible = ql4_attr_is_visible,
162 .create_session = qla4xxx_session_create,
163 .destroy_session = qla4xxx_session_destroy,
164 .start_conn = qla4xxx_conn_start,
165 .create_conn = qla4xxx_conn_create,
166 .bind_conn = qla4xxx_conn_bind,
167 .stop_conn = iscsi_conn_stop,
168 .destroy_conn = qla4xxx_conn_destroy,
169 .set_param = iscsi_set_param,
afaf5a2d 170 .get_conn_param = qla4xxx_conn_get_param,
171 .get_session_param = iscsi_session_get_param,
172 .get_ep_param = qla4xxx_get_ep_param,
173 .ep_connect = qla4xxx_ep_connect,
174 .ep_poll = qla4xxx_ep_poll,
175 .ep_disconnect = qla4xxx_ep_disconnect,
176 .get_stats = qla4xxx_conn_get_stats,
177 .send_pdu = iscsi_conn_send_pdu,
178 .xmit_task = qla4xxx_task_xmit,
179 .cleanup_task = qla4xxx_task_cleanup,
180 .alloc_pdu = qla4xxx_alloc_pdu,
181
aa1e93a2 182 .get_host_param = qla4xxx_host_get_param,
d00efe3f 183 .set_iface_param = qla4xxx_iface_set_param,
ed1086e0 184 .get_iface_param = qla4xxx_get_iface_param,
a355943c 185 .bsg_request = qla4xxx_bsg_request,
186};
187
188static struct scsi_transport_template *qla4xxx_scsi_transport;
189
190static mode_t ql4_attr_is_visible(int param_type, int param)
191{
192 switch (param_type) {
193 case ISCSI_HOST_PARAM:
194 switch (param) {
195 case ISCSI_HOST_PARAM_HWADDRESS:
196 case ISCSI_HOST_PARAM_IPADDRESS:
197 case ISCSI_HOST_PARAM_INITIATOR_NAME:
198 return S_IRUGO;
199 default:
200 return 0;
201 }
202 case ISCSI_PARAM:
203 switch (param) {
204 case ISCSI_PARAM_CONN_ADDRESS:
205 case ISCSI_PARAM_CONN_PORT:
206 case ISCSI_PARAM_TARGET_NAME:
207 case ISCSI_PARAM_TPGT:
208 case ISCSI_PARAM_TARGET_ALIAS:
209 case ISCSI_PARAM_MAX_BURST:
210 case ISCSI_PARAM_MAX_R2T:
211 case ISCSI_PARAM_FIRST_BURST:
212 case ISCSI_PARAM_MAX_RECV_DLENGTH:
213 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
214 return S_IRUGO;
215 default:
216 return 0;
217 }
218 case ISCSI_NET_PARAM:
219 switch (param) {
220 case ISCSI_NET_PARAM_IPV4_ADDR:
221 case ISCSI_NET_PARAM_IPV4_SUBNET:
222 case ISCSI_NET_PARAM_IPV4_GW:
223 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
224 case ISCSI_NET_PARAM_IFACE_ENABLE:
225 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
226 case ISCSI_NET_PARAM_IPV6_ADDR:
227 case ISCSI_NET_PARAM_IPV6_ROUTER:
228 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
229 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
230 case ISCSI_NET_PARAM_VLAN_ID:
231 case ISCSI_NET_PARAM_VLAN_PRIORITY:
232 case ISCSI_NET_PARAM_VLAN_ENABLED:
943c157b 233 case ISCSI_NET_PARAM_MTU:
234 return S_IRUGO;
235 default:
236 return 0;
237 }
238 }
239
240 return 0;
241}
242
243static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
244 enum iscsi_param_type param_type,
245 int param, char *buf)
246{
247 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
248 struct scsi_qla_host *ha = to_qla_host(shost);
249 int len = -ENOSYS;
250
251 if (param_type != ISCSI_NET_PARAM)
252 return -ENOSYS;
253
254 switch (param) {
255 case ISCSI_NET_PARAM_IPV4_ADDR:
256 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
257 break;
258 case ISCSI_NET_PARAM_IPV4_SUBNET:
259 len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
260 break;
261 case ISCSI_NET_PARAM_IPV4_GW:
262 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
263 break;
264 case ISCSI_NET_PARAM_IFACE_ENABLE:
265 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
266 len = sprintf(buf, "%s\n",
267 (ha->ip_config.ipv4_options &
268 IPOPT_IPV4_PROTOCOL_ENABLE) ?
269 "enabled" : "disabled");
270 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
271 len = sprintf(buf, "%s\n",
272 (ha->ip_config.ipv6_options &
273 IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
274 "enabled" : "disabled");
275 break;
276 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
277 len = sprintf(buf, "%s\n",
278 (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
279 "dhcp" : "static");
280 break;
281 case ISCSI_NET_PARAM_IPV6_ADDR:
282 if (iface->iface_num == 0)
283 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
284 if (iface->iface_num == 1)
285 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
286 break;
287 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
288 len = sprintf(buf, "%pI6\n",
289 &ha->ip_config.ipv6_link_local_addr);
290 break;
291 case ISCSI_NET_PARAM_IPV6_ROUTER:
292 len = sprintf(buf, "%pI6\n",
293 &ha->ip_config.ipv6_default_router_addr);
294 break;
295 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
296 len = sprintf(buf, "%s\n",
297 (ha->ip_config.ipv6_addl_options &
298 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
299 "nd" : "static");
300 break;
301 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
302 len = sprintf(buf, "%s\n",
303 (ha->ip_config.ipv6_addl_options &
304 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
305 "auto" : "static");
306 break;
307 case ISCSI_NET_PARAM_VLAN_ID:
308 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
309 len = sprintf(buf, "%d\n",
310 (ha->ip_config.ipv4_vlan_tag &
311 ISCSI_MAX_VLAN_ID));
312 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
313 len = sprintf(buf, "%d\n",
314 (ha->ip_config.ipv6_vlan_tag &
315 ISCSI_MAX_VLAN_ID));
316 break;
317 case ISCSI_NET_PARAM_VLAN_PRIORITY:
318 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
319 len = sprintf(buf, "%d\n",
320 ((ha->ip_config.ipv4_vlan_tag >> 13) &
321 ISCSI_MAX_VLAN_PRIORITY));
322 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
323 len = sprintf(buf, "%d\n",
324 ((ha->ip_config.ipv6_vlan_tag >> 13) &
325 ISCSI_MAX_VLAN_PRIORITY));
326 break;
327 case ISCSI_NET_PARAM_VLAN_ENABLED:
328 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
329 len = sprintf(buf, "%s\n",
330 (ha->ip_config.ipv4_options &
331 IPOPT_VLAN_TAGGING_ENABLE) ?
332 "enabled" : "disabled");
333 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
334 len = sprintf(buf, "%s\n",
335 (ha->ip_config.ipv6_options &
336 IPV6_OPT_VLAN_TAGGING_ENABLE) ?
337 "enabled" : "disabled");
338 break;
339 case ISCSI_NET_PARAM_MTU:
340 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
341 break;
342 default:
343 len = -ENOSYS;
344 }
345
346 return len;
347}
348
349static struct iscsi_endpoint *
350qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
351 int non_blocking)
5c656af7 352{
353 int ret;
354 struct iscsi_endpoint *ep;
355 struct qla_endpoint *qla_ep;
356 struct scsi_qla_host *ha;
357 struct sockaddr_in *addr;
358 struct sockaddr_in6 *addr6;
5c656af7 359
360 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
361 if (!shost) {
362 ret = -ENXIO;
363 printk(KERN_ERR "%s: shost is NULL\n",
364 __func__);
365 return ERR_PTR(ret);
366 }
5c656af7 367
368 ha = iscsi_host_priv(shost);
369
370 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
371 if (!ep) {
372 ret = -ENOMEM;
373 return ERR_PTR(ret);
374 }
375
376 qla_ep = ep->dd_data;
377 memset(qla_ep, 0, sizeof(struct qla_endpoint));
378 if (dst_addr->sa_family == AF_INET) {
379 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
380 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
381 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
382 (char *)&addr->sin_addr));
383 } else if (dst_addr->sa_family == AF_INET6) {
384 memcpy(&qla_ep->dst_addr, dst_addr,
385 sizeof(struct sockaddr_in6));
386 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
387 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
388 (char *)&addr6->sin6_addr));
389 }
390
391 qla_ep->host = shost;
392
393 return ep;
394}
395
b3a271a9 396static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
afaf5a2d 397{
398 struct qla_endpoint *qla_ep;
399 struct scsi_qla_host *ha;
400 int ret = 0;
afaf5a2d 401
402 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
403 qla_ep = ep->dd_data;
404 ha = to_qla_host(qla_ep->host);
405
406 if (adapter_up(ha))
407 ret = 1;
408
409 return ret;
410}
411
412static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
413{
414 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
415 iscsi_destroy_endpoint(ep);
416}
417
418static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
419 enum iscsi_param param,
420 char *buf)
421{
422 struct qla_endpoint *qla_ep = ep->dd_data;
423 struct sockaddr *dst_addr;
424
425 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
426
427 switch (param) {
428 case ISCSI_PARAM_CONN_PORT:
429 case ISCSI_PARAM_CONN_ADDRESS:
430 if (!qla_ep)
431 return -ENOTCONN;
432
433 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
434 if (!dst_addr)
435 return -ENOTCONN;
436
437 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
438 &qla_ep->dst_addr, param, buf);
439 default:
440 return -ENOSYS;
441 }
442}
443
444static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
445 struct iscsi_stats *stats)
446{
447 struct iscsi_session *sess;
448 struct iscsi_cls_session *cls_sess;
449 struct ddb_entry *ddb_entry;
450 struct scsi_qla_host *ha;
451 struct ql_iscsi_stats *ql_iscsi_stats;
452 int stats_size;
453 int ret;
454 dma_addr_t iscsi_stats_dma;
455
456 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
568d303b 457
458 cls_sess = iscsi_conn_to_session(cls_conn);
459 sess = cls_sess->dd_data;
460 ddb_entry = sess->dd_data;
461 ha = ddb_entry->ha;
462
463 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
464 /* Allocate memory */
465 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
466 &iscsi_stats_dma, GFP_KERNEL);
467 if (!ql_iscsi_stats) {
468 ql4_printk(KERN_ERR, ha,
469 "Unable to allocate memory for iscsi stats\n");
470 goto exit_get_stats;
568d303b 471 }
472
473 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
474 iscsi_stats_dma);
475 if (ret != QLA_SUCCESS) {
476 ql4_printk(KERN_ERR, ha,
477 "Unable to retreive iscsi stats\n");
478 goto free_stats;
479 }
480
481 /* octets */
482 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
483 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
484 /* xmit pdus */
485 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
486 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
487 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
488 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
489 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
490 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
491 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
492 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
493 /* recv pdus */
494 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
495 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
496 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
497 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
498 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
499 stats->logoutrsp_pdus =
500 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
501 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
502 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
503 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
504
505free_stats:
506 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
507 iscsi_stats_dma);
508exit_get_stats:
509 return;
510}
511
512static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
513{
514 struct iscsi_cls_session *session;
515 struct iscsi_session *sess;
516 unsigned long flags;
517 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
518
519 session = starget_to_session(scsi_target(sc->device));
520 sess = session->dd_data;
521
522 spin_lock_irqsave(&session->lock, flags);
523 if (session->state == ISCSI_SESSION_FAILED)
524 ret = BLK_EH_RESET_TIMER;
525 spin_unlock_irqrestore(&session->lock, flags);
526
527 return ret;
528}
529
530static int qla4xxx_host_get_param(struct Scsi_Host *shost,
531 enum iscsi_host_param param, char *buf)
532{
533 struct scsi_qla_host *ha = to_qla_host(shost);
534 int len;
535
536 switch (param) {
537 case ISCSI_HOST_PARAM_HWADDRESS:
7ffc49a6 538 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
8ad5781a 539 break;
22236961 540 case ISCSI_HOST_PARAM_IPADDRESS:
2bab08fc 541 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
22236961 542 break;
8ad5781a 543 case ISCSI_HOST_PARAM_INITIATOR_NAME:
22236961 544 len = sprintf(buf, "%s\n", ha->name_string);
545 break;
546 default:
547 return -ENOSYS;
548 }
549
550 return len;
551}
552
553static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
554{
555 if (ha->iface_ipv4)
556 return;
557
558 /* IPv4 */
559 ha->iface_ipv4 = iscsi_create_iface(ha->host,
560 &qla4xxx_iscsi_transport,
561 ISCSI_IFACE_TYPE_IPV4, 0, 0);
562 if (!ha->iface_ipv4)
563 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
564 "iface0.\n");
565}
566
567static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
568{
569 if (!ha->iface_ipv6_0)
570 /* IPv6 iface-0 */
571 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
572 &qla4xxx_iscsi_transport,
573 ISCSI_IFACE_TYPE_IPV6, 0,
574 0);
575 if (!ha->iface_ipv6_0)
576 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
577 "iface0.\n");
578
579 if (!ha->iface_ipv6_1)
580 /* IPv6 iface-1 */
581 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
582 &qla4xxx_iscsi_transport,
583 ISCSI_IFACE_TYPE_IPV6, 1,
584 0);
585 if (!ha->iface_ipv6_1)
586 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
587 "iface1.\n");
588}
589
590static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
591{
592 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
593 qla4xxx_create_ipv4_iface(ha);
594
595 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
596 qla4xxx_create_ipv6_iface(ha);
597}
598
599static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
600{
601 if (ha->iface_ipv4) {
602 iscsi_destroy_iface(ha->iface_ipv4);
603 ha->iface_ipv4 = NULL;
604 }
605}
606
607static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
608{
609 if (ha->iface_ipv6_0) {
610 iscsi_destroy_iface(ha->iface_ipv6_0);
611 ha->iface_ipv6_0 = NULL;
612 }
613 if (ha->iface_ipv6_1) {
614 iscsi_destroy_iface(ha->iface_ipv6_1);
615 ha->iface_ipv6_1 = NULL;
616 }
617}
618
619static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
620{
621 qla4xxx_destroy_ipv4_iface(ha);
622 qla4xxx_destroy_ipv6_iface(ha);
623}
624
625static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
626 struct iscsi_iface_param_info *iface_param,
627 struct addr_ctrl_blk *init_fw_cb)
628{
629 /*
630 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
631 * iface_num 1 is valid only for IPv6 Addr.
632 */
633 switch (iface_param->param) {
634 case ISCSI_NET_PARAM_IPV6_ADDR:
635 if (iface_param->iface_num & 0x1)
636 /* IPv6 Addr 1 */
637 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
638 sizeof(init_fw_cb->ipv6_addr1));
639 else
640 /* IPv6 Addr 0 */
641 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
642 sizeof(init_fw_cb->ipv6_addr0));
643 break;
644 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
645 if (iface_param->iface_num & 0x1)
646 break;
647 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
648 sizeof(init_fw_cb->ipv6_if_id));
649 break;
650 case ISCSI_NET_PARAM_IPV6_ROUTER:
651 if (iface_param->iface_num & 0x1)
652 break;
653 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
654 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
655 break;
656 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
 657 /* Autocfg applies only to the even-numbered interface */
658 if (iface_param->iface_num & 0x1)
659 break;
660
661 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
662 init_fw_cb->ipv6_addtl_opts &=
663 cpu_to_le16(
664 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
665 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
666 init_fw_cb->ipv6_addtl_opts |=
667 cpu_to_le16(
668 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
669 else
670 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
671 "IPv6 addr\n");
672 break;
673 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
 674 /* Autocfg applies only to the even-numbered interface */
675 if (iface_param->iface_num & 0x1)
676 break;
677
678 if (iface_param->value[0] ==
679 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
680 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
681 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
682 else if (iface_param->value[0] ==
683 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
684 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
685 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
686 else
687 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
688 "IPv6 linklocal addr\n");
689 break;
690 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
 691 /* Autocfg applies only to the even-numbered interface */
692 if (iface_param->iface_num & 0x1)
693 break;
694
695 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
696 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
697 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
698 break;
699 case ISCSI_NET_PARAM_IFACE_ENABLE:
ed1086e0 700 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
701 init_fw_cb->ipv6_opts |=
702 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
703 qla4xxx_create_ipv6_iface(ha);
704 } else {
705 init_fw_cb->ipv6_opts &=
706 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
707 0xFFFF);
708 qla4xxx_destroy_ipv6_iface(ha);
709 }
710 break;
711 case ISCSI_NET_PARAM_VLAN_ID:
712 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
713 break;
714 init_fw_cb->ipv6_vlan_tag =
715 cpu_to_be16(*(uint16_t *)iface_param->value);
716 break;
717 case ISCSI_NET_PARAM_VLAN_ENABLED:
718 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
719 init_fw_cb->ipv6_opts |=
720 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
721 else
722 init_fw_cb->ipv6_opts &=
723 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
d00efe3f 724 break;
725 case ISCSI_NET_PARAM_MTU:
726 init_fw_cb->eth_mtu_size =
727 cpu_to_le16(*(uint16_t *)iface_param->value);
728 break;
729 default:
730 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
731 iface_param->param);
732 break;
733 }
734}
735
736static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
737 struct iscsi_iface_param_info *iface_param,
738 struct addr_ctrl_blk *init_fw_cb)
739{
740 switch (iface_param->param) {
741 case ISCSI_NET_PARAM_IPV4_ADDR:
742 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
743 sizeof(init_fw_cb->ipv4_addr));
744 break;
745 case ISCSI_NET_PARAM_IPV4_SUBNET:
746 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
747 sizeof(init_fw_cb->ipv4_subnet));
748 break;
749 case ISCSI_NET_PARAM_IPV4_GW:
750 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
751 sizeof(init_fw_cb->ipv4_gw_addr));
752 break;
753 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
754 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
755 init_fw_cb->ipv4_tcp_opts |=
756 cpu_to_le16(TCPOPT_DHCP_ENABLE);
757 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
758 init_fw_cb->ipv4_tcp_opts &=
759 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
760 else
761 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
762 break;
763 case ISCSI_NET_PARAM_IFACE_ENABLE:
ed1086e0 764 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
d00efe3f 765 init_fw_cb->ipv4_ip_opts |=
2bab08fc 766 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
767 qla4xxx_create_ipv4_iface(ha);
768 } else {
d00efe3f 769 init_fw_cb->ipv4_ip_opts &=
2bab08fc 770 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
d00efe3f 771 0xFFFF);
772 qla4xxx_destroy_ipv4_iface(ha);
773 }
774 break;
775 case ISCSI_NET_PARAM_VLAN_ID:
776 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
777 break;
778 init_fw_cb->ipv4_vlan_tag =
779 cpu_to_be16(*(uint16_t *)iface_param->value);
780 break;
781 case ISCSI_NET_PARAM_VLAN_ENABLED:
782 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
783 init_fw_cb->ipv4_ip_opts |=
784 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
785 else
786 init_fw_cb->ipv4_ip_opts &=
787 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
d00efe3f 788 break;
789 case ISCSI_NET_PARAM_MTU:
790 init_fw_cb->eth_mtu_size =
791 cpu_to_le16(*(uint16_t *)iface_param->value);
792 break;
793 default:
794 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
795 iface_param->param);
796 break;
797 }
798}
799
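/* Reuse the init_fw_cb buffer as an ACB by zeroing all of its reserved fields. */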
800static void
801qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
802{
803 struct addr_ctrl_blk_def *acb;
804 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
805 memset(acb->reserved1, 0, sizeof(acb->reserved1));
806 memset(acb->reserved2, 0, sizeof(acb->reserved2));
807 memset(acb->reserved3, 0, sizeof(acb->reserved3));
808 memset(acb->reserved4, 0, sizeof(acb->reserved4));
809 memset(acb->reserved5, 0, sizeof(acb->reserved5));
810 memset(acb->reserved6, 0, sizeof(acb->reserved6));
811 memset(acb->reserved7, 0, sizeof(acb->reserved7));
812 memset(acb->reserved8, 0, sizeof(acb->reserved8));
813 memset(acb->reserved9, 0, sizeof(acb->reserved9));
814 memset(acb->reserved10, 0, sizeof(acb->reserved10));
815 memset(acb->reserved11, 0, sizeof(acb->reserved11));
816 memset(acb->reserved12, 0, sizeof(acb->reserved12));
817 memset(acb->reserved13, 0, sizeof(acb->reserved13));
818 memset(acb->reserved14, 0, sizeof(acb->reserved14));
819 memset(acb->reserved15, 0, sizeof(acb->reserved15));
820}
821
822static int
823qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data, int count)
824{
825 struct scsi_qla_host *ha = to_qla_host(shost);
826 int rval = 0;
827 struct iscsi_iface_param_info *iface_param = NULL;
828 struct addr_ctrl_blk *init_fw_cb = NULL;
829 dma_addr_t init_fw_cb_dma;
830 uint32_t mbox_cmd[MBOX_REG_COUNT];
831 uint32_t mbox_sts[MBOX_REG_COUNT];
832 uint32_t total_param_count;
833 uint32_t length;
834
835 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
836 sizeof(struct addr_ctrl_blk),
837 &init_fw_cb_dma, GFP_KERNEL);
838 if (!init_fw_cb) {
839 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
840 __func__);
841 return -ENOMEM;
842 }
843
844 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
845 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
846 memset(&mbox_sts, 0, sizeof(mbox_sts));
847
848 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
849 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
850 rval = -EIO;
851 goto exit_init_fw_cb;
852 }
853
854 total_param_count = count;
855 iface_param = (struct iscsi_iface_param_info *)data;
856
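 /* Walk the packed list of iface_param entries; each header is followed by 'len' bytes of value data. */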
857 for ( ; total_param_count != 0; total_param_count--) {
858 length = iface_param->len;
859
860 if (iface_param->param_type != ISCSI_NET_PARAM)
861 continue;
862
863 switch (iface_param->iface_type) {
864 case ISCSI_IFACE_TYPE_IPV4:
865 switch (iface_param->iface_num) {
866 case 0:
867 qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
868 break;
869 default:
870 /* Cannot have more than one IPv4 interface */
871 ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
872 "number = %d\n",
873 iface_param->iface_num);
874 break;
875 }
876 break;
877 case ISCSI_IFACE_TYPE_IPV6:
878 switch (iface_param->iface_num) {
879 case 0:
880 case 1:
881 qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
882 break;
883 default:
 884 /* Cannot have more than two IPv6 interfaces */
885 ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
886 "number = %d\n",
887 iface_param->iface_num);
888 break;
889 }
890 break;
891 default:
892 ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
893 break;
894 }
895
896 iface_param = (struct iscsi_iface_param_info *)
897 ((uint8_t *)iface_param +
898 sizeof(struct iscsi_iface_param_info) + length);
899 }
900
901 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
902
903 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
904 sizeof(struct addr_ctrl_blk),
905 FLASH_OPT_RMW_COMMIT);
906 if (rval != QLA_SUCCESS) {
907 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
908 __func__);
909 rval = -EIO;
910 goto exit_init_fw_cb;
911 }
912
913 qla4xxx_disable_acb(ha);
914
915 qla4xxx_initcb_to_acb(init_fw_cb);
916
917 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
918 if (rval != QLA_SUCCESS) {
919 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
920 __func__);
921 rval = -EIO;
922 goto exit_init_fw_cb;
923 }
924
925 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
926 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
927 init_fw_cb_dma);
928
929exit_init_fw_cb:
930 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
931 init_fw_cb, init_fw_cb_dma);
932
933 return rval;
934}
935
b3a271a9 936static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
937 enum iscsi_param param, char *buf)
938{
939 struct iscsi_conn *conn;
940 struct qla_conn *qla_conn;
941 struct sockaddr *dst_addr;
942 int len = 0;
afaf5a2d 943
944 conn = cls_conn->dd_data;
945 qla_conn = conn->dd_data;
946 dst_addr = &qla_conn->qla_ep->dst_addr;
947
948 switch (param) {
949 case ISCSI_PARAM_CONN_PORT:
afaf5a2d 950 case ISCSI_PARAM_CONN_ADDRESS:
951 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
952 dst_addr, param, buf);
afaf5a2d 953 default:
b3a271a9 954 return iscsi_conn_get_param(cls_conn, param, buf);
955 }
956
957 return len;
b3a271a9 958
959}
960
961static struct iscsi_cls_session *
962qla4xxx_session_create(struct iscsi_endpoint *ep,
963 uint16_t cmds_max, uint16_t qdepth,
964 uint32_t initial_cmdsn)
965{
966 struct iscsi_cls_session *cls_sess;
967 struct scsi_qla_host *ha;
968 struct qla_endpoint *qla_ep;
969 struct ddb_entry *ddb_entry;
970 uint32_t ddb_index;
971 uint32_t mbx_sts = 0;
972 struct iscsi_session *sess;
973 struct sockaddr *dst_addr;
974 int ret;
975
976 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
977 if (!ep) {
978 printk(KERN_ERR "qla4xxx: missing ep.\n");
979 return NULL;
980 }
981
982 qla_ep = ep->dd_data;
983 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
984 ha = to_qla_host(qla_ep->host);
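 /* Find a free DDB index and claim it atomically; retry if another context grabs it first. */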
985get_ddb_index:
986 ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
987
988 if (ddb_index >= MAX_DDB_ENTRIES) {
989 DEBUG2(ql4_printk(KERN_INFO, ha,
990 "Free DDB index not available\n"));
991 return NULL;
992 }
993
994 if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
995 goto get_ddb_index;
996
997 DEBUG2(ql4_printk(KERN_INFO, ha,
998 "Found a free DDB index at %d\n", ddb_index));
999 ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
1000 if (ret == QLA_ERROR) {
1001 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1002 ql4_printk(KERN_INFO, ha,
1003 "DDB index = %d not available trying next\n",
1004 ddb_index);
1005 goto get_ddb_index;
1006 }
1007 DEBUG2(ql4_printk(KERN_INFO, ha,
1008 "Free FW DDB not available\n"));
1009 return NULL;
1010 }
1011
1012 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1013 cmds_max, sizeof(struct ddb_entry),
1014 sizeof(struct ql4_task_data),
1015 initial_cmdsn, ddb_index);
1016 if (!cls_sess)
1017 return NULL;
1018
1019 sess = cls_sess->dd_data;
1020 ddb_entry = sess->dd_data;
1021 ddb_entry->fw_ddb_index = ddb_index;
1022 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1023 ddb_entry->ha = ha;
1024 ddb_entry->sess = cls_sess;
1025 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1026 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1027 ha->tot_ddbs++;
1028
1029 return cls_sess;
1030}
1031
1032static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1033{
1034 struct iscsi_session *sess;
1035 struct ddb_entry *ddb_entry;
1036 struct scsi_qla_host *ha;
1037 unsigned long flags;
1038
1039 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1040 sess = cls_sess->dd_data;
1041 ddb_entry = sess->dd_data;
1042 ha = ddb_entry->ha;
1043
1044 spin_lock_irqsave(&ha->hardware_lock, flags);
1045 qla4xxx_free_ddb(ha, ddb_entry);
1046 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1047 iscsi_session_teardown(cls_sess);
1048}
1049
1050static struct iscsi_cls_conn *
1051qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1052{
1053 struct iscsi_cls_conn *cls_conn;
1054 struct iscsi_session *sess;
1055 struct ddb_entry *ddb_entry;
1056
1057 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1058 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1059 conn_idx);
1060 sess = cls_sess->dd_data;
1061 ddb_entry = sess->dd_data;
1062 ddb_entry->conn = cls_conn;
1063
1064 return cls_conn;
1065}
1066
1067static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1068 struct iscsi_cls_conn *cls_conn,
1069 uint64_t transport_fd, int is_leading)
1070{
1071 struct iscsi_conn *conn;
1072 struct qla_conn *qla_conn;
1073 struct iscsi_endpoint *ep;
1074
1075 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1076
1077 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1078 return -EINVAL;
1079 ep = iscsi_lookup_endpoint(transport_fd);
1080 conn = cls_conn->dd_data;
1081 qla_conn = conn->dd_data;
1082 qla_conn->qla_ep = ep->dd_data;
1083 return 0;
1084}
1085
1086static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1087{
1088 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1089 struct iscsi_session *sess;
1090 struct ddb_entry *ddb_entry;
1091 struct scsi_qla_host *ha;
1092 struct dev_db_entry *fw_ddb_entry;
1093 dma_addr_t fw_ddb_entry_dma;
1094 uint32_t mbx_sts = 0;
1095 int ret = 0;
1096 int status = QLA_SUCCESS;
1097
1098 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1099 sess = cls_sess->dd_data;
1100 ddb_entry = sess->dd_data;
1101 ha = ddb_entry->ha;
1102
1103 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1104 &fw_ddb_entry_dma, GFP_KERNEL);
1105 if (!fw_ddb_entry) {
1106 ql4_printk(KERN_ERR, ha,
1107 "%s: Unable to allocate dma buffer\n", __func__);
1108 return -ENOMEM;
1109 }
1110
1111 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1112 if (ret) {
 1113 /* If iscsid is stopped and restarted, there is no need to do
 1114 * set param again, since the DDB state will already be
 1115 * active and the FW does not allow set ddb on an
 1116 * active session.
 1117 */
1118 if (mbx_sts)
1119 if (ddb_entry->fw_ddb_device_state ==
1120 DDB_DS_SESSION_ACTIVE)
1121 goto exit_set_param;
1122
1123 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1124 __func__, ddb_entry->fw_ddb_index);
1125 goto exit_conn_start;
1126 }
1127
1128 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1129 if (status == QLA_ERROR) {
1130 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1131 sess->targetname);
1132 ret = -EINVAL;
1133 goto exit_conn_start;
1134 }
1135
1136 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1137
1138exit_set_param:
1139 iscsi_conn_start(cls_conn);
1140 ret = 0;
1141
1142exit_conn_start:
1143 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1144 fw_ddb_entry, fw_ddb_entry_dma);
1145 return ret;
1146}
1147
1148static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1149{
1150 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1151 struct iscsi_session *sess;
1152 struct scsi_qla_host *ha;
1153 struct ddb_entry *ddb_entry;
1154 int options;
1155
1156 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1157 sess = cls_sess->dd_data;
1158 ddb_entry = sess->dd_data;
1159 ha = ddb_entry->ha;
1160
1161 options = LOGOUT_OPTION_CLOSE_SESSION;
1162 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1163 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1164 else
1165 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1166
1167 /*
 1168 * Clear the DDB bit so that the next login can reuse it.
 1169 * If the FW does not clear the DDB entry, set DDB will fail anyway.
1170 */
1171 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
1172}
1173
1174static void qla4xxx_task_work(struct work_struct *wdata)
1175{
1176 struct ql4_task_data *task_data;
1177 struct scsi_qla_host *ha;
1178 struct passthru_status *sts;
1179 struct iscsi_task *task;
1180 struct iscsi_hdr *hdr;
1181 uint8_t *data;
1182 uint32_t data_len;
1183 struct iscsi_conn *conn;
1184 int hdr_len;
1185 itt_t itt;
1186
1187 task_data = container_of(wdata, struct ql4_task_data, task_work);
1188 ha = task_data->ha;
1189 task = task_data->task;
1190 sts = &task_data->sts;
1191 hdr_len = sizeof(struct iscsi_hdr);
1192
1193 DEBUG3(printk(KERN_INFO "Status returned\n"));
1194 DEBUG3(qla4xxx_dump_buffer(sts, 64));
1195 DEBUG3(printk(KERN_INFO "Response buffer"));
1196 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1197
1198 conn = task->conn;
1199
1200 switch (sts->completionStatus) {
1201 case PASSTHRU_STATUS_COMPLETE:
1202 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1203 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1204 itt = sts->handle;
1205 hdr->itt = itt;
1206 data = task_data->resp_buffer + hdr_len;
1207 data_len = task_data->resp_len - hdr_len;
1208 iscsi_complete_pdu(conn, hdr, data, data_len);
1209 break;
1210 default:
1211 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1212 sts->completionStatus);
1213 break;
1214 }
1215 return;
1216}
1217
1218static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1219{
1220 struct ql4_task_data *task_data;
1221 struct iscsi_session *sess;
1222 struct ddb_entry *ddb_entry;
1223 struct scsi_qla_host *ha;
1224 int hdr_len;
1225
1226 sess = task->conn->session;
1227 ddb_entry = sess->dd_data;
1228 ha = ddb_entry->ha;
1229 task_data = task->dd_data;
1230 memset(task_data, 0, sizeof(struct ql4_task_data));
1231
1232 if (task->sc) {
1233 ql4_printk(KERN_INFO, ha,
1234 "%s: SCSI Commands not implemented\n", __func__);
1235 return -EINVAL;
1236 }
1237
1238 hdr_len = sizeof(struct iscsi_hdr);
1239 task_data->ha = ha;
1240 task_data->task = task;
1241
1242 if (task->data_count) {
1243 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1244 task->data_count,
1245 PCI_DMA_TODEVICE);
1246 }
1247
 1248 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1249 __func__, task->conn->max_recv_dlength, hdr_len));
1250
1251 task_data->resp_len = task->conn->max_recv_dlength;
1252 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1253 task_data->resp_len,
1254 &task_data->resp_dma,
1255 GFP_ATOMIC);
1256 if (!task_data->resp_buffer)
1257 goto exit_alloc_pdu;
1258
1259 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1260 task->data_count + hdr_len,
1261 &task_data->req_dma,
1262 GFP_ATOMIC);
1263 if (!task_data->req_buffer)
1264 goto exit_alloc_pdu;
1265
1266 task->hdr = task_data->req_buffer;
1267
1268 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1269
1270 return 0;
1271
1272exit_alloc_pdu:
1273 if (task_data->resp_buffer)
1274 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1275 task_data->resp_buffer, task_data->resp_dma);
1276
1277 if (task_data->req_buffer)
1278 dma_free_coherent(&ha->pdev->dev, task->data_count + hdr_len,
1279 task_data->req_buffer, task_data->req_dma);
1280 return -ENOMEM;
1281}
1282
1283static void qla4xxx_task_cleanup(struct iscsi_task *task)
1284{
1285 struct ql4_task_data *task_data;
1286 struct iscsi_session *sess;
1287 struct ddb_entry *ddb_entry;
1288 struct scsi_qla_host *ha;
1289 int hdr_len;
1290
1291 hdr_len = sizeof(struct iscsi_hdr);
1292 sess = task->conn->session;
1293 ddb_entry = sess->dd_data;
1294 ha = ddb_entry->ha;
1295 task_data = task->dd_data;
1296
1297 if (task->data_count) {
1298 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1299 task->data_count, PCI_DMA_TODEVICE);
1300 }
1301
 1302 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1303 __func__, task->conn->max_recv_dlength, hdr_len));
1304
1305 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1306 task_data->resp_buffer, task_data->resp_dma);
1307 dma_free_coherent(&ha->pdev->dev, task->data_count + hdr_len,
1308 task_data->req_buffer, task_data->req_dma);
1309 return;
1310}
1311
1312static int qla4xxx_task_xmit(struct iscsi_task *task)
1313{
1314 struct scsi_cmnd *sc = task->sc;
1315 struct iscsi_session *sess = task->conn->session;
1316 struct ddb_entry *ddb_entry = sess->dd_data;
1317 struct scsi_qla_host *ha = ddb_entry->ha;
1318
1319 if (!sc)
1320 return qla4xxx_send_passthru0(task);
1321
1322 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1323 __func__);
1324 return -ENOSYS;
1325}
1326
1327void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1328 struct ddb_entry *ddb_entry)
1329{
1330 struct iscsi_cls_session *cls_sess;
1331 struct iscsi_cls_conn *cls_conn;
1332 struct iscsi_session *sess;
1333 struct iscsi_conn *conn;
1334 uint32_t ddb_state;
1335 dma_addr_t fw_ddb_entry_dma;
1336 struct dev_db_entry *fw_ddb_entry;
1337
1338 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1339 &fw_ddb_entry_dma, GFP_KERNEL);
1340 if (!fw_ddb_entry) {
1341 ql4_printk(KERN_ERR, ha,
1342 "%s: Unable to allocate dma buffer\n", __func__);
1343 return;
1344 }
1345
1346 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
1347 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
1348 NULL, NULL, NULL) == QLA_ERROR) {
1349 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1350 "get_ddb_entry for fw_ddb_index %d\n",
1351 ha->host_no, __func__,
1352 ddb_entry->fw_ddb_index));
1353 return;
1354 }
1355
1356 cls_sess = ddb_entry->sess;
1357 sess = cls_sess->dd_data;
1358
1359 cls_conn = ddb_entry->conn;
1360 conn = cls_conn->dd_data;
1361
1362 /* Update params */
1363 conn->max_recv_dlength = BYTE_UNITS *
1364 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1365
1366 conn->max_xmit_dlength = BYTE_UNITS *
1367 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1368
1369 sess->initial_r2t_en =
1370 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1371
1372 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1373
1374 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1375
1376 sess->first_burst = BYTE_UNITS *
1377 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1378
1379 sess->max_burst = BYTE_UNITS *
1380 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1381
1382 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1383
1384 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1385
1386 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1387
1388 memcpy(sess->initiatorname, ha->name_string,
1389 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
1390}
1391
1392/*
1393 * Timer routines
1394 */
1395
1396static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
1397 unsigned long interval)
1398{
1399 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
1400 __func__, ha->host->host_no));
1401 init_timer(&ha->timer);
1402 ha->timer.expires = jiffies + interval * HZ;
1403 ha->timer.data = (unsigned long)ha;
1404 ha->timer.function = (void (*)(unsigned long))func;
1405 add_timer(&ha->timer);
1406 ha->timer_active = 1;
1407}
1408
1409static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
1410{
1411 del_timer_sync(&ha->timer);
1412 ha->timer_active = 0;
1413}
1414
1415/***
1416 * qla4xxx_mark_device_missing - blocks the session
1417 * @cls_session: Pointer to the session to be blocked
 1419 *
f4f5df23 1420 * This routine marks the device missing and closes its connection.
afaf5a2d 1421 **/
b3a271a9 1422void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
afaf5a2d 1423{
b3a271a9 1424 iscsi_block_session(cls_session);
1425}
1426
1427/**
1428 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
1429 * @ha: Pointer to host adapter structure.
1430 *
 1431 * This routine marks all devices missing by blocking each iSCSI session.
1432 **/
1433void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
1434{
b3a271a9 1435 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
1436}
1437
1438static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
1439 struct ddb_entry *ddb_entry,
8f0722ca 1440 struct scsi_cmnd *cmd)
1441{
1442 struct srb *srb;
1443
1444 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
1445 if (!srb)
1446 return srb;
1447
09a0f719 1448 kref_init(&srb->srb_ref);
1449 srb->ha = ha;
1450 srb->ddb = ddb_entry;
1451 srb->cmd = cmd;
1452 srb->flags = 0;
5369887a 1453 CMD_SP(cmd) = (void *)srb;
1454
1455 return srb;
1456}
1457
1458static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
1459{
1460 struct scsi_cmnd *cmd = srb->cmd;
1461
1462 if (srb->flags & SRB_DMA_VALID) {
5f7186c8 1463 scsi_dma_unmap(cmd);
1464 srb->flags &= ~SRB_DMA_VALID;
1465 }
5369887a 1466 CMD_SP(cmd) = NULL;
1467}
1468
09a0f719 1469void qla4xxx_srb_compl(struct kref *ref)
afaf5a2d 1470{
09a0f719 1471 struct srb *srb = container_of(ref, struct srb, srb_ref);
afaf5a2d 1472 struct scsi_cmnd *cmd = srb->cmd;
09a0f719 1473 struct scsi_qla_host *ha = srb->ha;
1474
1475 qla4xxx_srb_free_dma(ha, srb);
1476
1477 mempool_free(srb, ha->srb_mempool);
1478
1479 cmd->scsi_done(cmd);
1480}
1481
1482/**
1483 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
8f0722ca 1484 * @host: scsi host
afaf5a2d 1485 * @cmd: Pointer to Linux's SCSI command structure
1486 *
1487 * Remarks:
1488 * This routine is invoked by Linux to send a SCSI command to the driver.
1489 * The mid-level driver tries to ensure that queuecommand never gets
1490 * invoked concurrently with itself or the interrupt handler (although
1491 * the interrupt handler may call this routine as part of request-
 1492 * completion handling). Unfortunately, it sometimes calls the scheduler
1493 * in interrupt context which is a big NO! NO!.
1494 **/
8f0722ca 1495static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
afaf5a2d 1496{
8f0722ca 1497 struct scsi_qla_host *ha = to_qla_host(host);
afaf5a2d 1498 struct ddb_entry *ddb_entry = cmd->device->hostdata;
7fb1921b 1499 struct iscsi_cls_session *sess = ddb_entry->sess;
1500 struct srb *srb;
1501 int rval;
1502
1503 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1504 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
1505 cmd->result = DID_NO_CONNECT << 16;
1506 else
1507 cmd->result = DID_REQUEUE << 16;
1508 goto qc_fail_command;
1509 }
1510
1511 if (!sess) {
1512 cmd->result = DID_IMM_RETRY << 16;
1513 goto qc_fail_command;
1514 }
1515
1516 rval = iscsi_session_chkready(sess);
1517 if (rval) {
1518 cmd->result = rval;
1519 goto qc_fail_command;
1520 }
1521
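 /* Report "host busy" (so the midlayer retries later) while a reset is pending or the adapter/link is not up. */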
1522 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1523 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1524 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1525 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1526 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1527 !test_bit(AF_ONLINE, &ha->flags) ||
b3a271a9 1528 !test_bit(AF_LINK_UP, &ha->flags) ||
f4f5df23 1529 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
1530 goto qc_host_busy;
1531
8f0722ca 1532 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
afaf5a2d 1533 if (!srb)
8f0722ca 1534 goto qc_host_busy;
1535
1536 rval = qla4xxx_send_command_to_isp(ha, srb);
1537 if (rval != QLA_SUCCESS)
1538 goto qc_host_busy_free_sp;
1539
1540 return 0;
1541
1542qc_host_busy_free_sp:
1543 qla4xxx_srb_free_dma(ha, srb);
1544 mempool_free(srb, ha->srb_mempool);
1545
1546qc_host_busy:
1547 return SCSI_MLQUEUE_HOST_BUSY;
1548
1549qc_fail_command:
8f0722ca 1550 cmd->scsi_done(cmd);
1551
1552 return 0;
1553}
1554
1555/**
1556 * qla4xxx_mem_free - frees memory allocated to adapter
1557 * @ha: Pointer to host adapter structure.
1558 *
1559 * Frees memory previously allocated by qla4xxx_mem_alloc
1560 **/
1561static void qla4xxx_mem_free(struct scsi_qla_host *ha)
1562{
1563 if (ha->queues)
1564 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
1565 ha->queues_dma);
1566
1567 ha->queues_len = 0;
1568 ha->queues = NULL;
1569 ha->queues_dma = 0;
1570 ha->request_ring = NULL;
1571 ha->request_dma = 0;
1572 ha->response_ring = NULL;
1573 ha->response_dma = 0;
1574 ha->shadow_regs = NULL;
1575 ha->shadow_regs_dma = 0;
1576
1577 /* Free srb pool. */
1578 if (ha->srb_mempool)
1579 mempool_destroy(ha->srb_mempool);
1580
1581 ha->srb_mempool = NULL;
1582
1583 if (ha->chap_dma_pool)
1584 dma_pool_destroy(ha->chap_dma_pool);
1585
afaf5a2d 1586 /* release io space registers */
1587 if (is_qla8022(ha)) {
1588 if (ha->nx_pcibase)
1589 iounmap(
1590 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
f4f5df23 1591 } else if (ha->reg)
1592 iounmap(ha->reg);
1593 pci_release_regions(ha->pdev);
1594}
1595
1596/**
1597 * qla4xxx_mem_alloc - allocates memory for use by adapter.
1598 * @ha: Pointer to host adapter structure
1599 *
1600 * Allocates DMA memory for request and response queues. Also allocates memory
1601 * for srbs.
1602 **/
1603static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
1604{
1605 unsigned long align;
1606
1607 /* Allocate contiguous block of DMA memory for queues. */
1608 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1609 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
1610 sizeof(struct shadow_regs) +
1611 MEM_ALIGN_VALUE +
1612 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
1613 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
1614 &ha->queues_dma, GFP_KERNEL);
1615 if (ha->queues == NULL) {
1616 ql4_printk(KERN_WARNING, ha,
1617 "Memory Allocation failed - queues.\n");
1618
1619 goto mem_alloc_error_exit;
1620 }
1621 memset(ha->queues, 0, ha->queues_len);
1622
1623 /*
1624 * As per RISC alignment requirements -- the bus-address must be a
1625 * multiple of the request-ring size (in bytes).
1626 */
1627 align = 0;
1628 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
1629 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
1630 (MEM_ALIGN_VALUE - 1));
1631
1632 /* Update request and response queue pointers. */
1633 ha->request_dma = ha->queues_dma + align;
1634 ha->request_ring = (struct queue_entry *) (ha->queues + align);
1635 ha->response_dma = ha->queues_dma + align +
1636 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
1637 ha->response_ring = (struct queue_entry *) (ha->queues + align +
1638 (REQUEST_QUEUE_DEPTH *
1639 QUEUE_SIZE));
1640 ha->shadow_regs_dma = ha->queues_dma + align +
1641 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1642 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
1643 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
1644 (REQUEST_QUEUE_DEPTH *
1645 QUEUE_SIZE) +
1646 (RESPONSE_QUEUE_DEPTH *
1647 QUEUE_SIZE));
1648
1649 /* Allocate memory for srb pool. */
1650 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
1651 mempool_free_slab, srb_cachep);
1652 if (ha->srb_mempool == NULL) {
1653 ql4_printk(KERN_WARNING, ha,
1654 "Memory Allocation failed - SRB Pool.\n");
1655
1656 goto mem_alloc_error_exit;
1657 }
1658
1659 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
1660 CHAP_DMA_BLOCK_SIZE, 8, 0);
1661
1662 if (ha->chap_dma_pool == NULL) {
1663 ql4_printk(KERN_WARNING, ha,
1664 "%s: chap_dma_pool allocation failed..\n", __func__);
1665 goto mem_alloc_error_exit;
1666 }
1667
1668 return QLA_SUCCESS;
1669
1670mem_alloc_error_exit:
1671 qla4xxx_mem_free(ha);
1672 return QLA_ERROR;
1673}
1674
1675/**
1676 * qla4_8xxx_check_fw_alive - Check firmware health
1677 * @ha: Pointer to host adapter structure.
1678 *
1679 * Context: Interrupt
1680 **/
1681static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
1682{
1683 uint32_t fw_heartbeat_counter, halt_status;
1684
1685 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
1686 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
1687 if (fw_heartbeat_counter == 0xffffffff) {
1688 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
1689 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
1690 ha->host_no, __func__));
1691 return;
1692 }
1693
1694 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
1695 ha->seconds_since_last_heartbeat++;
1696 /* FW not alive after 2 seconds */
1697 if (ha->seconds_since_last_heartbeat == 2) {
1698 ha->seconds_since_last_heartbeat = 0;
1699 halt_status = qla4_8xxx_rd_32(ha,
1700 QLA82XX_PEG_HALT_STATUS1);
1701
1702 ql4_printk(KERN_INFO, ha,
1703 "scsi(%ld): %s, Dumping hw/fw registers:\n "
1704 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
1705 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
1706 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
1707 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
1708 ha->host_no, __func__, halt_status,
1709 qla4_8xxx_rd_32(ha,
1710 QLA82XX_PEG_HALT_STATUS2),
1711 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
1712 0x3c),
1713 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
1714 0x3c),
1715 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
1716 0x3c),
1717 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
1718 0x3c),
1719 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
1720 0x3c));
21033639 1721
1722 /* Since we cannot change dev_state in interrupt
1723 * context, set appropriate DPC flag then wakeup
1724 * DPC */
1725 if (halt_status & HALT_STATUS_UNRECOVERABLE)
1726 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
1727 else {
1728 printk("scsi%ld: %s: detect abort needed!\n",
1729 ha->host_no, __func__);
1730 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1731 }
1732 qla4xxx_wake_dpc(ha);
21033639 1733 qla4xxx_mailbox_premature_completion(ha);
f4f5df23 1734 }
1735 } else
1736 ha->seconds_since_last_heartbeat = 0;
1737
f4f5df23
VC
1738 ha->fw_heartbeat_counter = fw_heartbeat_counter;
1739}
1740
1741/**
1742 * qla4_8xxx_watchdog - Poll dev state
1743 * @ha: Pointer to host adapter structure.
1744 *
1745 * Context: Interrupt
1746 **/
1747void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
1748{
1749 uint32_t dev_state;
1750
1751 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1752
1753 /* don't poll if reset is going on */
d56a1f7b
LC
1754 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1755 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
977f46a4 1756 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
f4f5df23
VC
1757 if (dev_state == QLA82XX_DEV_NEED_RESET &&
1758 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3930b8c1
VC
1759 if (!ql4xdontresethba) {
1760 ql4_printk(KERN_INFO, ha, "%s: HW State: "
1761 "NEED RESET!\n", __func__);
1762 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1763 qla4xxx_wake_dpc(ha);
1764 qla4xxx_mailbox_premature_completion(ha);
1765 }
f4f5df23
VC
1766 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
1767 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3930b8c1
VC
1768 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
1769 __func__);
f4f5df23
VC
1770 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
1771 qla4xxx_wake_dpc(ha);
1772 } else {
1773 /* Check firmware health */
1774 qla4_8xxx_check_fw_alive(ha);
1775 }
1776 }
1777}
1778
afaf5a2d
DS
1779/**
1780 * qla4xxx_timer - checks every second for work to do.
1781 * @ha: Pointer to host adapter structure.
1782 **/
1783static void qla4xxx_timer(struct scsi_qla_host *ha)
1784{
afaf5a2d 1785 int start_dpc = 0;
2232be0d
LC
1786 uint16_t w;
1787
1788 /* If we are in the middle of AER/EEH processing
1789 * skip any processing and reschedule the timer
1790 */
1791 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1792 mod_timer(&ha->timer, jiffies + HZ);
1793 return;
1794 }
1795
1796 /* Hardware read to trigger an EEH error during mailbox waits. */
1797 if (!pci_channel_offline(ha->pdev))
1798 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
afaf5a2d 1799
f4f5df23
VC
1800 if (is_qla8022(ha)) {
1801 qla4_8xxx_watchdog(ha);
1802 }
1803
f4f5df23
VC
1804 if (!is_qla8022(ha)) {
1805 /* Check for heartbeat interval. */
1806 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
1807 ha->heartbeat_interval != 0) {
1808 ha->seconds_since_last_heartbeat++;
1809 if (ha->seconds_since_last_heartbeat >
1810 ha->heartbeat_interval + 2)
1811 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1812 }
afaf5a2d
DS
1813 }
1814
afaf5a2d 1815 /* Wakeup the dpc routine for this adapter, if needed. */
1b46807e 1816 if (start_dpc ||
afaf5a2d
DS
1817 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1818 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
1819 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
f4f5df23 1820 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
afaf5a2d
DS
1821 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1822 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
065aa1b4 1823 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
f4f5df23
VC
1824 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1825 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1b46807e 1826 test_bit(DPC_AEN, &ha->dpc_flags)) {
afaf5a2d
DS
1827 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
1828 " - dpc flags = 0x%lx\n",
1829 ha->host_no, __func__, ha->dpc_flags));
f4f5df23 1830 qla4xxx_wake_dpc(ha);
afaf5a2d
DS
1831 }
1832
1833 /* Reschedule timer thread to call us back in one second */
1834 mod_timer(&ha->timer, jiffies + HZ);
1835
1836 DEBUG2(ha->seconds_since_last_intr++);
1837}
1838
1839/**
1840 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
1841 * @ha: Pointer to host adapter structure.
1842 *
1843 * This routine stalls the driver until all outstanding commands are returned.
1844 * Caller must release the Hardware Lock prior to calling this routine.
1845 **/
1846static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
1847{
1848 uint32_t index = 0;
afaf5a2d
DS
1849 unsigned long flags;
1850 struct scsi_cmnd *cmd;
afaf5a2d 1851
f4f5df23
VC
1852 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
1853
1854 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
1855 "complete\n", WAIT_CMD_TOV));
1856
1857 while (!time_after_eq(jiffies, wtime)) {
afaf5a2d
DS
1858 spin_lock_irqsave(&ha->hardware_lock, flags);
1859 /* Find a command that hasn't completed. */
1860 for (index = 0; index < ha->host->can_queue; index++) {
1861 cmd = scsi_host_find_tag(ha->host, index);
a1e0063d
MC
1862 /*
1863 * We cannot just check if the index is valid,
 1864			 * because if we are run from the scsi eh, then
1865 * the scsi/block layer is going to prevent
1866 * the tag from being released.
1867 */
1868 if (cmd != NULL && CMD_SP(cmd))
afaf5a2d
DS
1869 break;
1870 }
1871 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1872
1873 /* If No Commands are pending, wait is complete */
f4f5df23
VC
1874 if (index == ha->host->can_queue)
1875 return QLA_SUCCESS;
afaf5a2d 1876
f4f5df23
VC
1877 msleep(1000);
1878 }
1879 /* If we timed out on waiting for commands to come back
1880 * return ERROR. */
1881 return QLA_ERROR;
afaf5a2d
DS
1882}
1883
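/**
 * qla4xxx_hw_reset - issue a soft reset to the ISP-4xxx hardware
 * @ha: Pointer to host adapter structure.
 *
 * Acquires the driver lock, clears a pending SCSI Reset Interrupt (which
 * would otherwise prevent the soft reset from taking effect), and then sets
 * the Soft Reset bit in the control/status register.
 **/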
f4f5df23 1884int qla4xxx_hw_reset(struct scsi_qla_host *ha)
afaf5a2d 1885{
afaf5a2d 1886 uint32_t ctrl_status;
477ffb9d
DS
1887 unsigned long flags = 0;
1888
1889 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
afaf5a2d 1890
f4f5df23
VC
1891 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
1892 return QLA_ERROR;
1893
afaf5a2d
DS
1894 spin_lock_irqsave(&ha->hardware_lock, flags);
1895
1896 /*
1897 * If the SCSI Reset Interrupt bit is set, clear it.
1898 * Otherwise, the Soft Reset won't work.
1899 */
1900 ctrl_status = readw(&ha->reg->ctrl_status);
1901 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
1902 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
1903
1904 /* Issue Soft Reset */
1905 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
1906 readl(&ha->reg->ctrl_status);
1907
1908 spin_unlock_irqrestore(&ha->hardware_lock, flags);
f4f5df23 1909 return QLA_SUCCESS;
477ffb9d
DS
1910}
1911
1912/**
1913 * qla4xxx_soft_reset - performs soft reset.
1914 * @ha: Pointer to host adapter structure.
1915 **/
1916int qla4xxx_soft_reset(struct scsi_qla_host *ha)
1917{
1918 uint32_t max_wait_time;
1919 unsigned long flags = 0;
f931c534 1920 int status;
477ffb9d
DS
1921 uint32_t ctrl_status;
1922
f931c534
VC
1923 status = qla4xxx_hw_reset(ha);
1924 if (status != QLA_SUCCESS)
1925 return status;
afaf5a2d 1926
f931c534 1927 status = QLA_ERROR;
afaf5a2d
DS
1928 /* Wait until the Network Reset Intr bit is cleared */
1929 max_wait_time = RESET_INTR_TOV;
1930 do {
1931 spin_lock_irqsave(&ha->hardware_lock, flags);
1932 ctrl_status = readw(&ha->reg->ctrl_status);
1933 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1934
1935 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
1936 break;
1937
1938 msleep(1000);
1939 } while ((--max_wait_time));
1940
1941 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
1942 DEBUG2(printk(KERN_WARNING
1943 "scsi%ld: Network Reset Intr not cleared by "
1944 "Network function, clearing it now!\n",
1945 ha->host_no));
1946 spin_lock_irqsave(&ha->hardware_lock, flags);
1947 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
1948 readl(&ha->reg->ctrl_status);
1949 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1950 }
1951
1952 /* Wait until the firmware tells us the Soft Reset is done */
1953 max_wait_time = SOFT_RESET_TOV;
1954 do {
1955 spin_lock_irqsave(&ha->hardware_lock, flags);
1956 ctrl_status = readw(&ha->reg->ctrl_status);
1957 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1958
1959 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
1960 status = QLA_SUCCESS;
1961 break;
1962 }
1963
1964 msleep(1000);
1965 } while ((--max_wait_time));
1966
1967 /*
1968 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
1969 * after the soft reset has taken place.
1970 */
1971 spin_lock_irqsave(&ha->hardware_lock, flags);
1972 ctrl_status = readw(&ha->reg->ctrl_status);
1973 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
1974 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
1975 readl(&ha->reg->ctrl_status);
1976 }
1977 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1978
 1979	/* If the soft reset fails, then most probably the BIOS on the other
 1980	 * function is also enabled.
 1981	 * Since the initialization is sequential, the other function
 1982	 * won't be able to acknowledge the soft reset.
 1983	 * Issue a force soft reset to work around this scenario.
1984 */
1985 if (max_wait_time == 0) {
1986 /* Issue Force Soft Reset */
1987 spin_lock_irqsave(&ha->hardware_lock, flags);
1988 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
1989 readl(&ha->reg->ctrl_status);
1990 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1991 /* Wait until the firmware tells us the Soft Reset is done */
1992 max_wait_time = SOFT_RESET_TOV;
1993 do {
1994 spin_lock_irqsave(&ha->hardware_lock, flags);
1995 ctrl_status = readw(&ha->reg->ctrl_status);
1996 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1997
1998 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
1999 status = QLA_SUCCESS;
2000 break;
2001 }
2002
2003 msleep(1000);
2004 } while ((--max_wait_time));
2005 }
2006
2007 return status;
2008}
2009
afaf5a2d 2010/**
f4f5df23 2011 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
afaf5a2d 2012 * @ha: Pointer to host adapter structure.
f4f5df23 2013 * @res: returned scsi status
afaf5a2d
DS
2014 *
2015 * This routine is called just prior to a HARD RESET to return all
2016 * outstanding commands back to the Operating System.
2017 * Caller should make sure that the following locks are released
 2018 * before calling this routine: Hardware lock, and io_request_lock.
2019 **/
f4f5df23 2020static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
afaf5a2d
DS
2021{
2022 struct srb *srb;
2023 int i;
2024 unsigned long flags;
2025
2026 spin_lock_irqsave(&ha->hardware_lock, flags);
2027 for (i = 0; i < ha->host->can_queue; i++) {
2028 srb = qla4xxx_del_from_active_array(ha, i);
2029 if (srb != NULL) {
f4f5df23 2030 srb->cmd->result = res;
09a0f719 2031 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
afaf5a2d
DS
2032 }
2033 }
2034 spin_unlock_irqrestore(&ha->hardware_lock, flags);
afaf5a2d
DS
2035}
2036
f4f5df23
VC
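/**
 * qla4xxx_dead_adapter_cleanup - disable an adapter that could not be recovered
 * @ha: Pointer to host adapter structure.
 *
 * Takes the adapter offline, fails all outstanding commands with
 * DID_NO_CONNECT and marks all devices missing.
 **/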
2037void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2038{
2039 clear_bit(AF_ONLINE, &ha->flags);
2040
2041 /* Disable the board */
2042 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
f4f5df23
VC
2043
2044 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2045 qla4xxx_mark_all_devices_missing(ha);
2046 clear_bit(AF_INIT_DONE, &ha->flags);
2047}
2048
b3a271a9
MR
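/* Mark the session's firmware DDB as failed and report a connection
 * failure to the iSCSI transport so that session recovery is started. */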
2049static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2050{
2051 struct iscsi_session *sess;
2052 struct ddb_entry *ddb_entry;
2053
2054 sess = cls_session->dd_data;
2055 ddb_entry = sess->dd_data;
2056 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2057 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
2058}
2059
afaf5a2d
DS
2060/**
2061 * qla4xxx_recover_adapter - recovers adapter after a fatal error
2062 * @ha: Pointer to host adapter structure.
afaf5a2d 2063 **/
f4f5df23 2064static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
afaf5a2d 2065{
f4f5df23
VC
2066 int status = QLA_ERROR;
2067 uint8_t reset_chip = 0;
afaf5a2d
DS
2068
2069 /* Stall incoming I/O until we are done */
f4f5df23 2070 scsi_block_requests(ha->host);
afaf5a2d 2071 clear_bit(AF_ONLINE, &ha->flags);
b3a271a9 2072 clear_bit(AF_LINK_UP, &ha->flags);
50a29aec 2073
f4f5df23 2074 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
afaf5a2d 2075
f4f5df23 2076 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
afaf5a2d 2077
b3a271a9
MR
2078 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2079
f4f5df23
VC
2080 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2081 reset_chip = 1;
afaf5a2d 2082
f4f5df23
VC
2083 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2084 * do not reset adapter, jump to initialize_adapter */
2085 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2086 status = QLA_SUCCESS;
2087 goto recover_ha_init_adapter;
2088 }
afaf5a2d 2089
f4f5df23
VC
2090 /* For the ISP-82xx adapter, issue a stop_firmware if invoked
2091 * from eh_host_reset or ioctl module */
2092 if (is_qla8022(ha) && !reset_chip &&
2093 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2094
2095 DEBUG2(ql4_printk(KERN_INFO, ha,
2096 "scsi%ld: %s - Performing stop_firmware...\n",
2097 ha->host_no, __func__));
2098 status = ha->isp_ops->reset_firmware(ha);
2099 if (status == QLA_SUCCESS) {
2bd1e2be
NJ
2100 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2101 qla4xxx_cmd_wait(ha);
f4f5df23
VC
2102 ha->isp_ops->disable_intrs(ha);
2103 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2104 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2105 } else {
2106 /* If the stop_firmware fails then
2107 * reset the entire chip */
2108 reset_chip = 1;
2109 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2110 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2111 }
2112 }
dca05c4c 2113
f4f5df23
VC
2114 /* Issue full chip reset if recovering from a catastrophic error,
2115 * or if stop_firmware fails for ISP-82xx.
2116 * This is the default case for ISP-4xxx */
2117 if (!is_qla8022(ha) || reset_chip) {
2bd1e2be
NJ
2118 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2119 qla4xxx_cmd_wait(ha);
f4f5df23
VC
2120 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2121 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2122 DEBUG2(ql4_printk(KERN_INFO, ha,
2123 "scsi%ld: %s - Performing chip reset..\n",
2124 ha->host_no, __func__));
2125 status = ha->isp_ops->reset_chip(ha);
2126 }
afaf5a2d
DS
2127
2128 /* Flush any pending ddb changed AENs */
2129 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2130
f4f5df23
VC
2131recover_ha_init_adapter:
2132 /* Upon successful firmware/chip reset, re-initialize the adapter */
afaf5a2d 2133 if (status == QLA_SUCCESS) {
f4f5df23
VC
2134 /* For ISP-4xxx, force function 1 to always initialize
 2135		 * before function 3 to prevent both functions from
 2136		 * stepping on top of each other */
2137 if (!is_qla8022(ha) && (ha->mac_index == 3))
2138 ssleep(6);
2139
2140 /* NOTE: AF_ONLINE flag set upon successful completion of
2141 * qla4xxx_initialize_adapter */
0e7e8501 2142 status = qla4xxx_initialize_adapter(ha);
afaf5a2d
DS
2143 }
2144
f4f5df23
VC
2145 /* Retry failed adapter initialization, if necessary
2146 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
2147 * case to prevent ping-pong resets between functions */
2148 if (!test_bit(AF_ONLINE, &ha->flags) &&
2149 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 2150 /* Adapter initialization failed, see if we can retry
f4f5df23
VC
2151 * resetting the ha.
2152 * Since we don't want to block the DPC for too long
2153 * with multiple resets in the same thread,
2154 * utilize DPC to retry */
afaf5a2d
DS
2155 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
2156 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
2157 DEBUG2(printk("scsi%ld: recover adapter - retrying "
2158 "(%d) more times\n", ha->host_no,
2159 ha->retry_reset_ha_cnt));
2160 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2161 status = QLA_ERROR;
2162 } else {
2163 if (ha->retry_reset_ha_cnt > 0) {
2164 /* Schedule another Reset HA--DPC will retry */
2165 ha->retry_reset_ha_cnt--;
2166 DEBUG2(printk("scsi%ld: recover adapter - "
2167 "retry remaining %d\n",
2168 ha->host_no,
2169 ha->retry_reset_ha_cnt));
2170 status = QLA_ERROR;
2171 }
2172
2173 if (ha->retry_reset_ha_cnt == 0) {
2174 /* Recover adapter retries have been exhausted.
2175 * Adapter DEAD */
2176 DEBUG2(printk("scsi%ld: recover adapter "
2177 "failed - board disabled\n",
2178 ha->host_no));
f4f5df23 2179 qla4xxx_dead_adapter_cleanup(ha);
afaf5a2d
DS
2180 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2181 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 2182 clear_bit(DPC_RESET_HA_FW_CONTEXT,
afaf5a2d
DS
2183 &ha->dpc_flags);
2184 status = QLA_ERROR;
2185 }
2186 }
2187 } else {
2188 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 2189 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
afaf5a2d
DS
2190 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2191 }
2192
2193 ha->adapter_error_count++;
2194
f4f5df23
VC
2195 if (test_bit(AF_ONLINE, &ha->flags))
2196 ha->isp_ops->enable_intrs(ha);
2197
2198 scsi_unblock_requests(ha->host);
2199
2200 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2201 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
25985edc 2202 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
afaf5a2d 2203
afaf5a2d
DS
2204 return status;
2205}
2206
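/* Per-session callback: unblock a session whose firmware DDB is already
 * active, otherwise report a connection failure to trigger a relogin. */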
b3a271a9 2207static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
2d7924e6 2208{
b3a271a9
MR
2209 struct iscsi_session *sess;
2210 struct ddb_entry *ddb_entry;
2211 struct scsi_qla_host *ha;
2d7924e6 2212
b3a271a9
MR
2213 sess = cls_session->dd_data;
2214 ddb_entry = sess->dd_data;
2215 ha = ddb_entry->ha;
2216 if (!iscsi_is_session_online(cls_session)) {
2217 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
2218 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
2219 " unblock session\n", ha->host_no, __func__,
2220 ddb_entry->fw_ddb_index);
2221 iscsi_unblock_session(ddb_entry->sess);
2222 } else {
2223 /* Trigger relogin */
2224 iscsi_session_failure(cls_session->dd_data,
2225 ISCSI_ERR_CONN_FAILED);
2d7924e6
VC
2226 }
2227 }
2228}
2229
b3a271a9
MR
2230static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
2231{
2232 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
2233}
2234
f4f5df23
VC
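/* Schedule the DPC work on the adapter's DPC workqueue, if one exists. */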
2235void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
2236{
1b46807e 2237 if (ha->dpc_thread)
f4f5df23 2238 queue_work(ha->dpc_thread, &ha->dpc_work);
f4f5df23
VC
2239}
2240
afaf5a2d
DS
2241/**
2242 * qla4xxx_do_dpc - dpc routine
2243 * @data: in our case pointer to adapter structure
2244 *
 2245 * This routine is a task that is scheduled by the interrupt handler
 2246 * to perform the background processing for interrupts. We put it
 2247 * on a task queue that is consumed whenever the scheduler runs; that
 2248 * way it may block (e.g. put the process to sleep). In fact, the
 2249 * mid-level tries to sleep when it reaches the driver threshold
 2250 * "host->can_queue"; doing that from interrupt context would panic.
2251 **/
c4028958 2252static void qla4xxx_do_dpc(struct work_struct *work)
afaf5a2d 2253{
c4028958
DH
2254 struct scsi_qla_host *ha =
2255 container_of(work, struct scsi_qla_host, dpc_work);
477ffb9d 2256 int status = QLA_ERROR;
afaf5a2d 2257
f26b9044 2258	DEBUG2(printk("scsi%ld: %s: DPC handler waking up. "
f4f5df23
VC
2259 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
2260 ha->host_no, __func__, ha->flags, ha->dpc_flags))
afaf5a2d
DS
2261
2262 /* Initialization not yet finished. Don't do anything yet. */
2263 if (!test_bit(AF_INIT_DONE, &ha->flags))
1b46807e 2264 return;
afaf5a2d 2265
2232be0d
LC
2266 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2267 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
2268 ha->host_no, __func__, ha->flags));
1b46807e 2269 return;
2232be0d
LC
2270 }
2271
f4f5df23
VC
2272 if (is_qla8022(ha)) {
2273 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
2274 qla4_8xxx_idc_lock(ha);
2275 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2276 QLA82XX_DEV_FAILED);
2277 qla4_8xxx_idc_unlock(ha);
2278 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
2279 qla4_8xxx_device_state_handler(ha);
2280 }
2281 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2282 qla4_8xxx_need_qsnt_handler(ha);
2283 }
2284 }
2285
2286 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
2287 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
afaf5a2d 2288 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
f4f5df23
VC
2289 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
2290 if (ql4xdontresethba) {
2291 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
2292 ha->host_no, __func__));
2293 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2294 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2295 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2296 goto dpc_post_reset_ha;
2297 }
2298 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
2299 test_bit(DPC_RESET_HA, &ha->dpc_flags))
2300 qla4xxx_recover_adapter(ha);
afaf5a2d 2301
477ffb9d 2302 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 2303 uint8_t wait_time = RESET_INTR_TOV;
afaf5a2d 2304
afaf5a2d
DS
2305 while ((readw(&ha->reg->ctrl_status) &
2306 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
2307 if (--wait_time == 0)
2308 break;
afaf5a2d 2309 msleep(1000);
afaf5a2d 2310 }
afaf5a2d
DS
2311 if (wait_time == 0)
2312 DEBUG2(printk("scsi%ld: %s: SR|FSR "
2313 "bit not cleared-- resetting\n",
2314 ha->host_no, __func__));
f4f5df23 2315 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
477ffb9d
DS
2316 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
2317 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
f4f5df23 2318 status = qla4xxx_recover_adapter(ha);
477ffb9d
DS
2319 }
2320 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2321 if (status == QLA_SUCCESS)
f4f5df23 2322 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
2323 }
2324 }
2325
f4f5df23 2326dpc_post_reset_ha:
afaf5a2d
DS
2327 /* ---- process AEN? --- */
2328 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
2329 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
2330
2331 /* ---- Get DHCP IP Address? --- */
2332 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
2333 qla4xxx_get_dhcp_ip_address(ha);
2334
065aa1b4
VC
2335 /* ---- link change? --- */
2336 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
2337 if (!test_bit(AF_LINK_UP, &ha->flags)) {
2338 /* ---- link down? --- */
2d7924e6 2339 qla4xxx_mark_all_devices_missing(ha);
065aa1b4
VC
2340 } else {
2341 /* ---- link up? --- *
2342 * F/W will auto login to all devices ONLY ONCE after
2343 * link up during driver initialization and runtime
2344 * fatal error recovery. Therefore, the driver must
2345 * manually relogin to devices when recovering from
2346 * connection failures, logouts, expired KATO, etc. */
2347
2d7924e6 2348 qla4xxx_relogin_all_devices(ha);
065aa1b4
VC
2349 }
2350 }
afaf5a2d
DS
2351}
2352
2353/**
2354 * qla4xxx_free_adapter - release the adapter
2355 * @ha: pointer to adapter structure
2356 **/
2357static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
2358{
2359
2360 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
2361 /* Turn-off interrupts on the card. */
f4f5df23 2362 ha->isp_ops->disable_intrs(ha);
afaf5a2d
DS
2363 }
2364
f4f5df23
VC
2365 /* Remove timer thread, if present */
2366 if (ha->timer_active)
2367 qla4xxx_stop_timer(ha);
2368
afaf5a2d
DS
2369 /* Kill the kernel thread for this host */
2370 if (ha->dpc_thread)
2371 destroy_workqueue(ha->dpc_thread);
2372
b3a271a9
MR
 2373	/* Kill the task work queue for this host */
2374 if (ha->task_wq)
2375 destroy_workqueue(ha->task_wq);
2376
f4f5df23
VC
2377 /* Put firmware in known state */
2378 ha->isp_ops->reset_firmware(ha);
afaf5a2d 2379
f4f5df23
VC
2380 if (is_qla8022(ha)) {
2381 qla4_8xxx_idc_lock(ha);
2382 qla4_8xxx_clear_drv_active(ha);
2383 qla4_8xxx_idc_unlock(ha);
2384 }
afaf5a2d 2385
afaf5a2d
DS
2386 /* Detach interrupts */
2387 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
f4f5df23 2388 qla4xxx_free_irqs(ha);
afaf5a2d 2389
bee4fe8e
DS
2390 /* free extra memory */
2391 qla4xxx_mem_free(ha);
f4f5df23
VC
2392}
2393
2394int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
2395{
2396 int status = 0;
2397 uint8_t revision_id;
2398 unsigned long mem_base, mem_len, db_base, db_len;
2399 struct pci_dev *pdev = ha->pdev;
2400
2401 status = pci_request_regions(pdev, DRIVER_NAME);
2402 if (status) {
2403 printk(KERN_WARNING
2404 "scsi(%ld) Failed to reserve PIO regions (%s) "
2405 "status=%d\n", ha->host_no, pci_name(pdev), status);
2406 goto iospace_error_exit;
2407 }
2408
2409 pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
2410 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
2411 __func__, revision_id));
2412 ha->revision_id = revision_id;
bee4fe8e 2413
f4f5df23
VC
2414 /* remap phys address */
2415 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
2416 mem_len = pci_resource_len(pdev, 0);
2417 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
2418 __func__, mem_base, mem_len));
afaf5a2d 2419
f4f5df23
VC
2420 /* mapping of pcibase pointer */
2421 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
2422 if (!ha->nx_pcibase) {
2423 printk(KERN_ERR
2424 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
2425 pci_release_regions(ha->pdev);
2426 goto iospace_error_exit;
2427 }
2428
2429 /* Mapping of IO base pointer, door bell read and write pointer */
2430
2431 /* mapping of IO base pointer */
2432 ha->qla4_8xxx_reg =
2433 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
2434 0xbc000 + (ha->pdev->devfn << 11));
2435
2436 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
2437 db_len = pci_resource_len(pdev, 4);
2438
2657c800
SS
2439 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
2440 QLA82XX_CAM_RAM_DB2);
f4f5df23 2441
2657c800 2442 return 0;
f4f5df23
VC
2443iospace_error_exit:
2444 return -ENOMEM;
afaf5a2d
DS
2445}
2446
2447/***
2448 * qla4xxx_iospace_config - maps registers
2449 * @ha: pointer to adapter structure
2450 *
 2451 * This routine maps the HBA's registers from the PCI address space
2452 * into the kernel virtual address space for memory mapped i/o.
2453 **/
f4f5df23 2454int qla4xxx_iospace_config(struct scsi_qla_host *ha)
afaf5a2d
DS
2455{
2456 unsigned long pio, pio_len, pio_flags;
2457 unsigned long mmio, mmio_len, mmio_flags;
2458
2459 pio = pci_resource_start(ha->pdev, 0);
2460 pio_len = pci_resource_len(ha->pdev, 0);
2461 pio_flags = pci_resource_flags(ha->pdev, 0);
2462 if (pio_flags & IORESOURCE_IO) {
2463 if (pio_len < MIN_IOBASE_LEN) {
c2660df3 2464 ql4_printk(KERN_WARNING, ha,
afaf5a2d
DS
2465 "Invalid PCI I/O region size\n");
2466 pio = 0;
2467 }
2468 } else {
c2660df3 2469 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
afaf5a2d
DS
2470 pio = 0;
2471 }
2472
2473 /* Use MMIO operations for all accesses. */
2474 mmio = pci_resource_start(ha->pdev, 1);
2475 mmio_len = pci_resource_len(ha->pdev, 1);
2476 mmio_flags = pci_resource_flags(ha->pdev, 1);
2477
2478 if (!(mmio_flags & IORESOURCE_MEM)) {
c2660df3
VC
2479 ql4_printk(KERN_ERR, ha,
 2480		    "region #1 not an MMIO resource, aborting\n");
afaf5a2d
DS
2481
2482 goto iospace_error_exit;
2483 }
c2660df3 2484
afaf5a2d 2485 if (mmio_len < MIN_IOBASE_LEN) {
c2660df3
VC
2486 ql4_printk(KERN_ERR, ha,
2487 "Invalid PCI mem region size, aborting\n");
afaf5a2d
DS
2488 goto iospace_error_exit;
2489 }
2490
2491 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
c2660df3
VC
2492 ql4_printk(KERN_WARNING, ha,
2493 "Failed to reserve PIO/MMIO regions\n");
afaf5a2d
DS
2494
2495 goto iospace_error_exit;
2496 }
2497
2498 ha->pio_address = pio;
2499 ha->pio_length = pio_len;
2500 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
2501 if (!ha->reg) {
c2660df3
VC
2502 ql4_printk(KERN_ERR, ha,
2503 "cannot remap MMIO, aborting\n");
afaf5a2d
DS
2504
2505 goto iospace_error_exit;
2506 }
2507
2508 return 0;
2509
2510iospace_error_exit:
2511 return -ENOMEM;
2512}
2513
f4f5df23
VC
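/* Hardware access methods for ISP-4xxx (legacy) adapters */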
2514static struct isp_operations qla4xxx_isp_ops = {
2515 .iospace_config = qla4xxx_iospace_config,
2516 .pci_config = qla4xxx_pci_config,
2517 .disable_intrs = qla4xxx_disable_intrs,
2518 .enable_intrs = qla4xxx_enable_intrs,
2519 .start_firmware = qla4xxx_start_firmware,
2520 .intr_handler = qla4xxx_intr_handler,
2521 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
2522 .reset_chip = qla4xxx_soft_reset,
2523 .reset_firmware = qla4xxx_hw_reset,
2524 .queue_iocb = qla4xxx_queue_iocb,
2525 .complete_iocb = qla4xxx_complete_iocb,
2526 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
2527 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
2528 .get_sys_info = qla4xxx_get_sys_info,
2529};
2530
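/* Hardware access methods for ISP-82xx adapters */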
2531static struct isp_operations qla4_8xxx_isp_ops = {
2532 .iospace_config = qla4_8xxx_iospace_config,
2533 .pci_config = qla4_8xxx_pci_config,
2534 .disable_intrs = qla4_8xxx_disable_intrs,
2535 .enable_intrs = qla4_8xxx_enable_intrs,
2536 .start_firmware = qla4_8xxx_load_risc,
2537 .intr_handler = qla4_8xxx_intr_handler,
2538 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
2539 .reset_chip = qla4_8xxx_isp_reset,
2540 .reset_firmware = qla4_8xxx_stop_firmware,
2541 .queue_iocb = qla4_8xxx_queue_iocb,
2542 .complete_iocb = qla4_8xxx_complete_iocb,
2543 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
2544 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
2545 .get_sys_info = qla4_8xxx_get_sys_info,
2546};
2547
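/*
 * Shadow register accessors: ISP-4xxx firmware maintains the request-queue
 * out and response-queue in pointers in host memory (shadow_regs), while
 * ISP-82xx exposes them through memory-mapped registers.
 */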
2548uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2549{
2550 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
2551}
2552
2553uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2554{
2555 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
2556}
2557
2558uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2559{
2560 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
2561}
2562
2563uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2564{
2565 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
2566}
2567
2a991c21
MR
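/*
 * iscsi_boot_sysfs show/visibility callbacks used to export the firmware
 * boot ethernet, initiator and target information through sysfs.
 */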
2568static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
2569{
2570 struct scsi_qla_host *ha = data;
2571 char *str = buf;
2572 int rc;
2573
2574 switch (type) {
2575 case ISCSI_BOOT_ETH_FLAGS:
2576 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2577 break;
2578 case ISCSI_BOOT_ETH_INDEX:
2579 rc = sprintf(str, "0\n");
2580 break;
2581 case ISCSI_BOOT_ETH_MAC:
2582 rc = sysfs_format_mac(str, ha->my_mac,
2583 MAC_ADDR_LEN);
2584 break;
2585 default:
2586 rc = -ENOSYS;
2587 break;
2588 }
2589 return rc;
2590}
2591
2592static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
2593{
2594 int rc;
2595
2596 switch (type) {
2597 case ISCSI_BOOT_ETH_FLAGS:
2598 case ISCSI_BOOT_ETH_MAC:
2599 case ISCSI_BOOT_ETH_INDEX:
2600 rc = S_IRUGO;
2601 break;
2602 default:
2603 rc = 0;
2604 break;
2605 }
2606 return rc;
2607}
2608
2609static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
2610{
2611 struct scsi_qla_host *ha = data;
2612 char *str = buf;
2613 int rc;
2614
2615 switch (type) {
2616 case ISCSI_BOOT_INI_INITIATOR_NAME:
2617 rc = sprintf(str, "%s\n", ha->name_string);
2618 break;
2619 default:
2620 rc = -ENOSYS;
2621 break;
2622 }
2623 return rc;
2624}
2625
2626static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
2627{
2628 int rc;
2629
2630 switch (type) {
2631 case ISCSI_BOOT_INI_INITIATOR_NAME:
2632 rc = S_IRUGO;
2633 break;
2634 default:
2635 rc = 0;
2636 break;
2637 }
2638 return rc;
2639}
2640
2641static ssize_t
2642qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
2643 char *buf)
2644{
2645 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2646 char *str = buf;
2647 int rc;
2648
2649 switch (type) {
2650 case ISCSI_BOOT_TGT_NAME:
2651 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
2652 break;
2653 case ISCSI_BOOT_TGT_IP_ADDR:
2654 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
2655 rc = sprintf(buf, "%pI4\n",
2656 &boot_conn->dest_ipaddr.ip_address);
2657 else
2658 rc = sprintf(str, "%pI6\n",
2659 &boot_conn->dest_ipaddr.ip_address);
2660 break;
2661 case ISCSI_BOOT_TGT_PORT:
2662 rc = sprintf(str, "%d\n", boot_conn->dest_port);
2663 break;
2664 case ISCSI_BOOT_TGT_CHAP_NAME:
2665 rc = sprintf(str, "%.*s\n",
2666 boot_conn->chap.target_chap_name_length,
2667 (char *)&boot_conn->chap.target_chap_name);
2668 break;
2669 case ISCSI_BOOT_TGT_CHAP_SECRET:
2670 rc = sprintf(str, "%.*s\n",
2671 boot_conn->chap.target_secret_length,
2672 (char *)&boot_conn->chap.target_secret);
2673 break;
2674 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2675 rc = sprintf(str, "%.*s\n",
2676 boot_conn->chap.intr_chap_name_length,
2677 (char *)&boot_conn->chap.intr_chap_name);
2678 break;
2679 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2680 rc = sprintf(str, "%.*s\n",
2681 boot_conn->chap.intr_secret_length,
2682 (char *)&boot_conn->chap.intr_secret);
2683 break;
2684 case ISCSI_BOOT_TGT_FLAGS:
2685 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2686 break;
2687 case ISCSI_BOOT_TGT_NIC_ASSOC:
2688 rc = sprintf(str, "0\n");
2689 break;
2690 default:
2691 rc = -ENOSYS;
2692 break;
2693 }
2694 return rc;
2695}
2696
2697static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
2698{
2699 struct scsi_qla_host *ha = data;
2700 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
2701
2702 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2703}
2704
2705static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
2706{
2707 struct scsi_qla_host *ha = data;
2708 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
2709
2710 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2711}
2712
2713static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
2714{
2715 int rc;
2716
2717 switch (type) {
2718 case ISCSI_BOOT_TGT_NAME:
2719 case ISCSI_BOOT_TGT_IP_ADDR:
2720 case ISCSI_BOOT_TGT_PORT:
2721 case ISCSI_BOOT_TGT_CHAP_NAME:
2722 case ISCSI_BOOT_TGT_CHAP_SECRET:
2723 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2724 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2725 case ISCSI_BOOT_TGT_NIC_ASSOC:
2726 case ISCSI_BOOT_TGT_FLAGS:
2727 rc = S_IRUGO;
2728 break;
2729 default:
2730 rc = 0;
2731 break;
2732 }
2733 return rc;
2734}
2735
2736static void qla4xxx_boot_release(void *data)
2737{
2738 struct scsi_qla_host *ha = data;
2739
2740 scsi_host_put(ha->host);
2741}
2742
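/**
 * get_fw_boot_info - fetch the primary/secondary boot target indices
 * @ha: Pointer to host adapter structure.
 * @ddb_index: array receiving the primary [0] and secondary [1] indices
 *
 * Reads the boot mode and boot target selections from NVRAM (ISP-4032)
 * or from flash (ISP-8022).
 **/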
2743static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
2744{
2745 dma_addr_t buf_dma;
2746 uint32_t addr, pri_addr, sec_addr;
2747 uint32_t offset;
2748 uint16_t func_num;
2749 uint8_t val;
2750 uint8_t *buf = NULL;
2751 size_t size = 13 * sizeof(uint8_t);
2752 int ret = QLA_SUCCESS;
2753
2754 func_num = PCI_FUNC(ha->pdev->devfn);
2755
2756 DEBUG2(ql4_printk(KERN_INFO, ha,
2757 "%s: Get FW boot info for 0x%x func %d\n", __func__,
2758 (is_qla4032(ha) ? PCI_DEVICE_ID_QLOGIC_ISP4032 :
2759 PCI_DEVICE_ID_QLOGIC_ISP8022), func_num));
2760
2761 if (is_qla4032(ha)) {
2762 if (func_num == 1) {
2763 addr = NVRAM_PORT0_BOOT_MODE;
2764 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
2765 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
2766 } else if (func_num == 3) {
2767 addr = NVRAM_PORT1_BOOT_MODE;
2768 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
2769 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
2770 } else {
2771 ret = QLA_ERROR;
2772 goto exit_boot_info;
2773 }
2774
2775 /* Check Boot Mode */
2776 val = rd_nvram_byte(ha, addr);
2777 if (!(val & 0x07)) {
2778 DEBUG2(ql4_printk(KERN_ERR, ha,
2779 "%s: Failed Boot options : 0x%x\n",
2780 __func__, val));
2781 ret = QLA_ERROR;
2782 goto exit_boot_info;
2783 }
2784
2785 /* get primary valid target index */
2786 val = rd_nvram_byte(ha, pri_addr);
2787 if (val & BIT_7)
2788 ddb_index[0] = (val & 0x7f);
2789 else
2790 ddb_index[0] = 0;
2791
2792 /* get secondary valid target index */
2793 val = rd_nvram_byte(ha, sec_addr);
2794 if (val & BIT_7)
2795 ddb_index[1] = (val & 0x7f);
2796 else
2797 ddb_index[1] = 1;
2798
2799 } else if (is_qla8022(ha)) {
2800 buf = dma_alloc_coherent(&ha->pdev->dev, size,
2801 &buf_dma, GFP_KERNEL);
2802 if (!buf) {
2803 DEBUG2(ql4_printk(KERN_ERR, ha,
2804 "%s: Unable to allocate dma buffer\n",
2805 __func__));
2806 ret = QLA_ERROR;
2807 goto exit_boot_info;
2808 }
2809
2810 if (ha->port_num == 0)
2811 offset = BOOT_PARAM_OFFSET_PORT0;
2812 else if (ha->port_num == 1)
2813 offset = BOOT_PARAM_OFFSET_PORT1;
2814 else {
2815 ret = QLA_ERROR;
2816 goto exit_boot_info_free;
2817 }
2818 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
2819 offset;
2820 if (qla4xxx_get_flash(ha, buf_dma, addr,
2821 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
 2822			DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash "
2823 "failed\n", ha->host_no, __func__));
2824 ret = QLA_ERROR;
2825 goto exit_boot_info_free;
2826 }
2827 /* Check Boot Mode */
2828 if (!(buf[1] & 0x07)) {
2829 DEBUG2(ql4_printk(KERN_INFO, ha,
2830 "Failed: Boot options : 0x%x\n",
2831 buf[1]));
2832 ret = QLA_ERROR;
2833 goto exit_boot_info_free;
2834 }
2835
2836 /* get primary valid target index */
2837 if (buf[2] & BIT_7)
2838 ddb_index[0] = buf[2] & 0x7f;
2839 else
2840 ddb_index[0] = 0;
2841
2842 /* get secondary valid target index */
2843 if (buf[11] & BIT_7)
2844 ddb_index[1] = buf[11] & 0x7f;
2845 else
2846 ddb_index[1] = 1;
2847
2848 } else {
2849 ret = QLA_ERROR;
2850 goto exit_boot_info;
2851 }
2852
2853 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
2854 " target ID %d\n", __func__, ddb_index[0],
2855 ddb_index[1]));
2856
2857exit_boot_info_free:
2858 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
2859exit_boot_info:
2860 return ret;
2861}
2862
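/**
 * qla4xxx_get_boot_target - populate a boot session from a flash DDB entry
 * @ha: Pointer to host adapter structure.
 * @boot_sess: boot session info to populate
 * @ddb_index: index of the flash DDB entry to read
 *
 * Copies the target name, IP address, port and (BIDI) CHAP credentials
 * from the flash DDB entry into the boot session structure.
 **/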
2863static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
2864 struct ql4_boot_session_info *boot_sess,
2865 uint16_t ddb_index)
2866{
2867 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2868 struct dev_db_entry *fw_ddb_entry;
2869 dma_addr_t fw_ddb_entry_dma;
2870 uint16_t idx;
2871 uint16_t options;
2872 int ret = QLA_SUCCESS;
2873
2874 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2875 &fw_ddb_entry_dma, GFP_KERNEL);
2876 if (!fw_ddb_entry) {
2877 DEBUG2(ql4_printk(KERN_ERR, ha,
2878 "%s: Unable to allocate dma buffer.\n",
2879 __func__));
2880 ret = QLA_ERROR;
2881 return ret;
2882 }
2883
2884 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
2885 fw_ddb_entry_dma, ddb_index)) {
2886 DEBUG2(ql4_printk(KERN_ERR, ha,
2887 "%s: Flash DDB read Failed\n", __func__));
2888 ret = QLA_ERROR;
2889 goto exit_boot_target;
2890 }
2891
2892 /* Update target name and IP from DDB */
2893 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
2894 min(sizeof(boot_sess->target_name),
2895 sizeof(fw_ddb_entry->iscsi_name)));
2896
2897 options = le16_to_cpu(fw_ddb_entry->options);
2898 if (options & DDB_OPT_IPV6_DEVICE) {
2899 memcpy(&boot_conn->dest_ipaddr.ip_address,
2900 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
2901 } else {
2902 boot_conn->dest_ipaddr.ip_type = 0x1;
2903 memcpy(&boot_conn->dest_ipaddr.ip_address,
2904 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
2905 }
2906
2907 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
2908
2909 /* update chap information */
2910 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2911
2912 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
2913
2914 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
2915
2916 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
2917 target_chap_name,
2918 (char *)&boot_conn->chap.target_secret,
2919 idx);
2920 if (ret) {
2921 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
2922 ret = QLA_ERROR;
2923 goto exit_boot_target;
2924 }
2925
2926 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
2927 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
2928 }
2929
2930 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
2931
2932 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
2933
2934 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
2935 intr_chap_name,
2936 (char *)&boot_conn->chap.intr_secret,
2937 (idx + 1));
2938 if (ret) {
2939 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
2940 ret = QLA_ERROR;
2941 goto exit_boot_target;
2942 }
2943
2944 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
2945 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
2946 }
2947
2948exit_boot_target:
2949 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2950 fw_ddb_entry, fw_ddb_entry_dma);
2951 return ret;
2952}
2953
2954static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
2955{
2956 uint16_t ddb_index[2];
2957 int ret = QLA_SUCCESS;
2958
2959 memset(ddb_index, 0, sizeof(ddb_index));
2960 ret = get_fw_boot_info(ha, ddb_index);
2961 if (ret != QLA_SUCCESS) {
2962 DEBUG2(ql4_printk(KERN_ERR, ha,
 2963				  "%s: Failed to get boot info.\n", __func__));
2964 return ret;
2965 }
2966
2967 ret = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
2968 ddb_index[0]);
2969 if (ret != QLA_SUCCESS) {
2970 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
2971 "primary target\n", __func__));
2972 }
2973
2974 ret = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
2975 ddb_index[1]);
2976 if (ret != QLA_SUCCESS) {
2977 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
2978 "secondary target\n", __func__));
2979 }
2980 return ret;
2981}
2982
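/* Create the iscsi_boot sysfs kset plus the target, initiator and ethernet
 * kobjects that expose the firmware boot information. */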
2983static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
2984{
2985 struct iscsi_boot_kobj *boot_kobj;
2986
2987 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
2988 return 0;
2989
2990 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
2991 if (!ha->boot_kset)
2992 goto kset_free;
2993
2994 if (!scsi_host_get(ha->host))
2995 goto kset_free;
2996 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
2997 qla4xxx_show_boot_tgt_pri_info,
2998 qla4xxx_tgt_get_attr_visibility,
2999 qla4xxx_boot_release);
3000 if (!boot_kobj)
3001 goto put_host;
3002
3003 if (!scsi_host_get(ha->host))
3004 goto kset_free;
3005 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
3006 qla4xxx_show_boot_tgt_sec_info,
3007 qla4xxx_tgt_get_attr_visibility,
3008 qla4xxx_boot_release);
3009 if (!boot_kobj)
3010 goto put_host;
3011
3012 if (!scsi_host_get(ha->host))
3013 goto kset_free;
3014 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
3015 qla4xxx_show_boot_ini_info,
3016 qla4xxx_ini_get_attr_visibility,
3017 qla4xxx_boot_release);
3018 if (!boot_kobj)
3019 goto put_host;
3020
3021 if (!scsi_host_get(ha->host))
3022 goto kset_free;
3023 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
3024 qla4xxx_show_boot_eth_info,
3025 qla4xxx_eth_get_attr_visibility,
3026 qla4xxx_boot_release);
3027 if (!boot_kobj)
3028 goto put_host;
3029
3030 return 0;
3031
3032put_host:
3033 scsi_host_put(ha->host);
3034kset_free:
3035 iscsi_boot_destroy_kset(ha->boot_kset);
3036 return -ENOMEM;
3037}
3038
afaf5a2d
DS
3039/**
3040 * qla4xxx_probe_adapter - callback function to probe HBA
3041 * @pdev: pointer to pci_dev structure
 3042 * @ent: pointer to pci_device_id structure
3043 *
 3044 * This routine will probe for QLogic 4xxx iSCSI host adapters.
3045 * It returns zero if successful. It also initializes all data necessary for
3046 * the driver.
3047 **/
3048static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
3049 const struct pci_device_id *ent)
3050{
3051 int ret = -ENODEV, status;
3052 struct Scsi_Host *host;
3053 struct scsi_qla_host *ha;
afaf5a2d
DS
3054 uint8_t init_retry_count = 0;
3055 char buf[34];
f4f5df23 3056 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
f9880e76 3057 uint32_t dev_state;
afaf5a2d
DS
3058
3059 if (pci_enable_device(pdev))
3060 return -1;
3061
b3a271a9 3062 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
afaf5a2d
DS
3063 if (host == NULL) {
3064 printk(KERN_WARNING
3065 "qla4xxx: Couldn't allocate host from scsi layer!\n");
3066 goto probe_disable_device;
3067 }
3068
3069 /* Clear our data area */
b3a271a9 3070 ha = to_qla_host(host);
afaf5a2d
DS
3071 memset(ha, 0, sizeof(*ha));
3072
3073 /* Save the information from PCI BIOS. */
3074 ha->pdev = pdev;
3075 ha->host = host;
3076 ha->host_no = host->host_no;
3077
2232be0d
LC
3078 pci_enable_pcie_error_reporting(pdev);
3079
f4f5df23
VC
3080 /* Setup Runtime configurable options */
3081 if (is_qla8022(ha)) {
3082 ha->isp_ops = &qla4_8xxx_isp_ops;
3083 rwlock_init(&ha->hw_lock);
3084 ha->qdr_sn_window = -1;
3085 ha->ddr_mn_window = -1;
3086 ha->curr_window = 255;
3087 ha->func_num = PCI_FUNC(ha->pdev->devfn);
3088 nx_legacy_intr = &legacy_intr[ha->func_num];
3089 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
3090 ha->nx_legacy_intr.tgt_status_reg =
3091 nx_legacy_intr->tgt_status_reg;
3092 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
3093 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
3094 } else {
3095 ha->isp_ops = &qla4xxx_isp_ops;
3096 }
3097
2232be0d
LC
3098 /* Set EEH reset type to fundamental if required by hba */
3099 if (is_qla8022(ha))
3100 pdev->needs_freset = 1;
3101
afaf5a2d 3102 /* Configure PCI I/O space. */
f4f5df23 3103 ret = ha->isp_ops->iospace_config(ha);
afaf5a2d 3104 if (ret)
f4f5df23 3105 goto probe_failed_ioconfig;
afaf5a2d 3106
c2660df3 3107 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
afaf5a2d
DS
3108 pdev->device, pdev->irq, ha->reg);
3109
3110 qla4xxx_config_dma_addressing(ha);
3111
3112 /* Initialize lists and spinlocks. */
afaf5a2d
DS
3113 INIT_LIST_HEAD(&ha->free_srb_q);
3114
3115 mutex_init(&ha->mbox_sem);
f4f5df23 3116 init_completion(&ha->mbx_intr_comp);
afaf5a2d
DS
3117
3118 spin_lock_init(&ha->hardware_lock);
afaf5a2d
DS
3119
3120 /* Allocate dma buffers */
3121 if (qla4xxx_mem_alloc(ha)) {
c2660df3
VC
3122 ql4_printk(KERN_WARNING, ha,
3123 "[ERROR] Failed to allocate memory for adapter\n");
afaf5a2d
DS
3124
3125 ret = -ENOMEM;
3126 goto probe_failed;
3127 }
3128
b3a271a9
MR
3129 host->cmd_per_lun = 3;
3130 host->max_channel = 0;
3131 host->max_lun = MAX_LUNS - 1;
3132 host->max_id = MAX_TARGETS;
3133 host->max_cmd_len = IOCB_MAX_CDB_LEN;
3134 host->can_queue = MAX_SRBS ;
3135 host->transportt = qla4xxx_scsi_transport;
3136
3137 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
3138 if (ret) {
3139 ql4_printk(KERN_WARNING, ha,
3140 "%s: scsi_init_shared_tag_map failed\n", __func__);
3141 goto probe_failed;
3142 }
3143
3144 pci_set_drvdata(pdev, ha);
3145
3146 ret = scsi_add_host(host, &pdev->dev);
3147 if (ret)
3148 goto probe_failed;
3149
f4f5df23
VC
3150 if (is_qla8022(ha))
3151 (void) qla4_8xxx_get_flash_info(ha);
3152
afaf5a2d
DS
3153 /*
3154 * Initialize the Host adapter request/response queues and
3155 * firmware
3156 * NOTE: interrupts enabled upon successful completion
3157 */
0e7e8501 3158 status = qla4xxx_initialize_adapter(ha);
f4f5df23
VC
3159 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
3160 init_retry_count++ < MAX_INIT_RETRIES) {
f9880e76
PM
3161
3162 if (is_qla8022(ha)) {
3163 qla4_8xxx_idc_lock(ha);
3164 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3165 qla4_8xxx_idc_unlock(ha);
3166 if (dev_state == QLA82XX_DEV_FAILED) {
3167 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
 3168				    "adapter initialization; H/W is in failed state\n",
3169 __func__);
3170 break;
3171 }
3172 }
afaf5a2d
DS
3173 DEBUG2(printk("scsi: %s: retrying adapter initialization "
3174 "(%d)\n", __func__, init_retry_count));
f4f5df23
VC
3175
3176 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
3177 continue;
3178
0e7e8501 3179 status = qla4xxx_initialize_adapter(ha);
afaf5a2d 3180 }
f4f5df23
VC
3181
3182 if (!test_bit(AF_ONLINE, &ha->flags)) {
c2660df3 3183 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
afaf5a2d 3184
fe998527
LC
3185 if (is_qla8022(ha) && ql4xdontresethba) {
3186 /* Put the device in failed state. */
3187 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
3188 qla4_8xxx_idc_lock(ha);
3189 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3190 QLA82XX_DEV_FAILED);
3191 qla4_8xxx_idc_unlock(ha);
3192 }
afaf5a2d 3193 ret = -ENODEV;
b3a271a9 3194 goto remove_host;
afaf5a2d
DS
3195 }
3196
afaf5a2d
DS
3197 /* Startup the kernel thread for this host adapter. */
3198 DEBUG2(printk("scsi: %s: Starting kernel thread for "
3199 "qla4xxx_dpc\n", __func__));
3200 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
3201 ha->dpc_thread = create_singlethread_workqueue(buf);
3202 if (!ha->dpc_thread) {
c2660df3 3203 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
afaf5a2d 3204 ret = -ENODEV;
b3a271a9 3205 goto remove_host;
afaf5a2d 3206 }
c4028958 3207 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
afaf5a2d 3208
b3a271a9
MR
3209 sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
3210 ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
3211 if (!ha->task_wq) {
3212 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
3213 ret = -ENODEV;
3214 goto remove_host;
3215 }
3216
f4f5df23
VC
3217 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
3218 * (which is called indirectly by qla4xxx_initialize_adapter),
3219 * so that irqs will be registered after crbinit but before
3220 * mbx_intr_enable.
3221 */
3222 if (!is_qla8022(ha)) {
3223 ret = qla4xxx_request_irqs(ha);
3224 if (ret) {
3225 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
 3226			    "interrupt %d; already in use.\n", pdev->irq);
b3a271a9 3227 goto remove_host;
f4f5df23 3228 }
afaf5a2d 3229 }
afaf5a2d 3230
2232be0d 3231 pci_save_state(ha->pdev);
f4f5df23 3232 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
3233
3234 /* Start timer thread. */
3235 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
3236
3237 set_bit(AF_INIT_DONE, &ha->flags);
3238
afaf5a2d
DS
3239 printk(KERN_INFO
3240 " QLogic iSCSI HBA Driver version: %s\n"
3241 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
3242 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
3243 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
3244 ha->patch_number, ha->build_number);
ed1086e0 3245
2a991c21
MR
3246 if (qla4xxx_setup_boot_info(ha))
 3247		ql4_printk(KERN_ERR, ha, "%s: iSCSI boot info setup failed\n",
3248 __func__);
3249
ed1086e0 3250 qla4xxx_create_ifaces(ha);
afaf5a2d
DS
3251 return 0;
3252
b3a271a9
MR
3253remove_host:
3254 scsi_remove_host(ha->host);
3255
afaf5a2d
DS
3256probe_failed:
3257 qla4xxx_free_adapter(ha);
f4f5df23
VC
3258
3259probe_failed_ioconfig:
2232be0d 3260 pci_disable_pcie_error_reporting(pdev);
afaf5a2d
DS
3261 scsi_host_put(ha->host);
3262
3263probe_disable_device:
3264 pci_disable_device(pdev);
3265
3266 return ret;
3267}
3268
7eece5a0
KH
3269/**
3270 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
3271 * @ha: pointer to adapter structure
3272 *
3273 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
3274 * so that the other port will not re-initialize while in the process of
3275 * removing the ha due to driver unload or hba hotplug.
3276 **/
3277static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
3278{
3279 struct scsi_qla_host *other_ha = NULL;
3280 struct pci_dev *other_pdev = NULL;
3281 int fn = ISP4XXX_PCI_FN_2;
3282
 3283	/* iSCSI function numbers for ISP4xxx are 1 and 3 */
3284 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
3285 fn = ISP4XXX_PCI_FN_1;
3286
3287 other_pdev =
3288 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3289 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
3290 fn));
3291
 3292	/* Get other_ha if other_pdev is valid and the device is enabled */
3293 if (other_pdev) {
3294 if (atomic_read(&other_pdev->enable_cnt)) {
3295 other_ha = pci_get_drvdata(other_pdev);
3296 if (other_ha) {
3297 set_bit(AF_HA_REMOVAL, &other_ha->flags);
3298 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
3299 "Prevent %s reinit\n", __func__,
3300 dev_name(&other_ha->pdev->dev)));
3301 }
3302 }
3303 pci_dev_put(other_pdev);
3304 }
3305}
3306
afaf5a2d
DS
3307/**
 3308 * qla4xxx_remove_adapter - callback function to remove adapter.
3309 * @pci_dev: PCI device pointer
3310 **/
3311static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
3312{
3313 struct scsi_qla_host *ha;
3314
3315 ha = pci_get_drvdata(pdev);
3316
7eece5a0
KH
3317 if (!is_qla8022(ha))
3318 qla4xxx_prevent_other_port_reinit(ha);
bee4fe8e 3319
ed1086e0
VC
3320 /* destroy iface from sysfs */
3321 qla4xxx_destroy_ifaces(ha);
3322
2a991c21
MR
3323 if (ha->boot_kset)
3324 iscsi_boot_destroy_kset(ha->boot_kset);
3325
afaf5a2d
DS
3326 scsi_remove_host(ha->host);
3327
3328 qla4xxx_free_adapter(ha);
3329
3330 scsi_host_put(ha->host);
3331
2232be0d 3332 pci_disable_pcie_error_reporting(pdev);
f4f5df23 3333 pci_disable_device(pdev);
afaf5a2d
DS
3334 pci_set_drvdata(pdev, NULL);
3335}
3336
3337/**
3338 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
3339 * @ha: HA context
3340 *
 3341 * At exit, the @ha's flags.enable_64bit_addressing is set to indicate the
 3342 * supported addressing method.
3343 */
47975477 3344static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
afaf5a2d
DS
3345{
3346 int retval;
3347
3348 /* Update our PCI device dma_mask for full 64 bit mask */
6a35528a
YH
3349 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
3350 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
afaf5a2d
DS
3351 dev_dbg(&ha->pdev->dev,
3352 "Failed to set 64 bit PCI consistent mask; "
3353 "using 32 bit.\n");
3354 retval = pci_set_consistent_dma_mask(ha->pdev,
284901a9 3355 DMA_BIT_MASK(32));
afaf5a2d
DS
3356 }
3357 } else
284901a9 3358 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
afaf5a2d
DS
3359}
3360
3361static int qla4xxx_slave_alloc(struct scsi_device *sdev)
3362{
b3a271a9
MR
3363 struct iscsi_cls_session *cls_sess;
3364 struct iscsi_session *sess;
3365 struct ddb_entry *ddb;
8bb4033d 3366 int queue_depth = QL4_DEF_QDEPTH;
afaf5a2d 3367
b3a271a9
MR
3368 cls_sess = starget_to_session(sdev->sdev_target);
3369 sess = cls_sess->dd_data;
3370 ddb = sess->dd_data;
3371
afaf5a2d
DS
3372 sdev->hostdata = ddb;
3373 sdev->tagged_supported = 1;
8bb4033d
VC
3374
3375 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
3376 queue_depth = ql4xmaxqdepth;
3377
3378 scsi_activate_tcq(sdev, queue_depth);
afaf5a2d
DS
3379 return 0;
3380}
3381
3382static int qla4xxx_slave_configure(struct scsi_device *sdev)
3383{
3384 sdev->tagged_supported = 1;
3385 return 0;
3386}
3387
3388static void qla4xxx_slave_destroy(struct scsi_device *sdev)
3389{
3390 scsi_deactivate_tcq(sdev, 1);
3391}
3392
3393/**
3394 * qla4xxx_del_from_active_array - returns an active srb
3395 * @ha: Pointer to host adapter structure.
fd589a8f 3396 * @index: index into the active_array
afaf5a2d
DS
3397 *
3398 * This routine removes and returns the srb at the specified index
3399 **/
f4f5df23
VC
3400struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
3401 uint32_t index)
afaf5a2d
DS
3402{
3403 struct srb *srb = NULL;
5369887a 3404 struct scsi_cmnd *cmd = NULL;
afaf5a2d 3405
5369887a
VC
3406 cmd = scsi_host_find_tag(ha->host, index);
3407 if (!cmd)
afaf5a2d
DS
3408 return srb;
3409
5369887a
VC
3410 srb = (struct srb *)CMD_SP(cmd);
3411 if (!srb)
afaf5a2d
DS
3412 return srb;
3413
3414 /* update counters */
3415 if (srb->flags & SRB_DMA_VALID) {
3416 ha->req_q_count += srb->iocb_cnt;
3417 ha->iocb_cnt -= srb->iocb_cnt;
3418 if (srb->cmd)
5369887a
VC
3419 srb->cmd->host_scribble =
3420 (unsigned char *)(unsigned long) MAX_SRBS;
afaf5a2d
DS
3421 }
3422 return srb;
3423}
3424
afaf5a2d
DS
3425/**
3426 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
09a0f719 3427 * @ha: Pointer to host adapter structure.
afaf5a2d
DS
3428 * @cmd: Scsi Command to wait on.
3429 *
3430 * This routine waits for the command to be returned by the Firmware
3431 * for some max time.
3432 **/
3433static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
3434 struct scsi_cmnd *cmd)
3435{
3436 int done = 0;
3437 struct srb *rp;
3438 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
2232be0d
LC
3439 int ret = SUCCESS;
3440
3441 /* Dont wait on command if PCI error is being handled
3442 * by PCI AER driver
3443 */
3444 if (unlikely(pci_channel_offline(ha->pdev)) ||
3445 (test_bit(AF_EEH_BUSY, &ha->flags))) {
3446 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
3447 ha->host_no, __func__);
3448 return ret;
3449 }
afaf5a2d
DS
3450
3451 do {
3452 /* Checking to see if its returned to OS */
5369887a 3453 rp = (struct srb *) CMD_SP(cmd);
afaf5a2d
DS
3454 if (rp == NULL) {
3455 done++;
3456 break;
3457 }
3458
3459 msleep(2000);
3460 } while (max_wait_time--);
3461
3462 return done;
3463}
3464
3465/**
3466 * qla4xxx_wait_for_hba_online - waits for HBA to come online
3467 * @ha: Pointer to host adapter structure
3468 **/
3469static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
3470{
3471 unsigned long wait_online;
3472
f581a3f7 3473 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
afaf5a2d
DS
3474 while (time_before(jiffies, wait_online)) {
3475
3476 if (adapter_up(ha))
3477 return QLA_SUCCESS;
afaf5a2d
DS
3478
3479 msleep(2000);
3480 }
3481
3482 return QLA_ERROR;
3483}
3484
3485/**
ce545039 3486 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
fd589a8f 3487 * @ha: pointer to HBA
afaf5a2d
DS
 3488 * @stgt: Pointer to the SCSI target to wait on
 3489 * @sdev: Pointer to the SCSI device to wait on (NULL means all devices on the target)
3490 *
3491 * This function waits for all outstanding commands to a lun to complete. It
3492 * returns 0 if all pending commands are returned and 1 otherwise.
3493 **/
ce545039
MC
3494static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
3495 struct scsi_target *stgt,
3496 struct scsi_device *sdev)
afaf5a2d
DS
3497{
3498 int cnt;
3499 int status = 0;
3500 struct scsi_cmnd *cmd;
3501
3502 /*
ce545039
MC
3503 * Waiting for all commands for the designated target or dev
3504 * in the active array
afaf5a2d
DS
3505 */
3506 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
3507 cmd = scsi_host_find_tag(ha->host, cnt);
ce545039
MC
3508 if (cmd && stgt == scsi_target(cmd->device) &&
3509 (!sdev || sdev == cmd->device)) {
afaf5a2d
DS
3510 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3511 status++;
3512 break;
3513 }
3514 }
3515 }
3516 return status;
3517}
3518
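/*
 * qla4xxx_eh_wait_for_commands() walks the host's whole tag space
 * (0 .. can_queue - 1) via scsi_host_find_tag() and reuses
 * qla4xxx_eh_wait_on_command() for every command that still belongs to the
 * given target (and, when @sdev is non-NULL, to that specific device). It
 * gives up on the first command that fails to complete, which is enough for
 * the reset handlers below to declare the reset failed.
 */
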
/**
 * qla4xxx_eh_abort - callback for abort task.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to abort the specified
 * command.
 **/
static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	unsigned int id = cmd->device->id;
	unsigned int lun = cmd->device->lun;
	unsigned long flags;
	struct srb *srb = NULL;
	int ret = SUCCESS;
	int wait = 0;

	ql4_printk(KERN_INFO, ha,
		   "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
		   ha->host_no, id, lun, cmd);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	srb = (struct srb *) CMD_SP(cmd);
	if (!srb) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		return SUCCESS;
	}
	kref_get(&srb->srb_ref);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
			      ha->host_no, id, lun));
		ret = FAILED;
	} else {
		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
			      ha->host_no, id, lun));
		wait = 1;
	}

	kref_put(&srb->srb_ref, qla4xxx_srb_compl);

	/* Wait for command to complete */
	if (wait) {
		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
			DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
				      ha->host_no, id, lun));
			ret = FAILED;
		}
	}

	ql4_printk(KERN_INFO, ha,
		   "scsi%ld:%d:%d: Abort command - %s\n",
		   ha->host_no, id, lun,
		   (ret == SUCCESS) ? "succeeded" : "failed");

	return ret;
}

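/*
 * Reference-count note for qla4xxx_eh_abort(): the srb is looked up and
 * kref_get()'d under hardware_lock, so it cannot be freed by a racing
 * completion while qla4xxx_abort_task() is issued with the lock dropped;
 * the matching kref_put() hands the final free (qla4xxx_srb_compl) to
 * whichever path drops the last reference. Only a successful abort mailbox
 * command is followed by a wait for the command to actually come back.
 */
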
/**
 * qla4xxx_eh_device_reset - callback for device reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset the LUN addressed by the
 * specified command (a warm LUN reset is requested from the firmware).
 **/
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	int ret = FAILED, stat;

	if (!ddb_entry)
		return ret;

	ret = iscsi_block_scsi_eh(cmd);
	if (ret)
		return ret;
	ret = FAILED;

	ql4_printk(KERN_INFO, ha,
		   "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
		   cmd->device->channel, cmd->device->id, cmd->device->lun);

	DEBUG2(printk(KERN_INFO
		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
		      cmd, jiffies, cmd->request->timeout / HZ,
		      ha->dpc_flags, cmd->result, cmd->allowed));

	/* FIXME: wait for hba to go online */
	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
	if (stat != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
		goto eh_dev_reset_done;
	}

	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
					 cmd->device)) {
		ql4_printk(KERN_INFO, ha,
			   "DEVICE RESET FAILED - waiting for commands.\n");
		goto eh_dev_reset_done;
	}

	/* Send marker. */
	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
				     MM_LUN_RESET) != QLA_SUCCESS)
		goto eh_dev_reset_done;

	ql4_printk(KERN_INFO, ha,
		   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
		   ha->host_no, cmd->device->channel, cmd->device->id,
		   cmd->device->lun);

	ret = SUCCESS;

eh_dev_reset_done:

	return ret;
}

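/*
 * The device- and target-reset handlers share one pattern: block further
 * SCSI EH activity for the session (iscsi_block_scsi_eh), ask the firmware
 * to perform the reset, wait for any commands still outstanding to the
 * affected LUN/target via qla4xxx_eh_wait_for_commands(), and finally send
 * a marker IOCB (MM_LUN_RESET or MM_TGT_WARM_RESET) so the firmware resumes
 * ordered command processing. A failure at any step reports FAILED back to
 * the SCSI midlayer.
 */
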
/**
 * qla4xxx_eh_target_reset - callback for target reset.
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is called by the Linux OS to reset the target.
 **/
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
	struct ddb_entry *ddb_entry = cmd->device->hostdata;
	int stat, ret;

	if (!ddb_entry)
		return FAILED;

	ret = iscsi_block_scsi_eh(cmd);
	if (ret)
		return ret;

	starget_printk(KERN_INFO, scsi_target(cmd->device),
		       "WARM TARGET RESET ISSUED.\n");

	DEBUG2(printk(KERN_INFO
		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
		      ha->dpc_flags, cmd->result, cmd->allowed));

	stat = qla4xxx_reset_target(ha, ddb_entry);
	if (stat != QLA_SUCCESS) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET RESET FAILED.\n");
		return FAILED;
	}

	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
					 NULL)) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET DEVICE RESET FAILED - "
			       "waiting for commands.\n");
		return FAILED;
	}

	/* Send marker. */
	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
				     MM_TGT_WARM_RESET) != QLA_SUCCESS) {
		starget_printk(KERN_INFO, scsi_target(cmd->device),
			       "WARM TARGET DEVICE RESET FAILED - "
			       "marker iocb failed.\n");
		return FAILED;
	}

	starget_printk(KERN_INFO, scsi_target(cmd->device),
		       "WARM TARGET RESET SUCCEEDED.\n");
	return SUCCESS;
}

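/*
 * Unlike qla4xxx_eh_device_reset(), the target-reset handler passes a NULL
 * scsi_device to qla4xxx_eh_wait_for_commands(), so it waits for every
 * outstanding command on the target rather than on a single LUN, and the
 * marker sent afterwards is MM_TGT_WARM_RESET instead of MM_LUN_RESET.
 */
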
/**
 * qla4xxx_eh_host_reset - kernel callback
 * @cmd: Pointer to Linux's SCSI command structure
 *
 * This routine is invoked by the Linux kernel to perform fatal error
 * recovery on the specified adapter.
 **/
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
	int return_status = FAILED;
	struct scsi_qla_host *ha;

	ha = to_qla_host(cmd->device->host);

	if (ql4xdontresethba) {
		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
			      ha->host_no, __func__));
		return FAILED;
	}

	ql4_printk(KERN_INFO, ha,
		   "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
		   cmd->device->channel, cmd->device->id, cmd->device->lun);

	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
			      "DEAD.\n", ha->host_no, cmd->device->channel,
			      __func__));

		return FAILED;
	}

	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		if (is_qla8022(ha))
			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
		else
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
	}

	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
		return_status = SUCCESS;

	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
		   return_status == FAILED ? "FAILED" : "SUCCEEDED");

	return return_status;
}

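/*
 * Host reset is the last escalation step: it honors the ql4xdontresethba
 * module parameter, waits for the adapter to be reachable, and then requests
 * either a full HBA reset (DPC_RESET_HA) or, on ISP82xx parts, just a
 * firmware-context reset (DPC_RESET_HA_FW_CONTEXT) before calling
 * qla4xxx_recover_adapter() synchronously.
 */
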
/*
 * The PCI AER core recovers from all correctable errors without driver
 * intervention. For uncorrectable errors it invokes the device driver's
 * callbacks:
 *
 * - Fatal errors     - link_reset
 * - Non-fatal errors - the driver's error_detected() callback, which
 *                      returns CAN_RECOVER, NEED_RESET or DISCONNECT.
 *
 * Depending on that return value the AER core then calls:
 *   CAN_RECOVER - the driver's mmio_enabled() callback, which returns
 *                 RECOVERED, or NEED_RESET if the firmware is hung
 *   NEED_RESET  - the driver's slot_reset() callback
 *   DISCONNECT  - the device is dead and cannot be recovered
 *   RECOVERED   - the driver's resume() callback
 */
static pci_ers_result_t
qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
		   ha->host_no, __func__, state);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	switch (state) {
	case pci_channel_io_normal:
		clear_bit(AF_EEH_BUSY, &ha->flags);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		set_bit(AF_EEH_BUSY, &ha->flags);
		qla4xxx_mailbox_premature_completion(ha);
		qla4xxx_free_irqs(ha);
		pci_disable_device(pdev);
		/* Return back all IOs */
		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		set_bit(AF_EEH_BUSY, &ha->flags);
		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * qla4xxx_pci_mmio_enabled - called when MMIO access has been re-enabled
 * @pdev: PCI device that reported the error
 *
 * Invoked if qla4xxx_pci_error_detected() returned
 * PCI_ERS_RESULT_CAN_RECOVER and reads/writes to the device still work.
 **/
static pci_ers_result_t
qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	return PCI_ERS_RESULT_RECOVERED;
}

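/*
 * AF_EEH_BUSY is the bridge between the AER callbacks and the rest of the
 * driver: it is set as soon as the channel freezes (or fails permanently)
 * and only cleared again when the channel returns to normal or in
 * qla4xxx_pci_resume(), and paths such as qla4xxx_eh_wait_on_command()
 * check it so they do not block on hardware the PCI core has taken away.
 */
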
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
	uint32_t rval = QLA_ERROR;
	uint32_t ret = 0;
	int fn;
	struct pci_dev *other_pdev = NULL;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);

	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);

	if (test_bit(AF_ONLINE, &ha->flags)) {
		clear_bit(AF_ONLINE, &ha->flags);
		clear_bit(AF_LINK_UP, &ha->flags);
		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
	}

	fn = PCI_FUNC(ha->pdev->devfn);
	while (fn > 0) {
		fn--;
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
			   "func %x\n", ha->host_no, __func__, fn);
		/* Get the pci device given the domain, bus,
		 * slot/function number */
		other_pdev =
		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
			ha->pdev->bus->number,
			PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), fn));

		if (!other_pdev)
			continue;

		if (atomic_read(&other_pdev->enable_cnt)) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
				   "func in enabled state %x\n", ha->host_no,
				   __func__, fn);
			pci_dev_put(other_pdev);
			break;
		}
		pci_dev_put(other_pdev);
	}

	/* The first function on the card, the reset owner, starts and
	 * initializes the firmware; the other functions on the card only
	 * reset their firmware context.
	 */
	if (!fn) {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
			   "0x%x is the owner\n", ha->host_no, __func__,
			   ha->pdev->devfn);

		qla4_8xxx_idc_lock(ha);
		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
				QLA82XX_DEV_COLD);

		qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
				QLA82XX_IDC_VERSION);

		qla4_8xxx_idc_unlock(ha);
		clear_bit(AF_FW_RECOVERY, &ha->flags);
		rval = qla4xxx_initialize_adapter(ha);
		qla4_8xxx_idc_lock(ha);

		if (rval != QLA_SUCCESS) {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
				   "FAILED\n", ha->host_no, __func__);
			qla4_8xxx_clear_drv_active(ha);
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
					QLA82XX_DEV_FAILED);
		} else {
			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
				   "READY\n", ha->host_no, __func__);
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
					QLA82XX_DEV_READY);
			/* Clear driver state register */
			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
			qla4_8xxx_set_drv_active(ha);
			ret = qla4xxx_request_irqs(ha);
			if (ret) {
				ql4_printk(KERN_WARNING, ha, "Failed to "
					   "reserve interrupt %d already in use.\n",
					   ha->pdev->irq);
				rval = QLA_ERROR;
			} else {
				ha->isp_ops->enable_intrs(ha);
				rval = QLA_SUCCESS;
			}
		}
		qla4_8xxx_idc_unlock(ha);
	} else {
		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
			   "the reset owner\n", ha->host_no, __func__,
			   ha->pdev->devfn);
		if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
		     QLA82XX_DEV_READY)) {
			clear_bit(AF_FW_RECOVERY, &ha->flags);
			rval = qla4xxx_initialize_adapter(ha);
			if (rval == QLA_SUCCESS) {
				ret = qla4xxx_request_irqs(ha);
				if (ret) {
					ql4_printk(KERN_WARNING, ha, "Failed to"
						   " reserve interrupt %d already in"
						   " use.\n", ha->pdev->irq);
					rval = QLA_ERROR;
				} else {
					ha->isp_ops->enable_intrs(ha);
					rval = QLA_SUCCESS;
				}
			}
			qla4_8xxx_idc_lock(ha);
			qla4_8xxx_set_drv_active(ha);
			qla4_8xxx_idc_unlock(ha);
		}
	}
	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
	return rval;
}

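/*
 * Reset-owner selection above deserves a note: the loop scans lower-numbered
 * PCI functions of the same device for one that is already enabled, and when
 * fn ends up at 0 this instance acts as the reset owner and drives the IDC
 * state machine from DEV_COLD to DEV_READY (or DEV_FAILED on error); every
 * other function simply re-initializes its own adapter context once the
 * device state register already reads DEV_READY.
 */
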
static pci_ers_result_t
qla4xxx_pci_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
	int rc;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
		   ha->host_no, __func__);

	if (!is_aer_supported(ha))
		return PCI_ERS_RESULT_NONE;

	/* Restore the saved state of the PCIe device -
	 * BAR registers, PCI config space, PCIX, MSI,
	 * IOV states
	 */
	pci_restore_state(pdev);

	/* pci_restore_state() clears the device's saved_state flag, so
	 * save the restored state again to re-arm it.
	 */
	pci_save_state(pdev);

	/* Initialize device or resume if in suspended state */
	rc = pci_enable_device(pdev);
	if (rc) {
		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
			   "device after reset\n", ha->host_no, __func__);
		goto exit_slot_reset;
	}

	ha->isp_ops->disable_intrs(ha);

	if (is_qla8022(ha)) {
		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
			ret = PCI_ERS_RESULT_RECOVERED;
			goto exit_slot_reset;
		} else
			goto exit_slot_reset;
	}

exit_slot_reset:
	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
		   ha->host_no, __func__, ret);
	return ret;
}

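/*
 * The restore/save pairing above matters: pci_restore_state() consumes the
 * configuration captured before the error (clearing the device's
 * saved_state flag), and pci_save_state() immediately re-captures it so a
 * later reset or suspend cycle still has valid config-space data to restore.
 * Recovery of the adapter itself is only attempted for ISP82xx parts; other
 * ISPs fall through and report PCI_ERS_RESULT_DISCONNECT.
 */
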
static void
qla4xxx_pci_resume(struct pci_dev *pdev)
{
	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
	int ret;

	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
		   ha->host_no, __func__);

	ret = qla4xxx_wait_for_hba_online(ha);
	if (ret != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
			   "resume I/O from slot/link_reset\n", ha->host_no,
			   __func__);
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);
	clear_bit(AF_EEH_BUSY, &ha->flags);
}

static struct pci_error_handlers qla4xxx_err_handler = {
	.error_detected = qla4xxx_pci_error_detected,
	.mmio_enabled = qla4xxx_pci_mmio_enabled,
	.slot_reset = qla4xxx_pci_slot_reset,
	.resume = qla4xxx_pci_resume,
};

static struct pci_device_id qla4xxx_pci_tbl[] = {
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_QLOGIC,
		.device		= PCI_DEVICE_ID_QLOGIC_ISP8022,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{0, 0},
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);

static struct pci_driver qla4xxx_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= qla4xxx_pci_tbl,
	.probe		= qla4xxx_probe_adapter,
	.remove		= qla4xxx_remove_adapter,
	.err_handler	= &qla4xxx_err_handler,
};

static int __init qla4xxx_module_init(void)
{
	int ret;

	/* Allocate cache for SRBs. */
	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (srb_cachep == NULL) {
		printk(KERN_ERR
		       "%s: Unable to allocate SRB cache... "
		       "Failing load!\n", DRIVER_NAME);
		ret = -ENOMEM;
		goto no_srb_cache;
	}

	/* Derive version string. */
	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
	if (ql4xextended_error_logging)
		strcat(qla4xxx_version_str, "-debug");

	qla4xxx_scsi_transport =
		iscsi_register_transport(&qla4xxx_iscsi_transport);
	if (!qla4xxx_scsi_transport) {
		ret = -ENODEV;
		goto release_srb_cache;
	}

	ret = pci_register_driver(&qla4xxx_pci_driver);
	if (ret)
		goto unregister_transport;

	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
	return 0;

unregister_transport:
	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
release_srb_cache:
	kmem_cache_destroy(srb_cachep);
no_srb_cache:
	return ret;
}

static void __exit qla4xxx_module_exit(void)
{
	pci_unregister_driver(&qla4xxx_pci_driver);
	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
	kmem_cache_destroy(srb_cachep);
}

module_init(qla4xxx_module_init);
module_exit(qla4xxx_module_exit);

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
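
/*
 * Illustrative load example (not part of the source): the module parameters
 * referenced above can be set at load time, e.g.
 *
 *   # modprobe qla4xxx ql4xextended_error_logging=1 ql4xdontresethba=0
 *
 * Setting ql4xextended_error_logging also switches the reported version
 * string to its "-debug" variant via the check in qla4xxx_module_init().
 */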