1/*
2 * QLogic iSCSI HBA Driver
7d01d069 3 * Copyright (c) 2003-2010 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7#include <linux/moduleparam.h>
5a0e3ad6 8#include <linux/slab.h>
9#include <linux/blkdev.h>
10#include <linux/iscsi_boot_sysfs.h>
11
12#include <scsi/scsi_tcq.h>
13#include <scsi/scsicam.h>
14
15#include "ql4_def.h"
16#include "ql4_version.h"
17#include "ql4_glbl.h"
18#include "ql4_dbg.h"
19#include "ql4_inline.h"
20
21/*
22 * Driver version
23 */
47975477 24static char qla4xxx_version_str[40];
25
26/*
27 * SRB allocation cache
28 */
e18b890b 29static struct kmem_cache *srb_cachep;
30
31/*
32 * Module parameter information and variables
33 */
afaf5a2d 34int ql4xdontresethba = 0;
f4f5df23 35module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
afaf5a2d 36MODULE_PARM_DESC(ql4xdontresethba,
37 "Don't reset the HBA for driver recovery \n"
38 " 0 - It will reset HBA (Default)\n"
39 " 1 - It will NOT reset HBA");
afaf5a2d 40
11010fec 41int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
f4f5df23 42module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
11010fec 43MODULE_PARM_DESC(ql4xextended_error_logging,
44 "Option to enable extended error logging, "
45 "Default is 0 - no logging, 1 - debug logging");
46
47int ql4xenablemsix = 1;
48module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
49MODULE_PARM_DESC(ql4xenablemsix,
50 "Set to enable MSI or MSI-X interrupt mechanism.\n"
51 " 0 = enable INTx interrupt mechanism.\n"
52 " 1 = enable MSI-X interrupt mechanism (Default).\n"
53 " 2 = enable MSI interrupt mechanism.");
477ffb9d 54
d510d965 55#define QL4_DEF_QDEPTH 32
56static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
57module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
58MODULE_PARM_DESC(ql4xmaxqdepth,
59 "Maximum queue depth to report for target devices.\n"
60 " Default: 32.");
d510d965 61
62static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
63module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
64MODULE_PARM_DESC(ql4xsess_recovery_tmo,
65 "Target Session Recovery Timeout.\n"
66 " Default: 30 sec.");
67
b3a271a9 68static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
69/*
70 * SCSI host template entry points
71 */
47975477 72static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
73
74/*
75 * iSCSI template entry points
76 */
77static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
78 enum iscsi_param param, char *buf);
79static int qla4xxx_host_get_param(struct Scsi_Host *shost,
80 enum iscsi_host_param param, char *buf);
81static int qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data,
82 int count);
83static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
84 enum iscsi_param_type param_type,
85 int param, char *buf);
5c656af7 86static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
87static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
88 struct sockaddr *dst_addr,
89 int non_blocking);
90static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
91static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
92static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
93 enum iscsi_param param, char *buf);
94static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
95static struct iscsi_cls_conn *
96qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
97static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
98 struct iscsi_cls_conn *cls_conn,
99 uint64_t transport_fd, int is_leading);
100static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
101static struct iscsi_cls_session *
102qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
103 uint16_t qdepth, uint32_t initial_cmdsn);
104static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
105static void qla4xxx_task_work(struct work_struct *wdata);
106static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
107static int qla4xxx_task_xmit(struct iscsi_task *);
108static void qla4xxx_task_cleanup(struct iscsi_task *);
109static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
110static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
111 struct iscsi_stats *stats);
112/*
113 * SCSI host template entry points
114 */
f281233d 115static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
09a0f719 116static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
afaf5a2d 117static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
ce545039 118static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
119static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
120static int qla4xxx_slave_alloc(struct scsi_device *device);
121static int qla4xxx_slave_configure(struct scsi_device *device);
122static void qla4xxx_slave_destroy(struct scsi_device *sdev);
3128c6c7 123static mode_t ql4_attr_is_visible(int param_type, int param);
95d31262 124static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
afaf5a2d 125
126static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
127 QLA82XX_LEGACY_INTR_CONFIG;
128
129static struct scsi_host_template qla4xxx_driver_template = {
130 .module = THIS_MODULE,
131 .name = DRIVER_NAME,
132 .proc_name = DRIVER_NAME,
133 .queuecommand = qla4xxx_queuecommand,
134
09a0f719 135 .eh_abort_handler = qla4xxx_eh_abort,
afaf5a2d 136 .eh_device_reset_handler = qla4xxx_eh_device_reset,
ce545039 137 .eh_target_reset_handler = qla4xxx_eh_target_reset,
afaf5a2d 138 .eh_host_reset_handler = qla4xxx_eh_host_reset,
5c656af7 139 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
140
141 .slave_configure = qla4xxx_slave_configure,
142 .slave_alloc = qla4xxx_slave_alloc,
143 .slave_destroy = qla4xxx_slave_destroy,
144
145 .this_id = -1,
146 .cmd_per_lun = 3,
147 .use_clustering = ENABLE_CLUSTERING,
148 .sg_tablesize = SG_ALL,
149
150 .max_sectors = 0xFFFF,
7ad633c0 151 .shost_attrs = qla4xxx_host_attrs,
95d31262 152 .host_reset = qla4xxx_host_reset,
a355943c 153 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
154};
155
156static struct iscsi_transport qla4xxx_iscsi_transport = {
157 .owner = THIS_MODULE,
158 .name = DRIVER_NAME,
159 .caps = CAP_TEXT_NEGO |
160 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
161 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
162 CAP_MULTI_R2T,
3128c6c7 163 .attr_is_visible = ql4_attr_is_visible,
164 .create_session = qla4xxx_session_create,
165 .destroy_session = qla4xxx_session_destroy,
166 .start_conn = qla4xxx_conn_start,
167 .create_conn = qla4xxx_conn_create,
168 .bind_conn = qla4xxx_conn_bind,
169 .stop_conn = iscsi_conn_stop,
170 .destroy_conn = qla4xxx_conn_destroy,
171 .set_param = iscsi_set_param,
afaf5a2d 172 .get_conn_param = qla4xxx_conn_get_param,
173 .get_session_param = iscsi_session_get_param,
174 .get_ep_param = qla4xxx_get_ep_param,
175 .ep_connect = qla4xxx_ep_connect,
176 .ep_poll = qla4xxx_ep_poll,
177 .ep_disconnect = qla4xxx_ep_disconnect,
178 .get_stats = qla4xxx_conn_get_stats,
179 .send_pdu = iscsi_conn_send_pdu,
180 .xmit_task = qla4xxx_task_xmit,
181 .cleanup_task = qla4xxx_task_cleanup,
182 .alloc_pdu = qla4xxx_alloc_pdu,
183
aa1e93a2 184 .get_host_param = qla4xxx_host_get_param,
d00efe3f 185 .set_iface_param = qla4xxx_iface_set_param,
ed1086e0 186 .get_iface_param = qla4xxx_get_iface_param,
a355943c 187 .bsg_request = qla4xxx_bsg_request,
188};
189
190static struct scsi_transport_template *qla4xxx_scsi_transport;
191
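/**
 * ql4_attr_is_visible - report which iSCSI transport attributes are exported
 * @param_type: ISCSI_HOST_PARAM, ISCSI_PARAM or ISCSI_NET_PARAM
 * @param: individual parameter within that type
 *
 * Descriptive note: returns S_IRUGO for the host, session/connection and
 * network parameters this driver can report, and 0 for everything else so
 * the transport class hides the corresponding sysfs attribute.
 **/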
192static mode_t ql4_attr_is_visible(int param_type, int param)
193{
194 switch (param_type) {
195 case ISCSI_HOST_PARAM:
196 switch (param) {
197 case ISCSI_HOST_PARAM_HWADDRESS:
198 case ISCSI_HOST_PARAM_IPADDRESS:
199 case ISCSI_HOST_PARAM_INITIATOR_NAME:
200 return S_IRUGO;
201 default:
202 return 0;
203 }
204 case ISCSI_PARAM:
205 switch (param) {
206 case ISCSI_PARAM_CONN_ADDRESS:
207 case ISCSI_PARAM_CONN_PORT:
208 case ISCSI_PARAM_TARGET_NAME:
209 case ISCSI_PARAM_TPGT:
210 case ISCSI_PARAM_TARGET_ALIAS:
211 case ISCSI_PARAM_MAX_BURST:
212 case ISCSI_PARAM_MAX_R2T:
213 case ISCSI_PARAM_FIRST_BURST:
214 case ISCSI_PARAM_MAX_RECV_DLENGTH:
215 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
de37920b 216 case ISCSI_PARAM_IFACE_NAME:
217 return S_IRUGO;
218 default:
219 return 0;
220 }
221 case ISCSI_NET_PARAM:
222 switch (param) {
223 case ISCSI_NET_PARAM_IPV4_ADDR:
224 case ISCSI_NET_PARAM_IPV4_SUBNET:
225 case ISCSI_NET_PARAM_IPV4_GW:
226 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
227 case ISCSI_NET_PARAM_IFACE_ENABLE:
228 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
229 case ISCSI_NET_PARAM_IPV6_ADDR:
230 case ISCSI_NET_PARAM_IPV6_ROUTER:
231 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
232 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
233 case ISCSI_NET_PARAM_VLAN_ID:
234 case ISCSI_NET_PARAM_VLAN_PRIORITY:
235 case ISCSI_NET_PARAM_VLAN_ENABLED:
943c157b 236 case ISCSI_NET_PARAM_MTU:
2ada7fc5 237 case ISCSI_NET_PARAM_PORT:
238 return S_IRUGO;
239 default:
240 return 0;
241 }
242 }
243
244 return 0;
245}
246
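/**
 * qla4xxx_get_iface_param - report a network parameter for an iSCSI iface
 * @iface: iface being queried
 * @param_type: must be ISCSI_NET_PARAM
 * @param: which network parameter to report
 * @buf: buffer the value is printed into
 *
 * Descriptive note: formats the requested value (IP addresses, VLAN, MTU,
 * port, ...) from the cached ha->ip_config data and returns the number of
 * bytes written, or -ENOSYS for unsupported parameters.
 **/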
247static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
248 enum iscsi_param_type param_type,
249 int param, char *buf)
250{
251 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
252 struct scsi_qla_host *ha = to_qla_host(shost);
253 int len = -ENOSYS;
254
255 if (param_type != ISCSI_NET_PARAM)
256 return -ENOSYS;
257
258 switch (param) {
259 case ISCSI_NET_PARAM_IPV4_ADDR:
260 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
261 break;
262 case ISCSI_NET_PARAM_IPV4_SUBNET:
263 len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
264 break;
265 case ISCSI_NET_PARAM_IPV4_GW:
266 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
267 break;
268 case ISCSI_NET_PARAM_IFACE_ENABLE:
269 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
270 len = sprintf(buf, "%s\n",
271 (ha->ip_config.ipv4_options &
272 IPOPT_IPV4_PROTOCOL_ENABLE) ?
273 "enabled" : "disabled");
274 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
275 len = sprintf(buf, "%s\n",
276 (ha->ip_config.ipv6_options &
277 IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
278 "enabled" : "disabled");
279 break;
280 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
281 len = sprintf(buf, "%s\n",
282 (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
283 "dhcp" : "static");
284 break;
285 case ISCSI_NET_PARAM_IPV6_ADDR:
286 if (iface->iface_num == 0)
287 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
288 if (iface->iface_num == 1)
289 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
290 break;
291 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
292 len = sprintf(buf, "%pI6\n",
293 &ha->ip_config.ipv6_link_local_addr);
294 break;
295 case ISCSI_NET_PARAM_IPV6_ROUTER:
296 len = sprintf(buf, "%pI6\n",
297 &ha->ip_config.ipv6_default_router_addr);
298 break;
299 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
300 len = sprintf(buf, "%s\n",
301 (ha->ip_config.ipv6_addl_options &
302 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
303 "nd" : "static");
304 break;
305 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
306 len = sprintf(buf, "%s\n",
307 (ha->ip_config.ipv6_addl_options &
308 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
309 "auto" : "static");
310 break;
311 case ISCSI_NET_PARAM_VLAN_ID:
312 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
313 len = sprintf(buf, "%d\n",
314 (ha->ip_config.ipv4_vlan_tag &
315 ISCSI_MAX_VLAN_ID));
316 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
317 len = sprintf(buf, "%d\n",
318 (ha->ip_config.ipv6_vlan_tag &
319 ISCSI_MAX_VLAN_ID));
320 break;
321 case ISCSI_NET_PARAM_VLAN_PRIORITY:
322 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
323 len = sprintf(buf, "%d\n",
324 ((ha->ip_config.ipv4_vlan_tag >> 13) &
325 ISCSI_MAX_VLAN_PRIORITY));
326 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
327 len = sprintf(buf, "%d\n",
328 ((ha->ip_config.ipv6_vlan_tag >> 13) &
329 ISCSI_MAX_VLAN_PRIORITY));
330 break;
331 case ISCSI_NET_PARAM_VLAN_ENABLED:
332 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
333 len = sprintf(buf, "%s\n",
334 (ha->ip_config.ipv4_options &
335 IPOPT_VLAN_TAGGING_ENABLE) ?
336 "enabled" : "disabled");
337 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
338 len = sprintf(buf, "%s\n",
339 (ha->ip_config.ipv6_options &
340 IPV6_OPT_VLAN_TAGGING_ENABLE) ?
341 "enabled" : "disabled");
342 break;
343 case ISCSI_NET_PARAM_MTU:
344 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
345 break;
346 case ISCSI_NET_PARAM_PORT:
347 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
348 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
349 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
350 len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
351 break;
352 default:
353 len = -ENOSYS;
354 }
355
356 return len;
357}
358
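/**
 * qla4xxx_ep_connect - allocate an endpoint for a new offloaded connection
 * @shost: scsi host the connection belongs to
 * @dst_addr: IPv4 or IPv6 address of the target portal
 * @non_blocking: not used by this offload driver
 *
 * Descriptive note: only records the destination address and owning host in
 * the new iscsi_endpoint; the connection itself is opened later by the
 * firmware when qla4xxx_conn_start() calls qla4xxx_conn_open().
 **/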
359static struct iscsi_endpoint *
360qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
361 int non_blocking)
5c656af7 362{
363 int ret;
364 struct iscsi_endpoint *ep;
365 struct qla_endpoint *qla_ep;
366 struct scsi_qla_host *ha;
367 struct sockaddr_in *addr;
368 struct sockaddr_in6 *addr6;
5c656af7 369
370 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
371 if (!shost) {
372 ret = -ENXIO;
373 printk(KERN_ERR "%s: shost is NULL\n",
374 __func__);
375 return ERR_PTR(ret);
376 }
5c656af7 377
378 ha = iscsi_host_priv(shost);
379
380 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
381 if (!ep) {
382 ret = -ENOMEM;
383 return ERR_PTR(ret);
384 }
385
386 qla_ep = ep->dd_data;
387 memset(qla_ep, 0, sizeof(struct qla_endpoint));
388 if (dst_addr->sa_family == AF_INET) {
389 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
390 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
391 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
392 (char *)&addr->sin_addr));
393 } else if (dst_addr->sa_family == AF_INET6) {
394 memcpy(&qla_ep->dst_addr, dst_addr,
395 sizeof(struct sockaddr_in6));
396 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
397 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
398 (char *)&addr6->sin6_addr));
399 }
400
401 qla_ep->host = shost;
402
403 return ep;
404}
405
b3a271a9 406static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
afaf5a2d 407{
408 struct qla_endpoint *qla_ep;
409 struct scsi_qla_host *ha;
410 int ret = 0;
afaf5a2d 411
412 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
413 qla_ep = ep->dd_data;
414 ha = to_qla_host(qla_ep->host);
415
416 if (adapter_up(ha))
417 ret = 1;
418
419 return ret;
420}
421
422static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
423{
424 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
425 iscsi_destroy_endpoint(ep);
426}
427
428static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
429 enum iscsi_param param,
430 char *buf)
431{
432 struct qla_endpoint *qla_ep = ep->dd_data;
433 struct sockaddr *dst_addr;
434
435 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
436
437 switch (param) {
438 case ISCSI_PARAM_CONN_PORT:
439 case ISCSI_PARAM_CONN_ADDRESS:
440 if (!qla_ep)
441 return -ENOTCONN;
442
443 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
444 if (!dst_addr)
445 return -ENOTCONN;
446
447 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
448 &qla_ep->dst_addr, param, buf);
449 default:
450 return -ENOSYS;
451 }
452}
453
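/**
 * qla4xxx_conn_get_stats - fill in iSCSI statistics for a connection
 * @cls_conn: connection being queried
 * @stats: statistics structure to populate
 *
 * Descriptive note: fetches the per-DDB statistics from the firmware into a
 * DMA buffer via qla4xxx_get_mgmt_data() and converts the octet and PDU
 * counters into the generic struct iscsi_stats layout.
 **/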
454static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
455 struct iscsi_stats *stats)
456{
457 struct iscsi_session *sess;
458 struct iscsi_cls_session *cls_sess;
459 struct ddb_entry *ddb_entry;
460 struct scsi_qla_host *ha;
461 struct ql_iscsi_stats *ql_iscsi_stats;
462 int stats_size;
463 int ret;
464 dma_addr_t iscsi_stats_dma;
465
466 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
568d303b 467
468 cls_sess = iscsi_conn_to_session(cls_conn);
469 sess = cls_sess->dd_data;
470 ddb_entry = sess->dd_data;
471 ha = ddb_entry->ha;
472
473 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
474 /* Allocate memory */
475 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
476 &iscsi_stats_dma, GFP_KERNEL);
477 if (!ql_iscsi_stats) {
478 ql4_printk(KERN_ERR, ha,
479 "Unable to allocate memory for iscsi stats\n");
480 goto exit_get_stats;
568d303b 481 }
482
483 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
484 iscsi_stats_dma);
485 if (ret != QLA_SUCCESS) {
486 ql4_printk(KERN_ERR, ha,
487 "Unable to retreive iscsi stats\n");
488 goto free_stats;
489 }
490
491 /* octets */
492 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
493 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
494 /* xmit pdus */
495 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
496 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
497 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
498 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
499 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
500 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
501 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
502 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
503 /* recv pdus */
504 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
505 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
506 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
507 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
508 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
509 stats->logoutrsp_pdus =
510 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
511 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
512 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
513 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
514
515free_stats:
516 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
517 iscsi_stats_dma);
518exit_get_stats:
519 return;
520}
521
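/**
 * qla4xxx_eh_cmd_timed_out - SCSI command timeout handler
 * @sc: command that timed out
 *
 * Descriptive note: while the session is in ISCSI_SESSION_FAILED state
 * (recovery in progress), ask the block layer to restart the timer instead
 * of escalating the command to the error handler.
 **/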
522static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
523{
524 struct iscsi_cls_session *session;
525 struct iscsi_session *sess;
526 unsigned long flags;
527 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
528
529 session = starget_to_session(scsi_target(sc->device));
530 sess = session->dd_data;
531
532 spin_lock_irqsave(&session->lock, flags);
533 if (session->state == ISCSI_SESSION_FAILED)
534 ret = BLK_EH_RESET_TIMER;
535 spin_unlock_irqrestore(&session->lock, flags);
536
537 return ret;
538}
539
540static int qla4xxx_host_get_param(struct Scsi_Host *shost,
541 enum iscsi_host_param param, char *buf)
542{
543 struct scsi_qla_host *ha = to_qla_host(shost);
544 int len;
545
546 switch (param) {
547 case ISCSI_HOST_PARAM_HWADDRESS:
7ffc49a6 548 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
8ad5781a 549 break;
22236961 550 case ISCSI_HOST_PARAM_IPADDRESS:
2bab08fc 551 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
22236961 552 break;
8ad5781a 553 case ISCSI_HOST_PARAM_INITIATOR_NAME:
22236961 554 len = sprintf(buf, "%s\n", ha->name_string);
555 break;
556 default:
557 return -ENOSYS;
558 }
559
560 return len;
561}
562
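/*
 * The following helpers create and destroy the iscsi_iface objects (one
 * IPv4 iface and up to two IPv6 ifaces) exposed by the iSCSI transport
 * class in sysfs, based on which protocols are enabled in the firmware's
 * IP configuration (ha->ip_config).
 */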
563static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
564{
565 if (ha->iface_ipv4)
566 return;
567
568 /* IPv4 */
569 ha->iface_ipv4 = iscsi_create_iface(ha->host,
570 &qla4xxx_iscsi_transport,
571 ISCSI_IFACE_TYPE_IPV4, 0, 0);
572 if (!ha->iface_ipv4)
573 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
574 "iface0.\n");
575}
576
577static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
578{
579 if (!ha->iface_ipv6_0)
580 /* IPv6 iface-0 */
581 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
582 &qla4xxx_iscsi_transport,
583 ISCSI_IFACE_TYPE_IPV6, 0,
584 0);
585 if (!ha->iface_ipv6_0)
586 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
587 "iface0.\n");
588
589 if (!ha->iface_ipv6_1)
590 /* IPv6 iface-1 */
591 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
592 &qla4xxx_iscsi_transport,
593 ISCSI_IFACE_TYPE_IPV6, 1,
594 0);
595 if (!ha->iface_ipv6_1)
596 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
597 "iface1.\n");
598}
599
600static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
601{
602 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
603 qla4xxx_create_ipv4_iface(ha);
604
605 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
606 qla4xxx_create_ipv6_iface(ha);
607}
608
609static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
610{
611 if (ha->iface_ipv4) {
612 iscsi_destroy_iface(ha->iface_ipv4);
613 ha->iface_ipv4 = NULL;
614 }
615}
616
617static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
618{
619 if (ha->iface_ipv6_0) {
620 iscsi_destroy_iface(ha->iface_ipv6_0);
621 ha->iface_ipv6_0 = NULL;
622 }
623 if (ha->iface_ipv6_1) {
624 iscsi_destroy_iface(ha->iface_ipv6_1);
625 ha->iface_ipv6_1 = NULL;
626 }
627}
628
629static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
630{
631 qla4xxx_destroy_ipv4_iface(ha);
632 qla4xxx_destroy_ipv6_iface(ha);
633}
634
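/**
 * qla4xxx_set_ipv6 - apply one IPv6 network parameter to the firmware ACB
 * @ha: Pointer to host adapter structure
 * @iface_param: parameter and value passed down from the transport class
 * @init_fw_cb: address control block being edited
 *
 * Descriptive note: updates the relevant field of the address control block
 * in place; the modified block is committed to flash and to the firmware by
 * qla4xxx_iface_set_param().
 **/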
635static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
636 struct iscsi_iface_param_info *iface_param,
637 struct addr_ctrl_blk *init_fw_cb)
638{
639 /*
640 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
641 * iface_num 1 is valid only for IPv6 Addr.
642 */
643 switch (iface_param->param) {
644 case ISCSI_NET_PARAM_IPV6_ADDR:
645 if (iface_param->iface_num & 0x1)
646 /* IPv6 Addr 1 */
647 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
648 sizeof(init_fw_cb->ipv6_addr1));
649 else
650 /* IPv6 Addr 0 */
651 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
652 sizeof(init_fw_cb->ipv6_addr0));
653 break;
654 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
655 if (iface_param->iface_num & 0x1)
656 break;
657 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
658 sizeof(init_fw_cb->ipv6_if_id));
659 break;
660 case ISCSI_NET_PARAM_IPV6_ROUTER:
661 if (iface_param->iface_num & 0x1)
662 break;
663 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
664 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
665 break;
666 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
667 /* Autocfg applies to even interface */
668 if (iface_param->iface_num & 0x1)
669 break;
670
671 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
672 init_fw_cb->ipv6_addtl_opts &=
673 cpu_to_le16(
674 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
675 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
676 init_fw_cb->ipv6_addtl_opts |=
677 cpu_to_le16(
678 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
679 else
680 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
681 "IPv6 addr\n");
682 break;
683 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
684 /* Autocfg applies to even interface */
685 if (iface_param->iface_num & 0x1)
686 break;
687
688 if (iface_param->value[0] ==
689 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
690 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
691 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
692 else if (iface_param->value[0] ==
693 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
694 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
695 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
696 else
697 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
698 "IPv6 linklocal addr\n");
699 break;
700 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
701 /* Autocfg applies to even interface */
702 if (iface_param->iface_num & 0x1)
703 break;
704
705 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
706 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
707 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
708 break;
709 case ISCSI_NET_PARAM_IFACE_ENABLE:
ed1086e0 710 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
711 init_fw_cb->ipv6_opts |=
712 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
713 qla4xxx_create_ipv6_iface(ha);
714 } else {
715 init_fw_cb->ipv6_opts &=
716 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
717 0xFFFF);
718 qla4xxx_destroy_ipv6_iface(ha);
719 }
720 break;
721 case ISCSI_NET_PARAM_VLAN_ID:
722 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
723 break;
724 init_fw_cb->ipv6_vlan_tag =
725 cpu_to_be16(*(uint16_t *)iface_param->value);
726 break;
727 case ISCSI_NET_PARAM_VLAN_ENABLED:
728 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
729 init_fw_cb->ipv6_opts |=
730 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
731 else
732 init_fw_cb->ipv6_opts &=
733 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
d00efe3f 734 break;
735 case ISCSI_NET_PARAM_MTU:
736 init_fw_cb->eth_mtu_size =
737 cpu_to_le16(*(uint16_t *)iface_param->value);
738 break;
739 case ISCSI_NET_PARAM_PORT:
740 /* Autocfg applies to even interface */
741 if (iface_param->iface_num & 0x1)
742 break;
743
744 init_fw_cb->ipv6_port =
745 cpu_to_le16(*(uint16_t *)iface_param->value);
746 break;
747 default:
748 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
749 iface_param->param);
750 break;
751 }
752}
753
754static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
755 struct iscsi_iface_param_info *iface_param,
756 struct addr_ctrl_blk *init_fw_cb)
757{
758 switch (iface_param->param) {
759 case ISCSI_NET_PARAM_IPV4_ADDR:
760 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
761 sizeof(init_fw_cb->ipv4_addr));
762 break;
763 case ISCSI_NET_PARAM_IPV4_SUBNET:
764 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
765 sizeof(init_fw_cb->ipv4_subnet));
766 break;
767 case ISCSI_NET_PARAM_IPV4_GW:
768 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
769 sizeof(init_fw_cb->ipv4_gw_addr));
770 break;
771 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
772 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
773 init_fw_cb->ipv4_tcp_opts |=
774 cpu_to_le16(TCPOPT_DHCP_ENABLE);
775 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
776 init_fw_cb->ipv4_tcp_opts &=
777 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
778 else
779 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
780 break;
781 case ISCSI_NET_PARAM_IFACE_ENABLE:
ed1086e0 782 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
d00efe3f 783 init_fw_cb->ipv4_ip_opts |=
2bab08fc 784 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
785 qla4xxx_create_ipv4_iface(ha);
786 } else {
d00efe3f 787 init_fw_cb->ipv4_ip_opts &=
2bab08fc 788 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
d00efe3f 789 0xFFFF);
790 qla4xxx_destroy_ipv4_iface(ha);
791 }
792 break;
793 case ISCSI_NET_PARAM_VLAN_ID:
794 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
795 break;
796 init_fw_cb->ipv4_vlan_tag =
797 cpu_to_be16(*(uint16_t *)iface_param->value);
798 break;
799 case ISCSI_NET_PARAM_VLAN_ENABLED:
800 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
801 init_fw_cb->ipv4_ip_opts |=
802 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
803 else
804 init_fw_cb->ipv4_ip_opts &=
805 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
d00efe3f 806 break;
807 case ISCSI_NET_PARAM_MTU:
808 init_fw_cb->eth_mtu_size =
809 cpu_to_le16(*(uint16_t *)iface_param->value);
810 break;
811 case ISCSI_NET_PARAM_PORT:
812 init_fw_cb->ipv4_port =
813 cpu_to_le16(*(uint16_t *)iface_param->value);
814 break;
815 default:
816 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
817 iface_param->param);
818 break;
819 }
820}
821
822static void
823qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
824{
825 struct addr_ctrl_blk_def *acb;
826 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
827 memset(acb->reserved1, 0, sizeof(acb->reserved1));
828 memset(acb->reserved2, 0, sizeof(acb->reserved2));
829 memset(acb->reserved3, 0, sizeof(acb->reserved3));
830 memset(acb->reserved4, 0, sizeof(acb->reserved4));
831 memset(acb->reserved5, 0, sizeof(acb->reserved5));
832 memset(acb->reserved6, 0, sizeof(acb->reserved6));
833 memset(acb->reserved7, 0, sizeof(acb->reserved7));
834 memset(acb->reserved8, 0, sizeof(acb->reserved8));
835 memset(acb->reserved9, 0, sizeof(acb->reserved9));
836 memset(acb->reserved10, 0, sizeof(acb->reserved10));
837 memset(acb->reserved11, 0, sizeof(acb->reserved11));
838 memset(acb->reserved12, 0, sizeof(acb->reserved12));
839 memset(acb->reserved13, 0, sizeof(acb->reserved13));
840 memset(acb->reserved14, 0, sizeof(acb->reserved14));
841 memset(acb->reserved15, 0, sizeof(acb->reserved15));
842}
843
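/**
 * qla4xxx_iface_set_param - apply iface parameters from the transport class
 * @shost: scsi host being configured
 * @data: array of struct iscsi_iface_param_info entries
 * @count: number of entries in @data
 *
 * Descriptive note: reads the current init firmware control block, applies
 * each IPv4/IPv6 parameter to it, writes the result to flash, then disables
 * the current ACB and applies the updated one via qla4xxx_set_acb() so the
 * firmware picks up the new settings.
 **/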
844static int
845qla4xxx_iface_set_param(struct Scsi_Host *shost, char *data, int count)
846{
847 struct scsi_qla_host *ha = to_qla_host(shost);
848 int rval = 0;
849 struct iscsi_iface_param_info *iface_param = NULL;
850 struct addr_ctrl_blk *init_fw_cb = NULL;
851 dma_addr_t init_fw_cb_dma;
852 uint32_t mbox_cmd[MBOX_REG_COUNT];
853 uint32_t mbox_sts[MBOX_REG_COUNT];
854 uint32_t total_param_count;
855 uint32_t length;
856
857 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
858 sizeof(struct addr_ctrl_blk),
859 &init_fw_cb_dma, GFP_KERNEL);
860 if (!init_fw_cb) {
861 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
862 __func__);
863 return -ENOMEM;
864 }
865
866 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
867 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
868 memset(&mbox_sts, 0, sizeof(mbox_sts));
869
870 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
871 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
872 rval = -EIO;
873 goto exit_init_fw_cb;
874 }
875
876 total_param_count = count;
877 iface_param = (struct iscsi_iface_param_info *)data;
878
879 for ( ; total_param_count != 0; total_param_count--) {
880 length = iface_param->len;
881
882 if (iface_param->param_type != ISCSI_NET_PARAM)
883 continue;
884
885 switch (iface_param->iface_type) {
886 case ISCSI_IFACE_TYPE_IPV4:
887 switch (iface_param->iface_num) {
888 case 0:
889 qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
890 break;
891 default:
892 /* Cannot have more than one IPv4 interface */
893 ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
894 "number = %d\n",
895 iface_param->iface_num);
896 break;
897 }
898 break;
899 case ISCSI_IFACE_TYPE_IPV6:
900 switch (iface_param->iface_num) {
901 case 0:
902 case 1:
903 qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
904 break;
905 default:
 906			/* Cannot have more than two IPv6 interfaces */
907 ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
908 "number = %d\n",
909 iface_param->iface_num);
910 break;
911 }
912 break;
913 default:
914 ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
915 break;
916 }
917
918 iface_param = (struct iscsi_iface_param_info *)
919 ((uint8_t *)iface_param +
920 sizeof(struct iscsi_iface_param_info) + length);
921 }
922
923 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
924
925 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
926 sizeof(struct addr_ctrl_blk),
927 FLASH_OPT_RMW_COMMIT);
928 if (rval != QLA_SUCCESS) {
929 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
930 __func__);
931 rval = -EIO;
932 goto exit_init_fw_cb;
933 }
934
935 qla4xxx_disable_acb(ha);
936
937 qla4xxx_initcb_to_acb(init_fw_cb);
938
939 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
940 if (rval != QLA_SUCCESS) {
941 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
942 __func__);
943 rval = -EIO;
944 goto exit_init_fw_cb;
945 }
946
947 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
948 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
949 init_fw_cb_dma);
950
951exit_init_fw_cb:
952 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
953 init_fw_cb, init_fw_cb_dma);
954
955 return rval;
956}
957
b3a271a9 958static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
959 enum iscsi_param param, char *buf)
960{
961 struct iscsi_conn *conn;
962 struct qla_conn *qla_conn;
963 struct sockaddr *dst_addr;
964 int len = 0;
afaf5a2d 965
966 conn = cls_conn->dd_data;
967 qla_conn = conn->dd_data;
968 dst_addr = &qla_conn->qla_ep->dst_addr;
969
970 switch (param) {
971 case ISCSI_PARAM_CONN_PORT:
afaf5a2d 972 case ISCSI_PARAM_CONN_ADDRESS:
973 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
974 dst_addr, param, buf);
afaf5a2d 975 default:
b3a271a9 976 return iscsi_conn_get_param(cls_conn, param, buf);
977 }
978
979 return len;
b3a271a9 980
981}
982
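/**
 * qla4xxx_session_create - create a new iSCSI session object
 * @ep: endpoint previously set up by qla4xxx_ep_connect
 * @cmds_max: maximum outstanding commands
 * @qdepth: queue depth (not used here)
 * @initial_cmdsn: initial command sequence number
 *
 * Descriptive note: reserves a free device database (DDB) index in the
 * firmware and then creates the libiscsi session, storing the driver's
 * ddb_entry in the session's dd_data.
 **/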
983static struct iscsi_cls_session *
984qla4xxx_session_create(struct iscsi_endpoint *ep,
985 uint16_t cmds_max, uint16_t qdepth,
986 uint32_t initial_cmdsn)
987{
988 struct iscsi_cls_session *cls_sess;
989 struct scsi_qla_host *ha;
990 struct qla_endpoint *qla_ep;
991 struct ddb_entry *ddb_entry;
992 uint32_t ddb_index;
993 uint32_t mbx_sts = 0;
994 struct iscsi_session *sess;
995 struct sockaddr *dst_addr;
996 int ret;
997
998 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
999 if (!ep) {
1000 printk(KERN_ERR "qla4xxx: missing ep.\n");
1001 return NULL;
1002 }
1003
1004 qla_ep = ep->dd_data;
1005 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1006 ha = to_qla_host(qla_ep->host);
1007get_ddb_index:
1008 ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1009
1010 if (ddb_index >= MAX_DDB_ENTRIES) {
1011 DEBUG2(ql4_printk(KERN_INFO, ha,
1012 "Free DDB index not available\n"));
1013 return NULL;
1014 }
1015
1016 if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
1017 goto get_ddb_index;
1018
1019 DEBUG2(ql4_printk(KERN_INFO, ha,
1020 "Found a free DDB index at %d\n", ddb_index));
1021 ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
1022 if (ret == QLA_ERROR) {
1023 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1024 ql4_printk(KERN_INFO, ha,
1025 "DDB index = %d not available trying next\n",
1026 ddb_index);
1027 goto get_ddb_index;
1028 }
1029 DEBUG2(ql4_printk(KERN_INFO, ha,
1030 "Free FW DDB not available\n"));
1031 return NULL;
1032 }
1033
1034 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1035 cmds_max, sizeof(struct ddb_entry),
1036 sizeof(struct ql4_task_data),
1037 initial_cmdsn, ddb_index);
1038 if (!cls_sess)
1039 return NULL;
1040
1041 sess = cls_sess->dd_data;
1042 ddb_entry = sess->dd_data;
1043 ddb_entry->fw_ddb_index = ddb_index;
1044 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1045 ddb_entry->ha = ha;
1046 ddb_entry->sess = cls_sess;
1047 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1048 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1049 ha->tot_ddbs++;
1050
1051 return cls_sess;
1052}
1053
1054static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1055{
1056 struct iscsi_session *sess;
1057 struct ddb_entry *ddb_entry;
1058 struct scsi_qla_host *ha;
1059 unsigned long flags;
1060
1061 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1062 sess = cls_sess->dd_data;
1063 ddb_entry = sess->dd_data;
1064 ha = ddb_entry->ha;
1065
1066 spin_lock_irqsave(&ha->hardware_lock, flags);
1067 qla4xxx_free_ddb(ha, ddb_entry);
1068 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1069 iscsi_session_teardown(cls_sess);
1070}
1071
1072static struct iscsi_cls_conn *
1073qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1074{
1075 struct iscsi_cls_conn *cls_conn;
1076 struct iscsi_session *sess;
1077 struct ddb_entry *ddb_entry;
1078
1079 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1080 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1081 conn_idx);
1082 sess = cls_sess->dd_data;
1083 ddb_entry = sess->dd_data;
1084 ddb_entry->conn = cls_conn;
1085
1086 return cls_conn;
1087}
1088
1089static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1090 struct iscsi_cls_conn *cls_conn,
1091 uint64_t transport_fd, int is_leading)
1092{
1093 struct iscsi_conn *conn;
1094 struct qla_conn *qla_conn;
1095 struct iscsi_endpoint *ep;
1096
1097 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1098
1099 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1100 return -EINVAL;
1101 ep = iscsi_lookup_endpoint(transport_fd);
1102 conn = cls_conn->dd_data;
1103 qla_conn = conn->dd_data;
1104 qla_conn->qla_ep = ep->dd_data;
1105 return 0;
1106}
1107
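/**
 * qla4xxx_conn_start - start (log in) an offloaded connection
 * @cls_conn: connection to start
 *
 * Descriptive note: pushes the negotiated session and connection parameters
 * into the firmware DDB entry and then asks the firmware to open the
 * connection.  If iscsid restarts while the DDB is already active, the
 * set-param step is skipped and the connection is simply marked started.
 **/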
1108static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1109{
1110 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1111 struct iscsi_session *sess;
1112 struct ddb_entry *ddb_entry;
1113 struct scsi_qla_host *ha;
1114 struct dev_db_entry *fw_ddb_entry;
1115 dma_addr_t fw_ddb_entry_dma;
1116 uint32_t mbx_sts = 0;
1117 int ret = 0;
1118 int status = QLA_SUCCESS;
1119
1120 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1121 sess = cls_sess->dd_data;
1122 ddb_entry = sess->dd_data;
1123 ha = ddb_entry->ha;
1124
1125 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1126 &fw_ddb_entry_dma, GFP_KERNEL);
1127 if (!fw_ddb_entry) {
1128 ql4_printk(KERN_ERR, ha,
1129 "%s: Unable to allocate dma buffer\n", __func__);
1130 return -ENOMEM;
1131 }
1132
1133 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1134 if (ret) {
1135		/* If iscsid is stopped and restarted, there is no need to
1136		 * set the params again since the ddb state will already be
1137		 * active and the FW does not allow set ddb on an
1138		 * active session.
1139 */
1140 if (mbx_sts)
1141 if (ddb_entry->fw_ddb_device_state ==
1142 DDB_DS_SESSION_ACTIVE)
1143 goto exit_set_param;
1144
1145 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1146 __func__, ddb_entry->fw_ddb_index);
1147 goto exit_conn_start;
1148 }
1149
1150 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1151 if (status == QLA_ERROR) {
1152 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1153 sess->targetname);
1154 ret = -EINVAL;
1155 goto exit_conn_start;
1156 }
1157
1158 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1159
1160exit_set_param:
1161 iscsi_conn_start(cls_conn);
1162 ret = 0;
1163
1164exit_conn_start:
1165 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1166 fw_ddb_entry, fw_ddb_entry_dma);
1167 return ret;
1168}
1169
1170static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1171{
1172 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1173 struct iscsi_session *sess;
1174 struct scsi_qla_host *ha;
1175 struct ddb_entry *ddb_entry;
1176 int options;
1177
1178 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1179 sess = cls_sess->dd_data;
1180 ddb_entry = sess->dd_data;
1181 ha = ddb_entry->ha;
1182
1183 options = LOGOUT_OPTION_CLOSE_SESSION;
1184 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1185 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1186 else
1187 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1188
1189 /*
1190	 * Clear the DDB index bit so that the next login can reuse it;
1191	 * if the FW has not cleared the DDB entry, set DDB will fail anyway.
1192 */
1193 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
1194}
1195
1196static void qla4xxx_task_work(struct work_struct *wdata)
1197{
1198 struct ql4_task_data *task_data;
1199 struct scsi_qla_host *ha;
1200 struct passthru_status *sts;
1201 struct iscsi_task *task;
1202 struct iscsi_hdr *hdr;
1203 uint8_t *data;
1204 uint32_t data_len;
1205 struct iscsi_conn *conn;
1206 int hdr_len;
1207 itt_t itt;
1208
1209 task_data = container_of(wdata, struct ql4_task_data, task_work);
1210 ha = task_data->ha;
1211 task = task_data->task;
1212 sts = &task_data->sts;
1213 hdr_len = sizeof(struct iscsi_hdr);
1214
1215 DEBUG3(printk(KERN_INFO "Status returned\n"));
1216 DEBUG3(qla4xxx_dump_buffer(sts, 64));
1217 DEBUG3(printk(KERN_INFO "Response buffer"));
1218 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1219
1220 conn = task->conn;
1221
1222 switch (sts->completionStatus) {
1223 case PASSTHRU_STATUS_COMPLETE:
1224 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1225 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1226 itt = sts->handle;
1227 hdr->itt = itt;
1228 data = task_data->resp_buffer + hdr_len;
1229 data_len = task_data->resp_len - hdr_len;
1230 iscsi_complete_pdu(conn, hdr, data, data_len);
1231 break;
1232 default:
1233 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1234 sts->completionStatus);
1235 break;
1236 }
1237 return;
1238}
1239
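/**
 * qla4xxx_alloc_pdu - allocate buffers for a passthrough iSCSI PDU
 * @task: iscsi task the PDU belongs to
 * @opcode: iSCSI opcode (not used here)
 *
 * Descriptive note: only non-SCSI (passthrough) tasks are supported.
 * Allocates DMA-coherent request and response buffers, maps any immediate
 * data and initializes the completion work item (qla4xxx_task_work).
 **/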
1240static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1241{
1242 struct ql4_task_data *task_data;
1243 struct iscsi_session *sess;
1244 struct ddb_entry *ddb_entry;
1245 struct scsi_qla_host *ha;
1246 int hdr_len;
1247
1248 sess = task->conn->session;
1249 ddb_entry = sess->dd_data;
1250 ha = ddb_entry->ha;
1251 task_data = task->dd_data;
1252 memset(task_data, 0, sizeof(struct ql4_task_data));
1253
1254 if (task->sc) {
1255 ql4_printk(KERN_INFO, ha,
1256 "%s: SCSI Commands not implemented\n", __func__);
1257 return -EINVAL;
1258 }
1259
1260 hdr_len = sizeof(struct iscsi_hdr);
1261 task_data->ha = ha;
1262 task_data->task = task;
1263
1264 if (task->data_count) {
1265 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1266 task->data_count,
1267 PCI_DMA_TODEVICE);
1268 }
1269
1270	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1271 __func__, task->conn->max_recv_dlength, hdr_len));
1272
1273 task_data->resp_len = task->conn->max_recv_dlength;
1274 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1275 task_data->resp_len,
1276 &task_data->resp_dma,
1277 GFP_ATOMIC);
1278 if (!task_data->resp_buffer)
1279 goto exit_alloc_pdu;
1280
1281 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1282 task->data_count + hdr_len,
1283 &task_data->req_dma,
1284 GFP_ATOMIC);
1285 if (!task_data->req_buffer)
1286 goto exit_alloc_pdu;
1287
1288 task->hdr = task_data->req_buffer;
1289
1290 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1291
1292 return 0;
1293
1294exit_alloc_pdu:
1295 if (task_data->resp_buffer)
1296 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1297 task_data->resp_buffer, task_data->resp_dma);
1298
1299 if (task_data->req_buffer)
1300 dma_free_coherent(&ha->pdev->dev, task->data_count + hdr_len,
1301 task_data->req_buffer, task_data->req_dma);
1302 return -ENOMEM;
1303}
1304
1305static void qla4xxx_task_cleanup(struct iscsi_task *task)
1306{
1307 struct ql4_task_data *task_data;
1308 struct iscsi_session *sess;
1309 struct ddb_entry *ddb_entry;
1310 struct scsi_qla_host *ha;
1311 int hdr_len;
1312
1313 hdr_len = sizeof(struct iscsi_hdr);
1314 sess = task->conn->session;
1315 ddb_entry = sess->dd_data;
1316 ha = ddb_entry->ha;
1317 task_data = task->dd_data;
1318
1319 if (task->data_count) {
1320 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1321 task->data_count, PCI_DMA_TODEVICE);
1322 }
1323
1324	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1325 __func__, task->conn->max_recv_dlength, hdr_len));
1326
1327 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1328 task_data->resp_buffer, task_data->resp_dma);
1329 dma_free_coherent(&ha->pdev->dev, task->data_count + hdr_len,
1330 task_data->req_buffer, task_data->req_dma);
1331 return;
1332}
1333
1334static int qla4xxx_task_xmit(struct iscsi_task *task)
1335{
1336 struct scsi_cmnd *sc = task->sc;
1337 struct iscsi_session *sess = task->conn->session;
1338 struct ddb_entry *ddb_entry = sess->dd_data;
1339 struct scsi_qla_host *ha = ddb_entry->ha;
1340
1341 if (!sc)
1342 return qla4xxx_send_passthru0(task);
1343
1344 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1345 __func__);
1346 return -ENOSYS;
1347}
1348
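/**
 * qla4xxx_update_session_conn_param - refresh libiscsi parameters from FW
 * @ha: Pointer to host adapter structure
 * @ddb_entry: DDB whose session and connection should be updated
 *
 * Descriptive note: reads the firmware DDB entry and copies the negotiated
 * values (burst lengths, R2T settings, timeouts, TPGT, ...) into the
 * corresponding libiscsi session and connection structures.
 **/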
1349void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1350 struct ddb_entry *ddb_entry)
1351{
1352 struct iscsi_cls_session *cls_sess;
1353 struct iscsi_cls_conn *cls_conn;
1354 struct iscsi_session *sess;
1355 struct iscsi_conn *conn;
1356 uint32_t ddb_state;
1357 dma_addr_t fw_ddb_entry_dma;
1358 struct dev_db_entry *fw_ddb_entry;
1359
1360 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1361 &fw_ddb_entry_dma, GFP_KERNEL);
1362 if (!fw_ddb_entry) {
1363 ql4_printk(KERN_ERR, ha,
1364 "%s: Unable to allocate dma buffer\n", __func__);
1365 return;
1366 }
1367
1368 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
1369 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
1370 NULL, NULL, NULL) == QLA_ERROR) {
1371 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1372 "get_ddb_entry for fw_ddb_index %d\n",
1373 ha->host_no, __func__,
1374 ddb_entry->fw_ddb_index));
1375 return;
1376 }
1377
1378 cls_sess = ddb_entry->sess;
1379 sess = cls_sess->dd_data;
1380
1381 cls_conn = ddb_entry->conn;
1382 conn = cls_conn->dd_data;
1383
1384 /* Update params */
1385 conn->max_recv_dlength = BYTE_UNITS *
1386 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1387
1388 conn->max_xmit_dlength = BYTE_UNITS *
1389 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1390
1391 sess->initial_r2t_en =
1392 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1393
1394 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1395
1396 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1397
1398 sess->first_burst = BYTE_UNITS *
1399 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1400
1401 sess->max_burst = BYTE_UNITS *
1402 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1403
1404 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1405
1406 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1407
1408 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1409
1410 memcpy(sess->initiatorname, ha->name_string,
1411 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
1412}
1413
1414/*
1415 * Timer routines
1416 */
1417
1418static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
1419 unsigned long interval)
1420{
1421 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
1422 __func__, ha->host->host_no));
1423 init_timer(&ha->timer);
1424 ha->timer.expires = jiffies + interval * HZ;
1425 ha->timer.data = (unsigned long)ha;
1426 ha->timer.function = (void (*)(unsigned long))func;
1427 add_timer(&ha->timer);
1428 ha->timer_active = 1;
1429}
1430
1431static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
1432{
1433 del_timer_sync(&ha->timer);
1434 ha->timer_active = 0;
1435}
1436
1437/***
1438 * qla4xxx_mark_device_missing - blocks the session
1439 * @cls_session: Pointer to the session to be blocked
1441 *
1442 * This routine marks the device as missing by blocking its iSCSI session.
afaf5a2d 1443 **/
b3a271a9 1444void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
afaf5a2d 1445{
b3a271a9 1446 iscsi_block_session(cls_session);
1447}
1448
1449/**
1450 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
1451 * @ha: Pointer to host adapter structure.
1452 *
1453 * This routine marks all devices on the adapter as missing by blocking their sessions.
1454 **/
1455void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
1456{
b3a271a9 1457 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
1458}
1459
1460static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
1461 struct ddb_entry *ddb_entry,
8f0722ca 1462 struct scsi_cmnd *cmd)
1463{
1464 struct srb *srb;
1465
1466 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
1467 if (!srb)
1468 return srb;
1469
09a0f719 1470 kref_init(&srb->srb_ref);
1471 srb->ha = ha;
1472 srb->ddb = ddb_entry;
1473 srb->cmd = cmd;
1474 srb->flags = 0;
5369887a 1475 CMD_SP(cmd) = (void *)srb;
1476
1477 return srb;
1478}
1479
1480static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
1481{
1482 struct scsi_cmnd *cmd = srb->cmd;
1483
1484 if (srb->flags & SRB_DMA_VALID) {
5f7186c8 1485 scsi_dma_unmap(cmd);
1486 srb->flags &= ~SRB_DMA_VALID;
1487 }
5369887a 1488 CMD_SP(cmd) = NULL;
1489}
1490
09a0f719 1491void qla4xxx_srb_compl(struct kref *ref)
afaf5a2d 1492{
09a0f719 1493 struct srb *srb = container_of(ref, struct srb, srb_ref);
afaf5a2d 1494 struct scsi_cmnd *cmd = srb->cmd;
09a0f719 1495 struct scsi_qla_host *ha = srb->ha;
1496
1497 qla4xxx_srb_free_dma(ha, srb);
1498
1499 mempool_free(srb, ha->srb_mempool);
1500
1501 cmd->scsi_done(cmd);
1502}
1503
1504/**
1505 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
8f0722ca 1506 * @host: scsi host
afaf5a2d 1507 * @cmd: Pointer to Linux's SCSI command structure
1508 *
1509 * Remarks:
1510 * This routine is invoked by Linux to send a SCSI command to the driver.
1511 * The mid-level driver tries to ensure that queuecommand never gets
1512 * invoked concurrently with itself or the interrupt handler (although
1513 * the interrupt handler may call this routine as part of request-
1514 * completion handling).  Unfortunately, it sometimes calls the scheduler
1515 * in interrupt context, which is a big NO! NO!
1516 **/
8f0722ca 1517static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
afaf5a2d 1518{
8f0722ca 1519 struct scsi_qla_host *ha = to_qla_host(host);
afaf5a2d 1520 struct ddb_entry *ddb_entry = cmd->device->hostdata;
7fb1921b 1521 struct iscsi_cls_session *sess = ddb_entry->sess;
1522 struct srb *srb;
1523 int rval;
1524
1525 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1526 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
1527 cmd->result = DID_NO_CONNECT << 16;
1528 else
1529 cmd->result = DID_REQUEUE << 16;
1530 goto qc_fail_command;
1531 }
1532
1533 if (!sess) {
1534 cmd->result = DID_IMM_RETRY << 16;
1535 goto qc_fail_command;
1536 }
1537
1538 rval = iscsi_session_chkready(sess);
1539 if (rval) {
1540 cmd->result = rval;
1541 goto qc_fail_command;
1542 }
1543
1544 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1545 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1546 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1547 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1548 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1549 !test_bit(AF_ONLINE, &ha->flags) ||
b3a271a9 1550 !test_bit(AF_LINK_UP, &ha->flags) ||
f4f5df23 1551 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
1552 goto qc_host_busy;
1553
8f0722ca 1554 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
afaf5a2d 1555 if (!srb)
8f0722ca 1556 goto qc_host_busy;
1557
1558 rval = qla4xxx_send_command_to_isp(ha, srb);
1559 if (rval != QLA_SUCCESS)
1560 goto qc_host_busy_free_sp;
1561
1562 return 0;
1563
1564qc_host_busy_free_sp:
1565 qla4xxx_srb_free_dma(ha, srb);
1566 mempool_free(srb, ha->srb_mempool);
1567
1568qc_host_busy:
1569 return SCSI_MLQUEUE_HOST_BUSY;
1570
1571qc_fail_command:
8f0722ca 1572 cmd->scsi_done(cmd);
1573
1574 return 0;
1575}
1576
1577/**
1578 * qla4xxx_mem_free - frees memory allocated to adapter
1579 * @ha: Pointer to host adapter structure.
1580 *
1581 * Frees memory previously allocated by qla4xxx_mem_alloc
1582 **/
1583static void qla4xxx_mem_free(struct scsi_qla_host *ha)
1584{
1585 if (ha->queues)
1586 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
1587 ha->queues_dma);
1588
1589 ha->queues_len = 0;
1590 ha->queues = NULL;
1591 ha->queues_dma = 0;
1592 ha->request_ring = NULL;
1593 ha->request_dma = 0;
1594 ha->response_ring = NULL;
1595 ha->response_dma = 0;
1596 ha->shadow_regs = NULL;
1597 ha->shadow_regs_dma = 0;
1598
1599 /* Free srb pool. */
1600 if (ha->srb_mempool)
1601 mempool_destroy(ha->srb_mempool);
1602
1603 ha->srb_mempool = NULL;
1604
1605 if (ha->chap_dma_pool)
1606 dma_pool_destroy(ha->chap_dma_pool);
1607
1608 if (ha->chap_list)
1609 vfree(ha->chap_list);
1610 ha->chap_list = NULL;
1611
afaf5a2d 1612 /* release io space registers */
1613 if (is_qla8022(ha)) {
1614 if (ha->nx_pcibase)
1615 iounmap(
1616 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
f4f5df23 1617 } else if (ha->reg)
1618 iounmap(ha->reg);
1619 pci_release_regions(ha->pdev);
1620}
1621
1622/**
1623 * qla4xxx_mem_alloc - allocates memory for use by adapter.
1624 * @ha: Pointer to host adapter structure
1625 *
1626 * Allocates DMA memory for request and response queues. Also allocates memory
1627 * for srbs.
1628 **/
1629static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
1630{
1631 unsigned long align;
1632
1633 /* Allocate contiguous block of DMA memory for queues. */
1634 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1635 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
1636 sizeof(struct shadow_regs) +
1637 MEM_ALIGN_VALUE +
1638 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
1639 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
1640 &ha->queues_dma, GFP_KERNEL);
1641 if (ha->queues == NULL) {
1642 ql4_printk(KERN_WARNING, ha,
1643 "Memory Allocation failed - queues.\n");
1644
1645 goto mem_alloc_error_exit;
1646 }
1647 memset(ha->queues, 0, ha->queues_len);
1648
1649 /*
1650 * As per RISC alignment requirements -- the bus-address must be a
1651 * multiple of the request-ring size (in bytes).
1652 */
1653 align = 0;
1654 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
1655 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
1656 (MEM_ALIGN_VALUE - 1));
1657
1658 /* Update request and response queue pointers. */
1659 ha->request_dma = ha->queues_dma + align;
1660 ha->request_ring = (struct queue_entry *) (ha->queues + align);
1661 ha->response_dma = ha->queues_dma + align +
1662 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
1663 ha->response_ring = (struct queue_entry *) (ha->queues + align +
1664 (REQUEST_QUEUE_DEPTH *
1665 QUEUE_SIZE));
1666 ha->shadow_regs_dma = ha->queues_dma + align +
1667 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
1668 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
1669 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
1670 (REQUEST_QUEUE_DEPTH *
1671 QUEUE_SIZE) +
1672 (RESPONSE_QUEUE_DEPTH *
1673 QUEUE_SIZE));
1674
1675 /* Allocate memory for srb pool. */
1676 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
1677 mempool_free_slab, srb_cachep);
1678 if (ha->srb_mempool == NULL) {
1679 ql4_printk(KERN_WARNING, ha,
1680 "Memory Allocation failed - SRB Pool.\n");
1681
1682 goto mem_alloc_error_exit;
1683 }
1684
1685 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
1686 CHAP_DMA_BLOCK_SIZE, 8, 0);
1687
1688 if (ha->chap_dma_pool == NULL) {
1689 ql4_printk(KERN_WARNING, ha,
1690 "%s: chap_dma_pool allocation failed..\n", __func__);
1691 goto mem_alloc_error_exit;
1692 }
1693
1694 return QLA_SUCCESS;
1695
1696mem_alloc_error_exit:
1697 qla4xxx_mem_free(ha);
1698 return QLA_ERROR;
1699}
1700
1701/**
1702 * qla4_8xxx_check_fw_alive - Check firmware health
1703 * @ha: Pointer to host adapter structure.
1704 *
1705 * Context: Interrupt
1706 **/
1707static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
1708{
1709 uint32_t fw_heartbeat_counter, halt_status;
1710
1711 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
1712 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
1713 if (fw_heartbeat_counter == 0xffffffff) {
1714 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
1715 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
1716 ha->host_no, __func__));
1717 return;
1718 }
1719
1720 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
1721 ha->seconds_since_last_heartbeat++;
1722 /* FW not alive after 2 seconds */
1723 if (ha->seconds_since_last_heartbeat == 2) {
1724 ha->seconds_since_last_heartbeat = 0;
1725 halt_status = qla4_8xxx_rd_32(ha,
1726 QLA82XX_PEG_HALT_STATUS1);
1727
1728 ql4_printk(KERN_INFO, ha,
1729 "scsi(%ld): %s, Dumping hw/fw registers:\n "
1730 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
1731 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
1732 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
1733 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
1734 ha->host_no, __func__, halt_status,
1735 qla4_8xxx_rd_32(ha,
1736 QLA82XX_PEG_HALT_STATUS2),
1737 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
1738 0x3c),
1739 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
1740 0x3c),
1741 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
1742 0x3c),
1743 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
1744 0x3c),
1745 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
1746 0x3c));
21033639 1747
f4f5df23
VC
 1748			/* Since we cannot change dev_state in interrupt
 1749			 * context, set the appropriate DPC flag and then
 1750			 * wake up the DPC */
1751 if (halt_status & HALT_STATUS_UNRECOVERABLE)
1752 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
1753 else {
1754 printk("scsi%ld: %s: detect abort needed!\n",
1755 ha->host_no, __func__);
1756 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1757 }
1758 qla4xxx_wake_dpc(ha);
21033639 1759 qla4xxx_mailbox_premature_completion(ha);
f4f5df23 1760 }
99457d75
LC
1761 } else
1762 ha->seconds_since_last_heartbeat = 0;
1763
f4f5df23
VC
1764 ha->fw_heartbeat_counter = fw_heartbeat_counter;
1765}
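/*
 * In short: healthy firmware keeps incrementing PEG_ALIVE_COUNTER.  If the
 * counter is unchanged across two consecutive one-second polls, the
 * halt-status registers are dumped and either DPC_HA_UNRECOVERABLE or
 * DPC_RESET_HA is scheduled for the DPC thread to act on.
 */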
1766
1767/**
1768 * qla4_8xxx_watchdog - Poll dev state
1769 * @ha: Pointer to host adapter structure.
1770 *
1771 * Context: Interrupt
1772 **/
1773void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
1774{
1775 uint32_t dev_state;
1776
1777 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1778
1779 /* don't poll if reset is going on */
d56a1f7b
LC
1780 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
1781 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
977f46a4 1782 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
f4f5df23
VC
1783 if (dev_state == QLA82XX_DEV_NEED_RESET &&
1784 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3930b8c1
VC
1785 if (!ql4xdontresethba) {
1786 ql4_printk(KERN_INFO, ha, "%s: HW State: "
1787 "NEED RESET!\n", __func__);
1788 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1789 qla4xxx_wake_dpc(ha);
1790 qla4xxx_mailbox_premature_completion(ha);
1791 }
f4f5df23
VC
1792 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
1793 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3930b8c1
VC
1794 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
1795 __func__);
f4f5df23
VC
1796 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
1797 qla4xxx_wake_dpc(ha);
1798 } else {
1799 /* Check firmware health */
1800 qla4_8xxx_check_fw_alive(ha);
1801 }
1802 }
1803}
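/*
 * The watchdog itself only translates the device state and the firmware
 * heartbeat into dpc_flags bits; the actual reset/quiescent handling runs
 * later in qla4xxx_do_dpc().
 */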
1804
afaf5a2d
DS
1805/**
1806 * qla4xxx_timer - checks every second for work to do.
1807 * @ha: Pointer to host adapter structure.
1808 **/
1809static void qla4xxx_timer(struct scsi_qla_host *ha)
1810{
afaf5a2d 1811 int start_dpc = 0;
2232be0d
LC
1812 uint16_t w;
1813
1814 /* If we are in the middle of AER/EEH processing
1815 * skip any processing and reschedule the timer
1816 */
1817 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
1818 mod_timer(&ha->timer, jiffies + HZ);
1819 return;
1820 }
1821
1822 /* Hardware read to trigger an EEH error during mailbox waits. */
1823 if (!pci_channel_offline(ha->pdev))
1824 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
afaf5a2d 1825
f4f5df23
VC
1826 if (is_qla8022(ha)) {
1827 qla4_8xxx_watchdog(ha);
1828 }
1829
f4f5df23
VC
1830 if (!is_qla8022(ha)) {
1831 /* Check for heartbeat interval. */
1832 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
1833 ha->heartbeat_interval != 0) {
1834 ha->seconds_since_last_heartbeat++;
1835 if (ha->seconds_since_last_heartbeat >
1836 ha->heartbeat_interval + 2)
1837 set_bit(DPC_RESET_HA, &ha->dpc_flags);
1838 }
afaf5a2d
DS
1839 }
1840
afaf5a2d 1841 /* Wakeup the dpc routine for this adapter, if needed. */
1b46807e 1842 if (start_dpc ||
afaf5a2d
DS
1843 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
1844 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
1845 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
f4f5df23 1846 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
afaf5a2d
DS
1847 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
1848 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
065aa1b4 1849 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
f4f5df23
VC
1850 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
1851 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
1b46807e 1852 test_bit(DPC_AEN, &ha->dpc_flags)) {
afaf5a2d
DS
1853 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
1854 " - dpc flags = 0x%lx\n",
1855 ha->host_no, __func__, ha->dpc_flags));
f4f5df23 1856 qla4xxx_wake_dpc(ha);
afaf5a2d
DS
1857 }
1858
1859 /* Reschedule timer thread to call us back in one second */
1860 mod_timer(&ha->timer, jiffies + HZ);
1861
1862 DEBUG2(ha->seconds_since_last_intr++);
1863}
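/*
 * Aside from the config-space read used to surface EEH errors, the timer
 * does no recovery work itself: it sets dpc_flags, wakes the DPC workqueue
 * when needed, and re-arms itself to fire again in one second.
 */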
1864
1865/**
1866 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
1867 * @ha: Pointer to host adapter structure.
1868 *
1869 * This routine stalls the driver until all outstanding commands are returned.
1870 * Caller must release the Hardware Lock prior to calling this routine.
1871 **/
1872static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
1873{
1874 uint32_t index = 0;
afaf5a2d
DS
1875 unsigned long flags;
1876 struct scsi_cmnd *cmd;
afaf5a2d 1877
f4f5df23
VC
1878 unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
1879
1880 DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
1881 "complete\n", WAIT_CMD_TOV));
1882
1883 while (!time_after_eq(jiffies, wtime)) {
afaf5a2d
DS
1884 spin_lock_irqsave(&ha->hardware_lock, flags);
1885 /* Find a command that hasn't completed. */
1886 for (index = 0; index < ha->host->can_queue; index++) {
1887 cmd = scsi_host_find_tag(ha->host, index);
a1e0063d
MC
1888 /*
1889 * We cannot just check if the index is valid,
 1890			 * because if we are called from the SCSI EH, then
1891 * the scsi/block layer is going to prevent
1892 * the tag from being released.
1893 */
1894 if (cmd != NULL && CMD_SP(cmd))
afaf5a2d
DS
1895 break;
1896 }
1897 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1898
1899 /* If No Commands are pending, wait is complete */
f4f5df23
VC
1900 if (index == ha->host->can_queue)
1901 return QLA_SUCCESS;
afaf5a2d 1902
f4f5df23
VC
1903 msleep(1000);
1904 }
1905 /* If we timed out on waiting for commands to come back
1906 * return ERROR. */
1907 return QLA_ERROR;
afaf5a2d
DS
1908}
1909
f4f5df23 1910int qla4xxx_hw_reset(struct scsi_qla_host *ha)
afaf5a2d 1911{
afaf5a2d 1912 uint32_t ctrl_status;
477ffb9d
DS
1913 unsigned long flags = 0;
1914
1915 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
afaf5a2d 1916
f4f5df23
VC
1917 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
1918 return QLA_ERROR;
1919
afaf5a2d
DS
1920 spin_lock_irqsave(&ha->hardware_lock, flags);
1921
1922 /*
1923 * If the SCSI Reset Interrupt bit is set, clear it.
1924 * Otherwise, the Soft Reset won't work.
1925 */
1926 ctrl_status = readw(&ha->reg->ctrl_status);
1927 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
1928 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
1929
1930 /* Issue Soft Reset */
1931 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
1932 readl(&ha->reg->ctrl_status);
1933
1934 spin_unlock_irqrestore(&ha->hardware_lock, flags);
f4f5df23 1935 return QLA_SUCCESS;
477ffb9d
DS
1936}
1937
1938/**
1939 * qla4xxx_soft_reset - performs soft reset.
1940 * @ha: Pointer to host adapter structure.
1941 **/
1942int qla4xxx_soft_reset(struct scsi_qla_host *ha)
1943{
1944 uint32_t max_wait_time;
1945 unsigned long flags = 0;
f931c534 1946 int status;
477ffb9d
DS
1947 uint32_t ctrl_status;
1948
f931c534
VC
1949 status = qla4xxx_hw_reset(ha);
1950 if (status != QLA_SUCCESS)
1951 return status;
afaf5a2d 1952
f931c534 1953 status = QLA_ERROR;
afaf5a2d
DS
1954 /* Wait until the Network Reset Intr bit is cleared */
1955 max_wait_time = RESET_INTR_TOV;
1956 do {
1957 spin_lock_irqsave(&ha->hardware_lock, flags);
1958 ctrl_status = readw(&ha->reg->ctrl_status);
1959 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1960
1961 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
1962 break;
1963
1964 msleep(1000);
1965 } while ((--max_wait_time));
1966
1967 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
1968 DEBUG2(printk(KERN_WARNING
1969 "scsi%ld: Network Reset Intr not cleared by "
1970 "Network function, clearing it now!\n",
1971 ha->host_no));
1972 spin_lock_irqsave(&ha->hardware_lock, flags);
1973 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
1974 readl(&ha->reg->ctrl_status);
1975 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1976 }
1977
1978 /* Wait until the firmware tells us the Soft Reset is done */
1979 max_wait_time = SOFT_RESET_TOV;
1980 do {
1981 spin_lock_irqsave(&ha->hardware_lock, flags);
1982 ctrl_status = readw(&ha->reg->ctrl_status);
1983 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1984
1985 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
1986 status = QLA_SUCCESS;
1987 break;
1988 }
1989
1990 msleep(1000);
1991 } while ((--max_wait_time));
1992
1993 /*
1994 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
1995 * after the soft reset has taken place.
1996 */
1997 spin_lock_irqsave(&ha->hardware_lock, flags);
1998 ctrl_status = readw(&ha->reg->ctrl_status);
1999 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
2000 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
2001 readl(&ha->reg->ctrl_status);
2002 }
2003 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2004
 2005	/* If the soft reset fails, then most probably the BIOS on the other
 2006	 * function is also enabled.
 2007	 * Since the initialization is sequential, the other function
 2008	 * won't be able to acknowledge the soft reset.
 2009	 * Issue a force soft reset to work around this scenario.
2010 */
2011 if (max_wait_time == 0) {
2012 /* Issue Force Soft Reset */
2013 spin_lock_irqsave(&ha->hardware_lock, flags);
2014 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
2015 readl(&ha->reg->ctrl_status);
2016 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2017 /* Wait until the firmware tells us the Soft Reset is done */
2018 max_wait_time = SOFT_RESET_TOV;
2019 do {
2020 spin_lock_irqsave(&ha->hardware_lock, flags);
2021 ctrl_status = readw(&ha->reg->ctrl_status);
2022 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2023
2024 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
2025 status = QLA_SUCCESS;
2026 break;
2027 }
2028
2029 msleep(1000);
2030 } while ((--max_wait_time));
2031 }
2032
2033 return status;
2034}
2035
afaf5a2d 2036/**
f4f5df23 2037 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
afaf5a2d 2038 * @ha: Pointer to host adapter structure.
f4f5df23 2039 * @res: returned scsi status
afaf5a2d
DS
2040 *
2041 * This routine is called just prior to a HARD RESET to return all
2042 * outstanding commands back to the Operating System.
 2043 * The caller should make sure that the following locks are released
 2044 * before calling this routine: hardware lock and io_request_lock.
2045 **/
f4f5df23 2046static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
afaf5a2d
DS
2047{
2048 struct srb *srb;
2049 int i;
2050 unsigned long flags;
2051
2052 spin_lock_irqsave(&ha->hardware_lock, flags);
2053 for (i = 0; i < ha->host->can_queue; i++) {
2054 srb = qla4xxx_del_from_active_array(ha, i);
2055 if (srb != NULL) {
f4f5df23 2056 srb->cmd->result = res;
09a0f719 2057 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
afaf5a2d
DS
2058 }
2059 }
2060 spin_unlock_irqrestore(&ha->hardware_lock, flags);
afaf5a2d
DS
2061}
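/*
 * The 'res' argument is a complete SCSI result word; callers pass host-byte
 * codes shifted into place (e.g. DID_RESET << 16 or DID_NO_CONNECT << 16) so
 * the midlayer sees why the command was aborted.
 */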
2062
f4f5df23
VC
2063void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
2064{
2065 clear_bit(AF_ONLINE, &ha->flags);
2066
2067 /* Disable the board */
2068 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
f4f5df23
VC
2069
2070 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
2071 qla4xxx_mark_all_devices_missing(ha);
2072 clear_bit(AF_INIT_DONE, &ha->flags);
2073}
2074
b3a271a9
MR
2075static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2076{
2077 struct iscsi_session *sess;
2078 struct ddb_entry *ddb_entry;
2079
2080 sess = cls_session->dd_data;
2081 ddb_entry = sess->dd_data;
2082 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2083 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
2084}
2085
afaf5a2d
DS
2086/**
2087 * qla4xxx_recover_adapter - recovers adapter after a fatal error
2088 * @ha: Pointer to host adapter structure.
afaf5a2d 2089 **/
f4f5df23 2090static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
afaf5a2d 2091{
f4f5df23
VC
2092 int status = QLA_ERROR;
2093 uint8_t reset_chip = 0;
afaf5a2d
DS
2094
2095 /* Stall incoming I/O until we are done */
f4f5df23 2096 scsi_block_requests(ha->host);
afaf5a2d 2097 clear_bit(AF_ONLINE, &ha->flags);
b3a271a9 2098 clear_bit(AF_LINK_UP, &ha->flags);
50a29aec 2099
f4f5df23 2100 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
afaf5a2d 2101
f4f5df23 2102 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
afaf5a2d 2103
b3a271a9
MR
2104 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2105
f4f5df23
VC
2106 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
2107 reset_chip = 1;
afaf5a2d 2108
f4f5df23
VC
2109 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
2110 * do not reset adapter, jump to initialize_adapter */
2111 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
2112 status = QLA_SUCCESS;
2113 goto recover_ha_init_adapter;
2114 }
afaf5a2d 2115
f4f5df23
VC
2116 /* For the ISP-82xx adapter, issue a stop_firmware if invoked
2117 * from eh_host_reset or ioctl module */
2118 if (is_qla8022(ha) && !reset_chip &&
2119 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2120
2121 DEBUG2(ql4_printk(KERN_INFO, ha,
2122 "scsi%ld: %s - Performing stop_firmware...\n",
2123 ha->host_no, __func__));
2124 status = ha->isp_ops->reset_firmware(ha);
2125 if (status == QLA_SUCCESS) {
2bd1e2be
NJ
2126 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2127 qla4xxx_cmd_wait(ha);
f4f5df23
VC
2128 ha->isp_ops->disable_intrs(ha);
2129 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2130 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2131 } else {
2132 /* If the stop_firmware fails then
2133 * reset the entire chip */
2134 reset_chip = 1;
2135 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2136 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2137 }
2138 }
dca05c4c 2139
f4f5df23
VC
2140 /* Issue full chip reset if recovering from a catastrophic error,
2141 * or if stop_firmware fails for ISP-82xx.
2142 * This is the default case for ISP-4xxx */
2143 if (!is_qla8022(ha) || reset_chip) {
2bd1e2be
NJ
2144 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
2145 qla4xxx_cmd_wait(ha);
f4f5df23
VC
2146 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2147 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2148 DEBUG2(ql4_printk(KERN_INFO, ha,
2149 "scsi%ld: %s - Performing chip reset..\n",
2150 ha->host_no, __func__));
2151 status = ha->isp_ops->reset_chip(ha);
2152 }
afaf5a2d
DS
2153
2154 /* Flush any pending ddb changed AENs */
2155 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2156
f4f5df23
VC
2157recover_ha_init_adapter:
2158 /* Upon successful firmware/chip reset, re-initialize the adapter */
afaf5a2d 2159 if (status == QLA_SUCCESS) {
f4f5df23
VC
2160 /* For ISP-4xxx, force function 1 to always initialize
 2161		 * before function 3 to prevent both functions from
 2162		 * stepping on top of each other */
2163 if (!is_qla8022(ha) && (ha->mac_index == 3))
2164 ssleep(6);
2165
2166 /* NOTE: AF_ONLINE flag set upon successful completion of
2167 * qla4xxx_initialize_adapter */
0e7e8501 2168 status = qla4xxx_initialize_adapter(ha);
afaf5a2d
DS
2169 }
2170
f4f5df23
VC
2171 /* Retry failed adapter initialization, if necessary
2172 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
2173 * case to prevent ping-pong resets between functions */
2174 if (!test_bit(AF_ONLINE, &ha->flags) &&
2175 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 2176 /* Adapter initialization failed, see if we can retry
f4f5df23
VC
2177 * resetting the ha.
2178 * Since we don't want to block the DPC for too long
2179 * with multiple resets in the same thread,
2180 * utilize DPC to retry */
afaf5a2d
DS
2181 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
2182 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
2183 DEBUG2(printk("scsi%ld: recover adapter - retrying "
2184 "(%d) more times\n", ha->host_no,
2185 ha->retry_reset_ha_cnt));
2186 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2187 status = QLA_ERROR;
2188 } else {
2189 if (ha->retry_reset_ha_cnt > 0) {
2190 /* Schedule another Reset HA--DPC will retry */
2191 ha->retry_reset_ha_cnt--;
2192 DEBUG2(printk("scsi%ld: recover adapter - "
2193 "retry remaining %d\n",
2194 ha->host_no,
2195 ha->retry_reset_ha_cnt));
2196 status = QLA_ERROR;
2197 }
2198
2199 if (ha->retry_reset_ha_cnt == 0) {
2200 /* Recover adapter retries have been exhausted.
2201 * Adapter DEAD */
2202 DEBUG2(printk("scsi%ld: recover adapter "
2203 "failed - board disabled\n",
2204 ha->host_no));
f4f5df23 2205 qla4xxx_dead_adapter_cleanup(ha);
afaf5a2d
DS
2206 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2207 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 2208 clear_bit(DPC_RESET_HA_FW_CONTEXT,
afaf5a2d
DS
2209 &ha->dpc_flags);
2210 status = QLA_ERROR;
2211 }
2212 }
2213 } else {
2214 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
f4f5df23 2215 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
afaf5a2d
DS
2216 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
2217 }
2218
2219 ha->adapter_error_count++;
2220
f4f5df23
VC
2221 if (test_bit(AF_ONLINE, &ha->flags))
2222 ha->isp_ops->enable_intrs(ha);
2223
2224 scsi_unblock_requests(ha->host);
2225
2226 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2227 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
25985edc 2228 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
afaf5a2d 2229
afaf5a2d
DS
2230 return status;
2231}
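/*
 * Recovery in brief: block SCSI I/O, fail all iSCSI sessions, stop or reset
 * the firmware/chip as appropriate for the ISP type, then re-run
 * qla4xxx_initialize_adapter().  Failed attempts are retried from the DPC
 * thread up to MAX_RESET_HA_RETRIES times before the board is disabled.
 */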
2232
b3a271a9 2233static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
2d7924e6 2234{
b3a271a9
MR
2235 struct iscsi_session *sess;
2236 struct ddb_entry *ddb_entry;
2237 struct scsi_qla_host *ha;
2d7924e6 2238
b3a271a9
MR
2239 sess = cls_session->dd_data;
2240 ddb_entry = sess->dd_data;
2241 ha = ddb_entry->ha;
2242 if (!iscsi_is_session_online(cls_session)) {
2243 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
2244 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
2245 " unblock session\n", ha->host_no, __func__,
2246 ddb_entry->fw_ddb_index);
2247 iscsi_unblock_session(ddb_entry->sess);
2248 } else {
2249 /* Trigger relogin */
2250 iscsi_session_failure(cls_session->dd_data,
2251 ISCSI_ERR_CONN_FAILED);
2d7924e6
VC
2252 }
2253 }
2254}
2255
b3a271a9
MR
2256static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
2257{
2258 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
2259}
2260
f4f5df23
VC
2261void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
2262{
1b46807e 2263 if (ha->dpc_thread)
f4f5df23 2264 queue_work(ha->dpc_thread, &ha->dpc_work);
f4f5df23
VC
2265}
2266
afaf5a2d
DS
2267/**
2268 * qla4xxx_do_dpc - dpc routine
 2269 * @work: in our case, pointer to the dpc_work embedded in the adapter structure
 2270 *
 2271 * This routine is a work item scheduled by the interrupt handler to perform
 2272 * the background processing for interrupts.  It runs from a workqueue in
 2273 * process context, so it is allowed to sleep (e.g. when waiting on
 2274 * completions).  In fact, the mid-level tries to sleep when it reaches the
 2275 * driver threshold "host->can_queue"; that would cause a panic if we were
 2276 * still in our interrupt code.
2277 **/
c4028958 2278static void qla4xxx_do_dpc(struct work_struct *work)
afaf5a2d 2279{
c4028958
DH
2280 struct scsi_qla_host *ha =
2281 container_of(work, struct scsi_qla_host, dpc_work);
477ffb9d 2282 int status = QLA_ERROR;
afaf5a2d 2283
f26b9044 2284	DEBUG2(printk("scsi%ld: %s: DPC handler waking up. "
f4f5df23
VC
2285 "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
2286 ha->host_no, __func__, ha->flags, ha->dpc_flags))
afaf5a2d
DS
2287
2288 /* Initialization not yet finished. Don't do anything yet. */
2289 if (!test_bit(AF_INIT_DONE, &ha->flags))
1b46807e 2290 return;
afaf5a2d 2291
2232be0d
LC
2292 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2293 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
2294 ha->host_no, __func__, ha->flags));
1b46807e 2295 return;
2232be0d
LC
2296 }
2297
f4f5df23
VC
2298 if (is_qla8022(ha)) {
2299 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
2300 qla4_8xxx_idc_lock(ha);
2301 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2302 QLA82XX_DEV_FAILED);
2303 qla4_8xxx_idc_unlock(ha);
2304 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
2305 qla4_8xxx_device_state_handler(ha);
2306 }
2307 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2308 qla4_8xxx_need_qsnt_handler(ha);
2309 }
2310 }
2311
2312 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
2313 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
afaf5a2d 2314 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
f4f5df23
VC
2315 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
2316 if (ql4xdontresethba) {
2317 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
2318 ha->host_no, __func__));
2319 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
2320 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2321 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
2322 goto dpc_post_reset_ha;
2323 }
2324 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
2325 test_bit(DPC_RESET_HA, &ha->dpc_flags))
2326 qla4xxx_recover_adapter(ha);
afaf5a2d 2327
477ffb9d 2328 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
afaf5a2d 2329 uint8_t wait_time = RESET_INTR_TOV;
afaf5a2d 2330
afaf5a2d
DS
2331 while ((readw(&ha->reg->ctrl_status) &
2332 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
2333 if (--wait_time == 0)
2334 break;
afaf5a2d 2335 msleep(1000);
afaf5a2d 2336 }
afaf5a2d
DS
2337 if (wait_time == 0)
2338 DEBUG2(printk("scsi%ld: %s: SR|FSR "
2339 "bit not cleared-- resetting\n",
2340 ha->host_no, __func__));
f4f5df23 2341 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
477ffb9d
DS
2342 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
2343 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
f4f5df23 2344 status = qla4xxx_recover_adapter(ha);
477ffb9d
DS
2345 }
2346 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
2347 if (status == QLA_SUCCESS)
f4f5df23 2348 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
2349 }
2350 }
2351
f4f5df23 2352dpc_post_reset_ha:
afaf5a2d
DS
2353 /* ---- process AEN? --- */
2354 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
2355 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
2356
2357 /* ---- Get DHCP IP Address? --- */
2358 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
2359 qla4xxx_get_dhcp_ip_address(ha);
2360
065aa1b4
VC
2361 /* ---- link change? --- */
2362 if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
2363 if (!test_bit(AF_LINK_UP, &ha->flags)) {
2364 /* ---- link down? --- */
2d7924e6 2365 qla4xxx_mark_all_devices_missing(ha);
065aa1b4
VC
2366 } else {
2367 /* ---- link up? --- *
 2368			 * F/W will auto-login to all devices ONLY ONCE after
 2369			 * link up during driver initialization and runtime
 2370			 * fatal error recovery. Therefore, the driver must
 2371			 * manually re-login to devices when recovering from
2372 * connection failures, logouts, expired KATO, etc. */
2373
2d7924e6 2374 qla4xxx_relogin_all_devices(ha);
065aa1b4
VC
2375 }
2376 }
afaf5a2d
DS
2377}
2378
2379/**
2380 * qla4xxx_free_adapter - release the adapter
2381 * @ha: pointer to adapter structure
2382 **/
2383static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
2384{
2385
2386 if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
2387 /* Turn-off interrupts on the card. */
f4f5df23 2388 ha->isp_ops->disable_intrs(ha);
afaf5a2d
DS
2389 }
2390
f4f5df23
VC
2391 /* Remove timer thread, if present */
2392 if (ha->timer_active)
2393 qla4xxx_stop_timer(ha);
2394
afaf5a2d
DS
2395 /* Kill the kernel thread for this host */
2396 if (ha->dpc_thread)
2397 destroy_workqueue(ha->dpc_thread);
2398
b3a271a9
MR
2399 /* Kill the kernel thread for this host */
2400 if (ha->task_wq)
2401 destroy_workqueue(ha->task_wq);
2402
f4f5df23
VC
2403 /* Put firmware in known state */
2404 ha->isp_ops->reset_firmware(ha);
afaf5a2d 2405
f4f5df23
VC
2406 if (is_qla8022(ha)) {
2407 qla4_8xxx_idc_lock(ha);
2408 qla4_8xxx_clear_drv_active(ha);
2409 qla4_8xxx_idc_unlock(ha);
2410 }
afaf5a2d 2411
afaf5a2d
DS
2412 /* Detach interrupts */
2413 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
f4f5df23 2414 qla4xxx_free_irqs(ha);
afaf5a2d 2415
bee4fe8e
DS
2416 /* free extra memory */
2417 qla4xxx_mem_free(ha);
f4f5df23
VC
2418}
2419
2420int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
2421{
2422 int status = 0;
2423 uint8_t revision_id;
2424 unsigned long mem_base, mem_len, db_base, db_len;
2425 struct pci_dev *pdev = ha->pdev;
2426
2427 status = pci_request_regions(pdev, DRIVER_NAME);
2428 if (status) {
2429 printk(KERN_WARNING
2430 "scsi(%ld) Failed to reserve PIO regions (%s) "
2431 "status=%d\n", ha->host_no, pci_name(pdev), status);
2432 goto iospace_error_exit;
2433 }
2434
2435 pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
2436 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
2437 __func__, revision_id));
2438 ha->revision_id = revision_id;
bee4fe8e 2439
f4f5df23
VC
2440 /* remap phys address */
2441 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
2442 mem_len = pci_resource_len(pdev, 0);
2443 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
2444 __func__, mem_base, mem_len));
afaf5a2d 2445
f4f5df23
VC
2446 /* mapping of pcibase pointer */
2447 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
2448 if (!ha->nx_pcibase) {
2449 printk(KERN_ERR
2450 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
2451 pci_release_regions(ha->pdev);
2452 goto iospace_error_exit;
2453 }
2454
 2455	/* Mapping of IO base pointer, doorbell read and write pointers */
2456
2457 /* mapping of IO base pointer */
2458 ha->qla4_8xxx_reg =
2459 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase +
2460 0xbc000 + (ha->pdev->devfn << 11));
2461
2462 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
2463 db_len = pci_resource_len(pdev, 4);
2464
2657c800
SS
2465 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
2466 QLA82XX_CAM_RAM_DB2);
f4f5df23 2467
2657c800 2468 return 0;
f4f5df23
VC
2469iospace_error_exit:
2470 return -ENOMEM;
afaf5a2d
DS
2471}
2472
2473/***
2474 * qla4xxx_iospace_config - maps registers
2475 * @ha: pointer to adapter structure
2476 *
 2477 * This routine maps the HBA's registers from the PCI address space
 2478 * into the kernel virtual address space for memory-mapped I/O.
2479 **/
f4f5df23 2480int qla4xxx_iospace_config(struct scsi_qla_host *ha)
afaf5a2d
DS
2481{
2482 unsigned long pio, pio_len, pio_flags;
2483 unsigned long mmio, mmio_len, mmio_flags;
2484
2485 pio = pci_resource_start(ha->pdev, 0);
2486 pio_len = pci_resource_len(ha->pdev, 0);
2487 pio_flags = pci_resource_flags(ha->pdev, 0);
2488 if (pio_flags & IORESOURCE_IO) {
2489 if (pio_len < MIN_IOBASE_LEN) {
c2660df3 2490 ql4_printk(KERN_WARNING, ha,
afaf5a2d
DS
2491 "Invalid PCI I/O region size\n");
2492 pio = 0;
2493 }
2494 } else {
c2660df3 2495 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
afaf5a2d
DS
2496 pio = 0;
2497 }
2498
2499 /* Use MMIO operations for all accesses. */
2500 mmio = pci_resource_start(ha->pdev, 1);
2501 mmio_len = pci_resource_len(ha->pdev, 1);
2502 mmio_flags = pci_resource_flags(ha->pdev, 1);
2503
2504 if (!(mmio_flags & IORESOURCE_MEM)) {
c2660df3
VC
2505 ql4_printk(KERN_ERR, ha,
 2506			   "region #1 not an MMIO resource, aborting\n");
afaf5a2d
DS
2507
2508 goto iospace_error_exit;
2509 }
c2660df3 2510
afaf5a2d 2511 if (mmio_len < MIN_IOBASE_LEN) {
c2660df3
VC
2512 ql4_printk(KERN_ERR, ha,
2513 "Invalid PCI mem region size, aborting\n");
afaf5a2d
DS
2514 goto iospace_error_exit;
2515 }
2516
2517 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
c2660df3
VC
2518 ql4_printk(KERN_WARNING, ha,
2519 "Failed to reserve PIO/MMIO regions\n");
afaf5a2d
DS
2520
2521 goto iospace_error_exit;
2522 }
2523
2524 ha->pio_address = pio;
2525 ha->pio_length = pio_len;
2526 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
2527 if (!ha->reg) {
c2660df3
VC
2528 ql4_printk(KERN_ERR, ha,
2529 "cannot remap MMIO, aborting\n");
afaf5a2d
DS
2530
2531 goto iospace_error_exit;
2532 }
2533
2534 return 0;
2535
2536iospace_error_exit:
2537 return -ENOMEM;
2538}
2539
f4f5df23
VC
2540static struct isp_operations qla4xxx_isp_ops = {
2541 .iospace_config = qla4xxx_iospace_config,
2542 .pci_config = qla4xxx_pci_config,
2543 .disable_intrs = qla4xxx_disable_intrs,
2544 .enable_intrs = qla4xxx_enable_intrs,
2545 .start_firmware = qla4xxx_start_firmware,
2546 .intr_handler = qla4xxx_intr_handler,
2547 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
2548 .reset_chip = qla4xxx_soft_reset,
2549 .reset_firmware = qla4xxx_hw_reset,
2550 .queue_iocb = qla4xxx_queue_iocb,
2551 .complete_iocb = qla4xxx_complete_iocb,
2552 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
2553 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
2554 .get_sys_info = qla4xxx_get_sys_info,
2555};
2556
2557static struct isp_operations qla4_8xxx_isp_ops = {
2558 .iospace_config = qla4_8xxx_iospace_config,
2559 .pci_config = qla4_8xxx_pci_config,
2560 .disable_intrs = qla4_8xxx_disable_intrs,
2561 .enable_intrs = qla4_8xxx_enable_intrs,
2562 .start_firmware = qla4_8xxx_load_risc,
2563 .intr_handler = qla4_8xxx_intr_handler,
2564 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine,
2565 .reset_chip = qla4_8xxx_isp_reset,
2566 .reset_firmware = qla4_8xxx_stop_firmware,
2567 .queue_iocb = qla4_8xxx_queue_iocb,
2568 .complete_iocb = qla4_8xxx_complete_iocb,
2569 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out,
2570 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in,
2571 .get_sys_info = qla4_8xxx_get_sys_info,
2572};
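/*
 * ha->isp_ops is pointed at one of the two tables above at probe time, based
 * on is_qla8022(), so common code can invoke chip-specific entry points
 * without further device checks.
 */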
2573
2574uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2575{
2576 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
2577}
2578
2579uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
2580{
2581 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out));
2582}
2583
2584uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2585{
2586 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
2587}
2588
2589uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
2590{
2591 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
2592}
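/*
 * ISP4xxx keeps the request-out/response-in indices in the DMA'd shadow_regs
 * structure in host memory, whereas ISP82xx exposes them as memory-mapped
 * registers; either way only the low 16 bits are significant.
 */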
2593
2a991c21
MR
2594static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
2595{
2596 struct scsi_qla_host *ha = data;
2597 char *str = buf;
2598 int rc;
2599
2600 switch (type) {
2601 case ISCSI_BOOT_ETH_FLAGS:
2602 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2603 break;
2604 case ISCSI_BOOT_ETH_INDEX:
2605 rc = sprintf(str, "0\n");
2606 break;
2607 case ISCSI_BOOT_ETH_MAC:
2608 rc = sysfs_format_mac(str, ha->my_mac,
2609 MAC_ADDR_LEN);
2610 break;
2611 default:
2612 rc = -ENOSYS;
2613 break;
2614 }
2615 return rc;
2616}
2617
2618static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
2619{
2620 int rc;
2621
2622 switch (type) {
2623 case ISCSI_BOOT_ETH_FLAGS:
2624 case ISCSI_BOOT_ETH_MAC:
2625 case ISCSI_BOOT_ETH_INDEX:
2626 rc = S_IRUGO;
2627 break;
2628 default:
2629 rc = 0;
2630 break;
2631 }
2632 return rc;
2633}
2634
2635static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
2636{
2637 struct scsi_qla_host *ha = data;
2638 char *str = buf;
2639 int rc;
2640
2641 switch (type) {
2642 case ISCSI_BOOT_INI_INITIATOR_NAME:
2643 rc = sprintf(str, "%s\n", ha->name_string);
2644 break;
2645 default:
2646 rc = -ENOSYS;
2647 break;
2648 }
2649 return rc;
2650}
2651
2652static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
2653{
2654 int rc;
2655
2656 switch (type) {
2657 case ISCSI_BOOT_INI_INITIATOR_NAME:
2658 rc = S_IRUGO;
2659 break;
2660 default:
2661 rc = 0;
2662 break;
2663 }
2664 return rc;
2665}
2666
2667static ssize_t
2668qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
2669 char *buf)
2670{
2671 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2672 char *str = buf;
2673 int rc;
2674
2675 switch (type) {
2676 case ISCSI_BOOT_TGT_NAME:
2677 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
2678 break;
2679 case ISCSI_BOOT_TGT_IP_ADDR:
2680 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
2681 rc = sprintf(buf, "%pI4\n",
2682 &boot_conn->dest_ipaddr.ip_address);
2683 else
2684 rc = sprintf(str, "%pI6\n",
2685 &boot_conn->dest_ipaddr.ip_address);
2686 break;
2687 case ISCSI_BOOT_TGT_PORT:
2688 rc = sprintf(str, "%d\n", boot_conn->dest_port);
2689 break;
2690 case ISCSI_BOOT_TGT_CHAP_NAME:
2691 rc = sprintf(str, "%.*s\n",
2692 boot_conn->chap.target_chap_name_length,
2693 (char *)&boot_conn->chap.target_chap_name);
2694 break;
2695 case ISCSI_BOOT_TGT_CHAP_SECRET:
2696 rc = sprintf(str, "%.*s\n",
2697 boot_conn->chap.target_secret_length,
2698 (char *)&boot_conn->chap.target_secret);
2699 break;
2700 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2701 rc = sprintf(str, "%.*s\n",
2702 boot_conn->chap.intr_chap_name_length,
2703 (char *)&boot_conn->chap.intr_chap_name);
2704 break;
2705 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2706 rc = sprintf(str, "%.*s\n",
2707 boot_conn->chap.intr_secret_length,
2708 (char *)&boot_conn->chap.intr_secret);
2709 break;
2710 case ISCSI_BOOT_TGT_FLAGS:
2711 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2712 break;
2713 case ISCSI_BOOT_TGT_NIC_ASSOC:
2714 rc = sprintf(str, "0\n");
2715 break;
2716 default:
2717 rc = -ENOSYS;
2718 break;
2719 }
2720 return rc;
2721}
2722
2723static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
2724{
2725 struct scsi_qla_host *ha = data;
2726 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
2727
2728 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2729}
2730
2731static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
2732{
2733 struct scsi_qla_host *ha = data;
2734 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
2735
2736 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2737}
2738
2739static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
2740{
2741 int rc;
2742
2743 switch (type) {
2744 case ISCSI_BOOT_TGT_NAME:
2745 case ISCSI_BOOT_TGT_IP_ADDR:
2746 case ISCSI_BOOT_TGT_PORT:
2747 case ISCSI_BOOT_TGT_CHAP_NAME:
2748 case ISCSI_BOOT_TGT_CHAP_SECRET:
2749 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2750 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2751 case ISCSI_BOOT_TGT_NIC_ASSOC:
2752 case ISCSI_BOOT_TGT_FLAGS:
2753 rc = S_IRUGO;
2754 break;
2755 default:
2756 rc = 0;
2757 break;
2758 }
2759 return rc;
2760}
2761
2762static void qla4xxx_boot_release(void *data)
2763{
2764 struct scsi_qla_host *ha = data;
2765
2766 scsi_host_put(ha->host);
2767}
2768
2769static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
2770{
2771 dma_addr_t buf_dma;
2772 uint32_t addr, pri_addr, sec_addr;
2773 uint32_t offset;
2774 uint16_t func_num;
2775 uint8_t val;
2776 uint8_t *buf = NULL;
2777 size_t size = 13 * sizeof(uint8_t);
2778 int ret = QLA_SUCCESS;
2779
2780 func_num = PCI_FUNC(ha->pdev->devfn);
2781
2782 DEBUG2(ql4_printk(KERN_INFO, ha,
2783 "%s: Get FW boot info for 0x%x func %d\n", __func__,
2784 (is_qla4032(ha) ? PCI_DEVICE_ID_QLOGIC_ISP4032 :
2785 PCI_DEVICE_ID_QLOGIC_ISP8022), func_num));
2786
2787 if (is_qla4032(ha)) {
2788 if (func_num == 1) {
2789 addr = NVRAM_PORT0_BOOT_MODE;
2790 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
2791 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
2792 } else if (func_num == 3) {
2793 addr = NVRAM_PORT1_BOOT_MODE;
2794 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
2795 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
2796 } else {
2797 ret = QLA_ERROR;
2798 goto exit_boot_info;
2799 }
2800
2801 /* Check Boot Mode */
2802 val = rd_nvram_byte(ha, addr);
2803 if (!(val & 0x07)) {
2804 DEBUG2(ql4_printk(KERN_ERR, ha,
2805 "%s: Failed Boot options : 0x%x\n",
2806 __func__, val));
2807 ret = QLA_ERROR;
2808 goto exit_boot_info;
2809 }
2810
2811 /* get primary valid target index */
2812 val = rd_nvram_byte(ha, pri_addr);
2813 if (val & BIT_7)
2814 ddb_index[0] = (val & 0x7f);
2a991c21
MR
2815
2816 /* get secondary valid target index */
2817 val = rd_nvram_byte(ha, sec_addr);
2818 if (val & BIT_7)
2819 ddb_index[1] = (val & 0x7f);
2a991c21
MR
2820
2821 } else if (is_qla8022(ha)) {
2822 buf = dma_alloc_coherent(&ha->pdev->dev, size,
2823 &buf_dma, GFP_KERNEL);
2824 if (!buf) {
2825 DEBUG2(ql4_printk(KERN_ERR, ha,
2826 "%s: Unable to allocate dma buffer\n",
2827 __func__));
2828 ret = QLA_ERROR;
2829 goto exit_boot_info;
2830 }
2831
2832 if (ha->port_num == 0)
2833 offset = BOOT_PARAM_OFFSET_PORT0;
2834 else if (ha->port_num == 1)
2835 offset = BOOT_PARAM_OFFSET_PORT1;
2836 else {
2837 ret = QLA_ERROR;
2838 goto exit_boot_info_free;
2839 }
2840 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
2841 offset;
2842 if (qla4xxx_get_flash(ha, buf_dma, addr,
2843 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
 2844			DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash "
2845 "failed\n", ha->host_no, __func__));
2846 ret = QLA_ERROR;
2847 goto exit_boot_info_free;
2848 }
2849 /* Check Boot Mode */
2850 if (!(buf[1] & 0x07)) {
2851 DEBUG2(ql4_printk(KERN_INFO, ha,
2852 "Failed: Boot options : 0x%x\n",
2853 buf[1]));
2854 ret = QLA_ERROR;
2855 goto exit_boot_info_free;
2856 }
2857
2858 /* get primary valid target index */
2859 if (buf[2] & BIT_7)
2860 ddb_index[0] = buf[2] & 0x7f;
2a991c21
MR
2861
2862 /* get secondary valid target index */
2863 if (buf[11] & BIT_7)
2864 ddb_index[1] = buf[11] & 0x7f;
2a991c21
MR
2865 } else {
2866 ret = QLA_ERROR;
2867 goto exit_boot_info;
2868 }
2869
2870 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
2871 " target ID %d\n", __func__, ddb_index[0],
2872 ddb_index[1]));
2873
2874exit_boot_info_free:
2875 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
2876exit_boot_info:
2877 return ret;
2878}
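/*
 * Boot-target discovery in brief: on ISP4032 the per-port boot mode and the
 * primary/secondary target indices are read from NVRAM; on ISP8022 they are
 * read from a small FLASH region.  In both cases BIT_7 marks an index as
 * valid and the low seven bits carry the DDB index.
 */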
2879
2880static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
2881 struct ql4_boot_session_info *boot_sess,
2882 uint16_t ddb_index)
2883{
2884 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2885 struct dev_db_entry *fw_ddb_entry;
2886 dma_addr_t fw_ddb_entry_dma;
2887 uint16_t idx;
2888 uint16_t options;
2889 int ret = QLA_SUCCESS;
2890
2891 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2892 &fw_ddb_entry_dma, GFP_KERNEL);
2893 if (!fw_ddb_entry) {
2894 DEBUG2(ql4_printk(KERN_ERR, ha,
2895 "%s: Unable to allocate dma buffer.\n",
2896 __func__));
2897 ret = QLA_ERROR;
2898 return ret;
2899 }
2900
2901 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
2902 fw_ddb_entry_dma, ddb_index)) {
2903 DEBUG2(ql4_printk(KERN_ERR, ha,
2904 "%s: Flash DDB read Failed\n", __func__));
2905 ret = QLA_ERROR;
2906 goto exit_boot_target;
2907 }
2908
2909 /* Update target name and IP from DDB */
2910 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
2911 min(sizeof(boot_sess->target_name),
2912 sizeof(fw_ddb_entry->iscsi_name)));
2913
2914 options = le16_to_cpu(fw_ddb_entry->options);
2915 if (options & DDB_OPT_IPV6_DEVICE) {
2916 memcpy(&boot_conn->dest_ipaddr.ip_address,
2917 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
2918 } else {
2919 boot_conn->dest_ipaddr.ip_type = 0x1;
2920 memcpy(&boot_conn->dest_ipaddr.ip_address,
2921 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
2922 }
2923
2924 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
2925
2926 /* update chap information */
2927 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2928
2929 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
2930
2931 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
2932
2933 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
2934 target_chap_name,
2935 (char *)&boot_conn->chap.target_secret,
2936 idx);
2937 if (ret) {
2938 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
2939 ret = QLA_ERROR;
2940 goto exit_boot_target;
2941 }
2942
2943 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
2944 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
2945 }
2946
2947 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
2948
2949 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
2950
2951 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
2952 intr_chap_name,
2953 (char *)&boot_conn->chap.intr_secret,
2954 (idx + 1));
2955 if (ret) {
2956 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
2957 ret = QLA_ERROR;
2958 goto exit_boot_target;
2959 }
2960
2961 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
2962 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
2963 }
2964
2965exit_boot_target:
2966 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2967 fw_ddb_entry, fw_ddb_entry_dma);
2968 return ret;
2969}
2970
2971static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
2972{
2973 uint16_t ddb_index[2];
8de5b958
LC
2974 int ret = QLA_ERROR;
2975 int rval;
2a991c21
MR
2976
2977 memset(ddb_index, 0, sizeof(ddb_index));
8de5b958
LC
2978 ddb_index[0] = 0xffff;
2979 ddb_index[1] = 0xffff;
2a991c21
MR
2980 ret = get_fw_boot_info(ha, ddb_index);
2981 if (ret != QLA_SUCCESS) {
2982 DEBUG2(ql4_printk(KERN_ERR, ha,
2983 "%s: Failed to set boot info.\n", __func__));
2984 return ret;
2985 }
2986
8de5b958
LC
2987 if (ddb_index[0] == 0xffff)
2988 goto sec_target;
2989
2990 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
2a991c21 2991 ddb_index[0]);
8de5b958 2992 if (rval != QLA_SUCCESS) {
2a991c21
MR
2993 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
2994 "primary target\n", __func__));
8de5b958
LC
2995 } else
2996 ret = QLA_SUCCESS;
2a991c21 2997
8de5b958
LC
2998sec_target:
2999 if (ddb_index[1] == 0xffff)
3000 goto exit_get_boot_info;
3001
3002 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
2a991c21 3003 ddb_index[1]);
8de5b958 3004 if (rval != QLA_SUCCESS) {
2a991c21
MR
3005 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3006 "secondary target\n", __func__));
8de5b958
LC
3007 } else
3008 ret = QLA_SUCCESS;
3009
3010exit_get_boot_info:
2a991c21
MR
3011 return ret;
3012}
3013
3014static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
3015{
3016 struct iscsi_boot_kobj *boot_kobj;
3017
3018 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
3019 return 0;
3020
3021 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
3022 if (!ha->boot_kset)
3023 goto kset_free;
3024
3025 if (!scsi_host_get(ha->host))
3026 goto kset_free;
3027 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
3028 qla4xxx_show_boot_tgt_pri_info,
3029 qla4xxx_tgt_get_attr_visibility,
3030 qla4xxx_boot_release);
3031 if (!boot_kobj)
3032 goto put_host;
3033
3034 if (!scsi_host_get(ha->host))
3035 goto kset_free;
3036 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
3037 qla4xxx_show_boot_tgt_sec_info,
3038 qla4xxx_tgt_get_attr_visibility,
3039 qla4xxx_boot_release);
3040 if (!boot_kobj)
3041 goto put_host;
3042
3043 if (!scsi_host_get(ha->host))
3044 goto kset_free;
3045 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
3046 qla4xxx_show_boot_ini_info,
3047 qla4xxx_ini_get_attr_visibility,
3048 qla4xxx_boot_release);
3049 if (!boot_kobj)
3050 goto put_host;
3051
3052 if (!scsi_host_get(ha->host))
3053 goto kset_free;
3054 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
3055 qla4xxx_show_boot_eth_info,
3056 qla4xxx_eth_get_attr_visibility,
3057 qla4xxx_boot_release);
3058 if (!boot_kobj)
3059 goto put_host;
3060
3061 return 0;
3062
3063put_host:
3064 scsi_host_put(ha->host);
3065kset_free:
3066 iscsi_boot_destroy_kset(ha->boot_kset);
3067 return -ENOMEM;
3068}
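/*
 * The kset and kobjects created above export the primary and secondary boot
 * targets, the initiator name and the boot NIC through the iscsi_boot_sysfs
 * interface (typically visible under /sys/firmware/iscsi_boot*), for use by
 * user-space iSCSI boot tooling.
 */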
3069
4549415a
LC
3070
3071/**
 3072 * qla4xxx_create_chap_list - Create CHAP list from FLASH
3073 * @ha: pointer to adapter structure
3074 *
 3075 * Read FLASH and build a list of CHAP entries.  During login, when a CHAP
 3076 * entry is received, it is looked up in this list.  If the entry exists, the
 3077 * CHAP entry index is set in the DDB.  If the entry does not exist in this
 3078 * list, a new entry is added to the CHAP table in FLASH and the index
 3079 * obtained is used in the DDB.
3080 **/
3081static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
3082{
3083 int rval = 0;
3084 uint8_t *chap_flash_data = NULL;
3085 uint32_t offset;
3086 dma_addr_t chap_dma;
3087 uint32_t chap_size = 0;
3088
3089 if (is_qla40XX(ha))
3090 chap_size = MAX_CHAP_ENTRIES_40XX *
3091 sizeof(struct ql4_chap_table);
3092 else /* Single region contains CHAP info for both
3093 * ports which is divided into half for each port.
3094 */
3095 chap_size = ha->hw.flt_chap_size / 2;
3096
3097 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
3098 &chap_dma, GFP_KERNEL);
3099 if (!chap_flash_data) {
3100 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
3101 return;
3102 }
3103 if (is_qla40XX(ha))
3104 offset = FLASH_CHAP_OFFSET;
3105 else {
3106 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
3107 if (ha->port_num == 1)
3108 offset += chap_size;
3109 }
3110
3111 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
3112 if (rval != QLA_SUCCESS)
3113 goto exit_chap_list;
3114
3115 if (ha->chap_list == NULL)
3116 ha->chap_list = vmalloc(chap_size);
3117 if (ha->chap_list == NULL) {
3118 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
3119 goto exit_chap_list;
3120 }
3121
3122 memcpy(ha->chap_list, chap_flash_data, chap_size);
3123
3124exit_chap_list:
3125 dma_free_coherent(&ha->pdev->dev, chap_size,
3126 chap_flash_data, chap_dma);
3127 return;
3128}
3129
afaf5a2d
DS
3130/**
3131 * qla4xxx_probe_adapter - callback function to probe HBA
3132 * @pdev: pointer to pci_dev structure
 3133 * @ent: pointer to the matching pci_device_id entry
 3134 *
 3135 * This routine will probe for QLogic 4xxx iSCSI host adapters.
3136 * It returns zero if successful. It also initializes all data necessary for
3137 * the driver.
3138 **/
3139static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
3140 const struct pci_device_id *ent)
3141{
3142 int ret = -ENODEV, status;
3143 struct Scsi_Host *host;
3144 struct scsi_qla_host *ha;
afaf5a2d
DS
3145 uint8_t init_retry_count = 0;
3146 char buf[34];
f4f5df23 3147 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
f9880e76 3148 uint32_t dev_state;
afaf5a2d
DS
3149
3150 if (pci_enable_device(pdev))
3151 return -1;
3152
b3a271a9 3153 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
afaf5a2d
DS
3154 if (host == NULL) {
3155 printk(KERN_WARNING
3156 "qla4xxx: Couldn't allocate host from scsi layer!\n");
3157 goto probe_disable_device;
3158 }
3159
3160 /* Clear our data area */
b3a271a9 3161 ha = to_qla_host(host);
afaf5a2d
DS
3162 memset(ha, 0, sizeof(*ha));
3163
3164 /* Save the information from PCI BIOS. */
3165 ha->pdev = pdev;
3166 ha->host = host;
3167 ha->host_no = host->host_no;
3168
2232be0d
LC
3169 pci_enable_pcie_error_reporting(pdev);
3170
f4f5df23
VC
3171 /* Setup Runtime configurable options */
3172 if (is_qla8022(ha)) {
3173 ha->isp_ops = &qla4_8xxx_isp_ops;
3174 rwlock_init(&ha->hw_lock);
3175 ha->qdr_sn_window = -1;
3176 ha->ddr_mn_window = -1;
3177 ha->curr_window = 255;
3178 ha->func_num = PCI_FUNC(ha->pdev->devfn);
3179 nx_legacy_intr = &legacy_intr[ha->func_num];
3180 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
3181 ha->nx_legacy_intr.tgt_status_reg =
3182 nx_legacy_intr->tgt_status_reg;
3183 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
3184 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
3185 } else {
3186 ha->isp_ops = &qla4xxx_isp_ops;
3187 }
3188
2232be0d
LC
3189 /* Set EEH reset type to fundamental if required by hba */
3190 if (is_qla8022(ha))
3191 pdev->needs_freset = 1;
3192
afaf5a2d 3193 /* Configure PCI I/O space. */
f4f5df23 3194 ret = ha->isp_ops->iospace_config(ha);
afaf5a2d 3195 if (ret)
f4f5df23 3196 goto probe_failed_ioconfig;
afaf5a2d 3197
c2660df3 3198 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
afaf5a2d
DS
3199 pdev->device, pdev->irq, ha->reg);
3200
3201 qla4xxx_config_dma_addressing(ha);
3202
3203 /* Initialize lists and spinlocks. */
afaf5a2d
DS
3204 INIT_LIST_HEAD(&ha->free_srb_q);
3205
3206 mutex_init(&ha->mbox_sem);
4549415a 3207 mutex_init(&ha->chap_sem);
f4f5df23 3208 init_completion(&ha->mbx_intr_comp);
95d31262 3209 init_completion(&ha->disable_acb_comp);
afaf5a2d
DS
3210
3211 spin_lock_init(&ha->hardware_lock);
afaf5a2d
DS
3212
3213 /* Allocate dma buffers */
3214 if (qla4xxx_mem_alloc(ha)) {
c2660df3
VC
3215 ql4_printk(KERN_WARNING, ha,
3216 "[ERROR] Failed to allocate memory for adapter\n");
afaf5a2d
DS
3217
3218 ret = -ENOMEM;
3219 goto probe_failed;
3220 }
3221
b3a271a9
MR
3222 host->cmd_per_lun = 3;
3223 host->max_channel = 0;
3224 host->max_lun = MAX_LUNS - 1;
3225 host->max_id = MAX_TARGETS;
3226 host->max_cmd_len = IOCB_MAX_CDB_LEN;
3227 host->can_queue = MAX_SRBS ;
3228 host->transportt = qla4xxx_scsi_transport;
3229
3230 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
3231 if (ret) {
3232 ql4_printk(KERN_WARNING, ha,
3233 "%s: scsi_init_shared_tag_map failed\n", __func__);
3234 goto probe_failed;
3235 }
3236
3237 pci_set_drvdata(pdev, ha);
3238
3239 ret = scsi_add_host(host, &pdev->dev);
3240 if (ret)
3241 goto probe_failed;
3242
f4f5df23
VC
3243 if (is_qla8022(ha))
3244 (void) qla4_8xxx_get_flash_info(ha);
3245
afaf5a2d
DS
3246 /*
3247 * Initialize the Host adapter request/response queues and
3248 * firmware
3249 * NOTE: interrupts enabled upon successful completion
3250 */
0e7e8501 3251 status = qla4xxx_initialize_adapter(ha);
f4f5df23
VC
3252 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
3253 init_retry_count++ < MAX_INIT_RETRIES) {
f9880e76
PM
3254
3255 if (is_qla8022(ha)) {
3256 qla4_8xxx_idc_lock(ha);
3257 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3258 qla4_8xxx_idc_unlock(ha);
3259 if (dev_state == QLA82XX_DEV_FAILED) {
3260 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
3261 "initialize adapter. H/W is in failed state\n",
3262 __func__);
3263 break;
3264 }
3265 }
afaf5a2d
DS
3266 DEBUG2(printk("scsi: %s: retrying adapter initialization "
3267 "(%d)\n", __func__, init_retry_count));
f4f5df23
VC
3268
3269 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
3270 continue;
3271
0e7e8501 3272 status = qla4xxx_initialize_adapter(ha);
afaf5a2d 3273 }
f4f5df23
VC
3274
3275 if (!test_bit(AF_ONLINE, &ha->flags)) {
c2660df3 3276 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
afaf5a2d 3277
fe998527
LC
3278 if (is_qla8022(ha) && ql4xdontresethba) {
3279 /* Put the device in failed state. */
3280 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
3281 qla4_8xxx_idc_lock(ha);
3282 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3283 QLA82XX_DEV_FAILED);
3284 qla4_8xxx_idc_unlock(ha);
3285 }
afaf5a2d 3286 ret = -ENODEV;
b3a271a9 3287 goto remove_host;
afaf5a2d
DS
3288 }
3289
afaf5a2d
DS
3290 /* Startup the kernel thread for this host adapter. */
3291 DEBUG2(printk("scsi: %s: Starting kernel thread for "
3292 "qla4xxx_dpc\n", __func__));
3293 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
3294 ha->dpc_thread = create_singlethread_workqueue(buf);
3295 if (!ha->dpc_thread) {
c2660df3 3296 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
afaf5a2d 3297 ret = -ENODEV;
b3a271a9 3298 goto remove_host;
afaf5a2d 3299 }
c4028958 3300 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
afaf5a2d 3301
b3a271a9
MR
3302 sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
3303 ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
3304 if (!ha->task_wq) {
3305 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
3306 ret = -ENODEV;
3307 goto remove_host;
3308 }
3309
f4f5df23
VC
3310 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
3311 * (which is called indirectly by qla4xxx_initialize_adapter),
3312 * so that irqs will be registered after crbinit but before
3313 * mbx_intr_enable.
3314 */
3315 if (!is_qla8022(ha)) {
3316 ret = qla4xxx_request_irqs(ha);
3317 if (ret) {
3318 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
3319 "interrupt %d already in use.\n", pdev->irq);
b3a271a9 3320 goto remove_host;
f4f5df23 3321 }
afaf5a2d 3322 }
afaf5a2d 3323
2232be0d 3324 pci_save_state(ha->pdev);
f4f5df23 3325 ha->isp_ops->enable_intrs(ha);
afaf5a2d
DS
3326
3327 /* Start timer thread. */
3328 qla4xxx_start_timer(ha, qla4xxx_timer, 1);
3329
3330 set_bit(AF_INIT_DONE, &ha->flags);
3331
afaf5a2d
DS
3332 printk(KERN_INFO
3333 " QLogic iSCSI HBA Driver version: %s\n"
3334 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
3335 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
3336 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
3337 ha->patch_number, ha->build_number);
ed1086e0 3338
4549415a
LC
3339 qla4xxx_create_chap_list(ha);
3340
2a991c21
MR
3341 if (qla4xxx_setup_boot_info(ha))
 3342		ql4_printk(KERN_ERR, ha, "%s: ISCSI boot info setup failed\n",
3343 __func__);
3344
ed1086e0 3345 qla4xxx_create_ifaces(ha);
afaf5a2d
DS
3346 return 0;
3347
b3a271a9
MR
3348remove_host:
3349 scsi_remove_host(ha->host);
3350
afaf5a2d
DS
3351probe_failed:
3352 qla4xxx_free_adapter(ha);
f4f5df23
VC
3353
3354probe_failed_ioconfig:
2232be0d 3355 pci_disable_pcie_error_reporting(pdev);
afaf5a2d
DS
3356 scsi_host_put(ha->host);
3357
3358probe_disable_device:
3359 pci_disable_device(pdev);
3360
3361 return ret;
3362}
3363
7eece5a0
KH
3364/**
 3365 * qla4xxx_prevent_other_port_reinit - prevent the other port from re-initializing
3366 * @ha: pointer to adapter structure
3367 *
 3368 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
 3369 * so that the other port will not re-initialize while this ha is being
 3370 * removed due to driver unload or HBA hotplug.
3371 **/
3372static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
3373{
3374 struct scsi_qla_host *other_ha = NULL;
3375 struct pci_dev *other_pdev = NULL;
3376 int fn = ISP4XXX_PCI_FN_2;
3377
 3378	/* iSCSI function numbers for ISP4xxx are 1 and 3 */
3379 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
3380 fn = ISP4XXX_PCI_FN_1;
3381
3382 other_pdev =
3383 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
3384 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
3385 fn));
3386
3387 /* Get other_ha if other_pdev is valid and state is enable*/
3388 if (other_pdev) {
3389 if (atomic_read(&other_pdev->enable_cnt)) {
3390 other_ha = pci_get_drvdata(other_pdev);
3391 if (other_ha) {
3392 set_bit(AF_HA_REMOVAL, &other_ha->flags);
3393 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
3394 "Prevent %s reinit\n", __func__,
3395 dev_name(&other_ha->pdev->dev)));
3396 }
3397 }
3398 pci_dev_put(other_pdev);
3399 }
3400}
3401
afaf5a2d
DS
3402/**
 3403 * qla4xxx_remove_adapter - callback function to remove adapter.
 3404 * @pdev: PCI device pointer
3405 **/
3406static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
3407{
3408 struct scsi_qla_host *ha;
3409
3410 ha = pci_get_drvdata(pdev);
3411
7eece5a0
KH
3412 if (!is_qla8022(ha))
3413 qla4xxx_prevent_other_port_reinit(ha);
bee4fe8e 3414
ed1086e0
VC
3415 /* destroy iface from sysfs */
3416 qla4xxx_destroy_ifaces(ha);
3417
2a991c21
MR
3418 if (ha->boot_kset)
3419 iscsi_boot_destroy_kset(ha->boot_kset);
3420
afaf5a2d
DS
3421 scsi_remove_host(ha->host);
3422
3423 qla4xxx_free_adapter(ha);
3424
3425 scsi_host_put(ha->host);
3426
2232be0d 3427 pci_disable_pcie_error_reporting(pdev);
f4f5df23 3428 pci_disable_device(pdev);
afaf5a2d
DS
3429 pci_set_drvdata(pdev, NULL);
3430}
3431
3432/**
3433 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
3434 * @ha: HA context
3435 *
 3436 * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
 3437 * supported addressing method.
3438 */
47975477 3439static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
afaf5a2d
DS
3440{
3441 int retval;
3442
3443 /* Update our PCI device dma_mask for full 64 bit mask */
6a35528a
YH
3444 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
3445 if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
afaf5a2d
DS
3446 dev_dbg(&ha->pdev->dev,
3447 "Failed to set 64 bit PCI consistent mask; "
3448 "using 32 bit.\n");
3449 retval = pci_set_consistent_dma_mask(ha->pdev,
284901a9 3450 DMA_BIT_MASK(32));
afaf5a2d
DS
3451 }
3452 } else
284901a9 3453 retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
afaf5a2d
DS
3454}
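/*
 * Prefer a 64-bit DMA mask; if either the streaming or the coherent mask
 * cannot be widened to 64 bits, fall back to 32-bit addressing.
 */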
3455
3456static int qla4xxx_slave_alloc(struct scsi_device *sdev)
3457{
b3a271a9
MR
3458 struct iscsi_cls_session *cls_sess;
3459 struct iscsi_session *sess;
3460 struct ddb_entry *ddb;
8bb4033d 3461 int queue_depth = QL4_DEF_QDEPTH;
afaf5a2d 3462
b3a271a9
MR
3463 cls_sess = starget_to_session(sdev->sdev_target);
3464 sess = cls_sess->dd_data;
3465 ddb = sess->dd_data;
3466
afaf5a2d
DS
3467 sdev->hostdata = ddb;
3468 sdev->tagged_supported = 1;
8bb4033d
VC
3469
3470 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
3471 queue_depth = ql4xmaxqdepth;
3472
3473 scsi_activate_tcq(sdev, queue_depth);
afaf5a2d
DS
3474 return 0;
3475}
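
Usage note, inferred from the checks above rather than stated in the source: ql4xmaxqdepth only overrides QL4_DEF_QDEPTH when it lies in the range 1..0xffff, so for example ql4xmaxqdepth=64 makes scsi_activate_tcq() report a depth of 64, while 0 or any value above 0xffff falls back to the default.
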
3476
3477static int qla4xxx_slave_configure(struct scsi_device *sdev)
3478{
3479 sdev->tagged_supported = 1;
3480 return 0;
3481}
3482
3483static void qla4xxx_slave_destroy(struct scsi_device *sdev)
3484{
3485 scsi_deactivate_tcq(sdev, 1);
3486}
3487
3488/**
3489 * qla4xxx_del_from_active_array - returns an active srb
3490 * @ha: Pointer to host adapter structure.
fd589a8f 3491 * @index: index into the active_array
afaf5a2d
DS
3492 *
3493 * This routine removes and returns the srb at the specified index
3494 **/
f4f5df23
VC
3495struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
3496 uint32_t index)
afaf5a2d
DS
3497{
3498 struct srb *srb = NULL;
5369887a 3499 struct scsi_cmnd *cmd = NULL;
afaf5a2d 3500
5369887a
VC
3501 cmd = scsi_host_find_tag(ha->host, index);
3502 if (!cmd)
afaf5a2d
DS
3503 return srb;
3504
5369887a
VC
3505 srb = (struct srb *)CMD_SP(cmd);
3506 if (!srb)
afaf5a2d
DS
3507 return srb;
3508
3509 /* update counters */
3510 if (srb->flags & SRB_DMA_VALID) {
3511 ha->req_q_count += srb->iocb_cnt;
3512 ha->iocb_cnt -= srb->iocb_cnt;
3513 if (srb->cmd)
5369887a
VC
3514 srb->cmd->host_scribble =
3515 (unsigned char *)(unsigned long) MAX_SRBS;
afaf5a2d
DS
3516 }
3517 return srb;
3518}
3519
afaf5a2d
DS
3520/**
3521 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
09a0f719 3522 * @ha: Pointer to host adapter structure.
afaf5a2d
DS
3523 * @cmd: Scsi Command to wait on.
3524 *
3525 * This routine waits for the command to be returned by the firmware,
3526 * up to a maximum wait time.
3527 **/
3528static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
3529 struct scsi_cmnd *cmd)
3530{
3531 int done = 0;
3532 struct srb *rp;
3533 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
2232be0d
LC
3534 int ret = SUCCESS;
3535
3536 /* Don't wait on the command if a PCI error is being handled
3537 * by the PCI AER driver.
3538 */
3539 if (unlikely(pci_channel_offline(ha->pdev)) ||
3540 (test_bit(AF_EEH_BUSY, &ha->flags))) {
3541 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
3542 ha->host_no, __func__);
3543 return ret;
3544 }
afaf5a2d
DS
3545
3546 do {
3547 /* Check to see if the command has been returned to the OS */
5369887a 3548 rp = (struct srb *) CMD_SP(cmd);
afaf5a2d
DS
3549 if (rp == NULL) {
3550 done++;
3551 break;
3552 }
3553
3554 msleep(2000);
3555 } while (max_wait_time--);
3556
3557 return done;
3558}
3559
3560/**
3561 * qla4xxx_wait_for_hba_online - waits for HBA to come online
3562 * @ha: Pointer to host adapter structure
3563 **/
3564static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
3565{
3566 unsigned long wait_online;
3567
f581a3f7 3568 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
afaf5a2d
DS
3569 while (time_before(jiffies, wait_online)) {
3570
3571 if (adapter_up(ha))
3572 return QLA_SUCCESS;
afaf5a2d
DS
3573
3574 msleep(2000);
3575 }
3576
3577 return QLA_ERROR;
3578}
3579
3580/**
ce545039 3581 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
fd589a8f 3582 * @ha: pointer to HBA
afaf5a2d
DS
3583 * @stgt: pointer to the SCSI target to wait on
3584 * @sdev: pointer to the SCSI device to wait on (NULL means all luns on @stgt)
3585 *
3586 * This function waits for all outstanding commands to a target or lun to
3587 * complete. It returns 0 if all pending commands are returned and 1 otherwise.
3588 **/
ce545039
MC
3589static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
3590 struct scsi_target *stgt,
3591 struct scsi_device *sdev)
afaf5a2d
DS
3592{
3593 int cnt;
3594 int status = 0;
3595 struct scsi_cmnd *cmd;
3596
3597 /*
ce545039
MC
3598 * Waiting for all commands for the designated target or dev
3599 * in the active array
afaf5a2d
DS
3600 */
3601 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
3602 cmd = scsi_host_find_tag(ha->host, cnt);
ce545039
MC
3603 if (cmd && stgt == scsi_target(cmd->device) &&
3604 (!sdev || sdev == cmd->device)) {
afaf5a2d
DS
3605 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3606 status++;
3607 break;
3608 }
3609 }
3610 }
3611 return status;
3612}
3613
09a0f719
VC
3614/**
3615 * qla4xxx_eh_abort - callback for abort task.
3616 * @cmd: Pointer to Linux's SCSI command structure
3617 *
3618 * This routine is called by the Linux OS to abort the specified
3619 * command.
3620 **/
3621static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
3622{
3623 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3624 unsigned int id = cmd->device->id;
3625 unsigned int lun = cmd->device->lun;
92b3e5bb 3626 unsigned long flags;
09a0f719
VC
3627 struct srb *srb = NULL;
3628 int ret = SUCCESS;
3629 int wait = 0;
3630
c2660df3 3631 ql4_printk(KERN_INFO, ha,
5cd049a5
CH
3632 "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
3633 ha->host_no, id, lun, cmd);
09a0f719 3634
92b3e5bb 3635 spin_lock_irqsave(&ha->hardware_lock, flags);
09a0f719 3636 srb = (struct srb *) CMD_SP(cmd);
92b3e5bb
MC
3637 if (!srb) {
3638 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09a0f719 3639 return SUCCESS;
92b3e5bb 3640 }
09a0f719 3641 kref_get(&srb->srb_ref);
92b3e5bb 3642 spin_unlock_irqrestore(&ha->hardware_lock, flags);
09a0f719
VC
3643
3644 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
3645 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
3646 ha->host_no, id, lun));
3647 ret = FAILED;
3648 } else {
3649 DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
3650 ha->host_no, id, lun));
3651 wait = 1;
3652 }
3653
3654 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3655
3656 /* Wait for command to complete */
3657 if (wait) {
3658 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
3659 DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
3660 ha->host_no, id, lun));
3661 ret = FAILED;
3662 }
3663 }
3664
c2660df3 3665 ql4_printk(KERN_INFO, ha,
09a0f719 3666 "scsi%ld:%d:%d: Abort command - %s\n",
25985edc 3667 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
09a0f719
VC
3668
3669 return ret;
3670}
3671
afaf5a2d
DS
3672/**
3673 * qla4xxx_eh_device_reset - callback for device (lun) reset.
3674 * @cmd: Pointer to Linux's SCSI command structure
3675 *
3676 * This routine is called by the Linux OS to reset the specified lun
3677 * (device) on the target.
3678 **/
3679static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
3680{
3681 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3682 struct ddb_entry *ddb_entry = cmd->device->hostdata;
afaf5a2d
DS
3683 int ret = FAILED, stat;
3684
612f7348 3685 if (!ddb_entry)
afaf5a2d
DS
3686 return ret;
3687
c01be6dc
MC
3688 ret = iscsi_block_scsi_eh(cmd);
3689 if (ret)
3690 return ret;
3691 ret = FAILED;
3692
c2660df3 3693 ql4_printk(KERN_INFO, ha,
afaf5a2d
DS
3694 "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
3695 cmd->device->channel, cmd->device->id, cmd->device->lun);
3696
3697 DEBUG2(printk(KERN_INFO
3698 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
3699 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
242f9dcb 3700 cmd, jiffies, cmd->request->timeout / HZ,
afaf5a2d
DS
3701 ha->dpc_flags, cmd->result, cmd->allowed));
3702
3703 /* FIXME: wait for hba to go online */
3704 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
3705 if (stat != QLA_SUCCESS) {
c2660df3 3706 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
afaf5a2d
DS
3707 goto eh_dev_reset_done;
3708 }
3709
ce545039
MC
3710 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3711 cmd->device)) {
c2660df3 3712 ql4_printk(KERN_INFO, ha,
ce545039
MC
3713 "DEVICE RESET FAILED - waiting for "
3714 "commands.\n");
3715 goto eh_dev_reset_done;
afaf5a2d
DS
3716 }
3717
9d562913
DS
3718 /* Send marker. */
3719 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3720 MM_LUN_RESET) != QLA_SUCCESS)
3721 goto eh_dev_reset_done;
3722
c2660df3 3723 ql4_printk(KERN_INFO, ha,
afaf5a2d
DS
3724 "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
3725 ha->host_no, cmd->device->channel, cmd->device->id,
3726 cmd->device->lun);
3727
3728 ret = SUCCESS;
3729
3730eh_dev_reset_done:
3731
3732 return ret;
3733}
3734
ce545039
MC
3735/**
3736 * qla4xxx_eh_target_reset - callback for target reset.
3737 * @cmd: Pointer to Linux's SCSI command structure
3738 *
3739 * This routine is called by the Linux OS to reset the target.
3740 **/
3741static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
3742{
3743 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
3744 struct ddb_entry *ddb_entry = cmd->device->hostdata;
c01be6dc 3745 int stat, ret;
ce545039
MC
3746
3747 if (!ddb_entry)
3748 return FAILED;
3749
c01be6dc
MC
3750 ret = iscsi_block_scsi_eh(cmd);
3751 if (ret)
3752 return ret;
3753
ce545039
MC
3754 starget_printk(KERN_INFO, scsi_target(cmd->device),
3755 "WARM TARGET RESET ISSUED.\n");
3756
3757 DEBUG2(printk(KERN_INFO
3758 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
3759 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
242f9dcb 3760 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
ce545039
MC
3761 ha->dpc_flags, cmd->result, cmd->allowed));
3762
3763 stat = qla4xxx_reset_target(ha, ddb_entry);
3764 if (stat != QLA_SUCCESS) {
3765 starget_printk(KERN_INFO, scsi_target(cmd->device),
3766 "WARM TARGET RESET FAILED.\n");
3767 return FAILED;
3768 }
3769
ce545039
MC
3770 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
3771 NULL)) {
3772 starget_printk(KERN_INFO, scsi_target(cmd->device),
3773 "WARM TARGET DEVICE RESET FAILED - "
3774 "waiting for commands.\n");
3775 return FAILED;
3776 }
3777
9d562913
DS
3778 /* Send marker. */
3779 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
3780 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
3781 starget_printk(KERN_INFO, scsi_target(cmd->device),
3782 "WARM TARGET DEVICE RESET FAILED - "
3783 "marker iocb failed.\n");
3784 return FAILED;
3785 }
3786
ce545039
MC
3787 starget_printk(KERN_INFO, scsi_target(cmd->device),
3788 "WARM TARGET RESET SUCCEEDED.\n");
3789 return SUCCESS;
3790}
3791
afaf5a2d
DS
3792/**
3793 * qla4xxx_eh_host_reset - kernel callback
3794 * @cmd: Pointer to Linux's SCSI command structure
3795 *
3796 * This routine is invoked by the Linux kernel to perform fatal error
3797 * recovery on the specified adapter.
3798 **/
3799static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
3800{
3801 int return_status = FAILED;
3802 struct scsi_qla_host *ha;
3803
b3a271a9 3804 ha = to_qla_host(cmd->device->host);
afaf5a2d 3805
f4f5df23
VC
3806 if (ql4xdontresethba) {
3807 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3808 ha->host_no, __func__));
3809 return FAILED;
3810 }
3811
c2660df3 3812 ql4_printk(KERN_INFO, ha,
dca05c4c 3813 "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
afaf5a2d
DS
3814 cmd->device->channel, cmd->device->id, cmd->device->lun);
3815
3816 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
3817 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
3818 "DEAD.\n", ha->host_no, cmd->device->channel,
3819 __func__));
3820
3821 return FAILED;
3822 }
3823
f4f5df23
VC
3824 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3825 if (is_qla8022(ha))
3826 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3827 else
3828 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3829 }
50a29aec 3830
f4f5df23 3831 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
afaf5a2d 3832 return_status = SUCCESS;
afaf5a2d 3833
c2660df3 3834 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
25985edc 3835 return_status == FAILED ? "FAILED" : "SUCCEEDED");
afaf5a2d
DS
3836
3837 return return_status;
3838}
3839
95d31262
VC
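
The helper below carries no kernel-doc header; the following summary is inferred from the code itself and is not original documentation.

	/*
	 * qla4xxx_context_reset - reset the firmware context without a full
	 * adapter recovery
	 * @ha: pointer to host adapter structure
	 *
	 * Reads back the primary ACB (address control block), disables the
	 * ACB, waits for the disable-ACB completion, and then re-applies the
	 * saved ACB via qla4xxx_set_acb().
	 */
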
3840static int qla4xxx_context_reset(struct scsi_qla_host *ha)
3841{
3842 uint32_t mbox_cmd[MBOX_REG_COUNT];
3843 uint32_t mbox_sts[MBOX_REG_COUNT];
3844 struct addr_ctrl_blk_def *acb = NULL;
3845 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
3846 int rval = QLA_SUCCESS;
3847 dma_addr_t acb_dma;
3848
3849 acb = dma_alloc_coherent(&ha->pdev->dev,
3850 sizeof(struct addr_ctrl_blk_def),
3851 &acb_dma, GFP_KERNEL);
3852 if (!acb) {
3853 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
3854 __func__);
3855 rval = -ENOMEM;
3856 goto exit_port_reset;
3857 }
3858
3859 memset(acb, 0, acb_len);
3860
3861 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
3862 if (rval != QLA_SUCCESS) {
3863 rval = -EIO;
3864 goto exit_free_acb;
3865 }
3866
3867 rval = qla4xxx_disable_acb(ha);
3868 if (rval != QLA_SUCCESS) {
3869 rval = -EIO;
3870 goto exit_free_acb;
3871 }
3872
3873 wait_for_completion_timeout(&ha->disable_acb_comp,
3874 DISABLE_ACB_TOV * HZ);
3875
3876 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
3877 if (rval != QLA_SUCCESS) {
3878 rval = -EIO;
3879 goto exit_free_acb;
3880 }
3881
3882exit_free_acb:
3883 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
3884 acb, acb_dma);
3885exit_port_reset:
3886 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
3887 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
3888 return rval;
3889}
3890
3891static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
3892{
3893 struct scsi_qla_host *ha = to_qla_host(shost);
3894 int rval = QLA_SUCCESS;
3895
3896 if (ql4xdontresethba) {
3897 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
3898 __func__));
3899 rval = -EPERM;
3900 goto exit_host_reset;
3901 }
3902
3903 rval = qla4xxx_wait_for_hba_online(ha);
3904 if (rval != QLA_SUCCESS) {
3905 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
3906 "adapter\n", __func__));
3907 rval = -EIO;
3908 goto exit_host_reset;
3909 }
3910
3911 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3912 goto recover_adapter;
3913
3914 switch (reset_type) {
3915 case SCSI_ADAPTER_RESET:
3916 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3917 break;
3918 case SCSI_FIRMWARE_RESET:
3919 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3920 if (is_qla8022(ha))
3921 /* set firmware context reset */
3922 set_bit(DPC_RESET_HA_FW_CONTEXT,
3923 &ha->dpc_flags);
3924 else {
3925 rval = qla4xxx_context_reset(ha);
3926 goto exit_host_reset;
3927 }
3928 }
3929 break;
3930 }
3931
3932recover_adapter:
3933 rval = qla4xxx_recover_adapter(ha);
3934 if (rval != QLA_SUCCESS) {
3935 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
3936 __func__));
3937 rval = -EIO;
3938 }
3939
3940exit_host_reset:
3941 return rval;
3942}
3943
2232be0d
LC
3944/* The PCI AER driver recovers from all correctable errors without
3945 * driver intervention. For uncorrectable errors the PCI AER driver
3946 * calls the following device driver callbacks:
3947 *
3948 * - Fatal errors - link_reset
3949 * - Non-fatal errors - the driver's pci_error_detected(), which
3950 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
3951 *
3952 * The PCI AER driver then calls:
3953 * CAN_RECOVER - the driver's pci_mmio_enabled(); mmio_enabled
3954 * returns RECOVERED, or NEED_RESET if the firmware is hung
3955 * NEED_RESET - the driver's slot_reset()
3956 * DISCONNECT - the device is dead and cannot recover
3957 * RECOVERED - the driver's pci_resume()
3958 */
3959static pci_ers_result_t
3960qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3961{
3962 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
3963
3964 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
3965 ha->host_no, __func__, state);
3966
3967 if (!is_aer_supported(ha))
3968 return PCI_ERS_RESULT_NONE;
3969
3970 switch (state) {
3971 case pci_channel_io_normal:
3972 clear_bit(AF_EEH_BUSY, &ha->flags);
3973 return PCI_ERS_RESULT_CAN_RECOVER;
3974 case pci_channel_io_frozen:
3975 set_bit(AF_EEH_BUSY, &ha->flags);
3976 qla4xxx_mailbox_premature_completion(ha);
3977 qla4xxx_free_irqs(ha);
3978 pci_disable_device(pdev);
7b3595df
VC
3979 /* Return back all IOs */
3980 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
2232be0d
LC
3981 return PCI_ERS_RESULT_NEED_RESET;
3982 case pci_channel_io_perm_failure:
3983 set_bit(AF_EEH_BUSY, &ha->flags);
3984 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
3985 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3986 return PCI_ERS_RESULT_DISCONNECT;
3987 }
3988 return PCI_ERS_RESULT_NEED_RESET;
3989}
3990
3991/**
3992 * qla4xxx_pci_mmio_enabled() is called when
3993 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
3994 * and read/write to the device still works.
3995 **/
3996static pci_ers_result_t
3997qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
3998{
3999 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4000
4001 if (!is_aer_supported(ha))
4002 return PCI_ERS_RESULT_NONE;
4003
7b3595df 4004 return PCI_ERS_RESULT_RECOVERED;
2232be0d
LC
4005}
4006
7b3595df 4007static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2232be0d
LC
4008{
4009 uint32_t rval = QLA_ERROR;
7b3595df 4010 uint32_t ret = 0;
2232be0d
LC
4011 int fn;
4012 struct pci_dev *other_pdev = NULL;
4013
4014 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
4015
4016 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4017
4018 if (test_bit(AF_ONLINE, &ha->flags)) {
4019 clear_bit(AF_ONLINE, &ha->flags);
b3a271a9
MR
4020 clear_bit(AF_LINK_UP, &ha->flags);
4021 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2232be0d 4022 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2232be0d
LC
4023 }
4024
4025 fn = PCI_FUNC(ha->pdev->devfn);
4026 while (fn > 0) {
4027 fn--;
4028 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
4029 "func %x\n", ha->host_no, __func__, fn);
4030 /* Get the pci device given the domain, bus,
4031 * slot/function number */
4032 other_pdev =
4033 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
4034 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
4035 fn));
4036
4037 if (!other_pdev)
4038 continue;
4039
4040 if (atomic_read(&other_pdev->enable_cnt)) {
4041 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
4042 "func in enabled state%x\n", ha->host_no,
4043 __func__, fn);
4044 pci_dev_put(other_pdev);
4045 break;
4046 }
4047 pci_dev_put(other_pdev);
4048 }
4049
4050 /* The first function on the card, the reset owner, will
4051 * start and initialize the firmware. The other functions
4052 * on the card will only reset the firmware context.
4053 */
4054 if (!fn) {
4055 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
4056 "0x%x is the owner\n", ha->host_no, __func__,
4057 ha->pdev->devfn);
4058
4059 qla4_8xxx_idc_lock(ha);
4060 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4061 QLA82XX_DEV_COLD);
4062
4063 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
4064 QLA82XX_IDC_VERSION);
4065
4066 qla4_8xxx_idc_unlock(ha);
4067 clear_bit(AF_FW_RECOVERY, &ha->flags);
0e7e8501 4068 rval = qla4xxx_initialize_adapter(ha);
2232be0d
LC
4069 qla4_8xxx_idc_lock(ha);
4070
4071 if (rval != QLA_SUCCESS) {
4072 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
4073 "FAILED\n", ha->host_no, __func__);
4074 qla4_8xxx_clear_drv_active(ha);
4075 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4076 QLA82XX_DEV_FAILED);
4077 } else {
4078 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
4079 "READY\n", ha->host_no, __func__);
4080 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
4081 QLA82XX_DEV_READY);
4082 /* Clear driver state register */
4083 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
4084 qla4_8xxx_set_drv_active(ha);
7b3595df
VC
4085 ret = qla4xxx_request_irqs(ha);
4086 if (ret) {
4087 ql4_printk(KERN_WARNING, ha, "Failed to "
4088 "reserve interrupt %d already in use.\n",
4089 ha->pdev->irq);
4090 rval = QLA_ERROR;
4091 } else {
4092 ha->isp_ops->enable_intrs(ha);
4093 rval = QLA_SUCCESS;
4094 }
2232be0d
LC
4095 }
4096 qla4_8xxx_idc_unlock(ha);
4097 } else {
4098 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
4099 "the reset owner\n", ha->host_no, __func__,
4100 ha->pdev->devfn);
4101 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
4102 QLA82XX_DEV_READY)) {
4103 clear_bit(AF_FW_RECOVERY, &ha->flags);
0e7e8501 4104 rval = qla4xxx_initialize_adapter(ha);
7b3595df
VC
4105 if (rval == QLA_SUCCESS) {
4106 ret = qla4xxx_request_irqs(ha);
4107 if (ret) {
4108 ql4_printk(KERN_WARNING, ha, "Failed to"
4109 " reserve interrupt %d already in"
4110 " use.\n", ha->pdev->irq);
4111 rval = QLA_ERROR;
4112 } else {
4113 ha->isp_ops->enable_intrs(ha);
4114 rval = QLA_SUCCESS;
4115 }
4116 }
2232be0d
LC
4117 qla4_8xxx_idc_lock(ha);
4118 qla4_8xxx_set_drv_active(ha);
4119 qla4_8xxx_idc_unlock(ha);
4120 }
4121 }
4122 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4123 return rval;
4124}
4125
4126static pci_ers_result_t
4127qla4xxx_pci_slot_reset(struct pci_dev *pdev)
4128{
4129 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
4130 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4131 int rc;
4132
4133 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
4134 ha->host_no, __func__);
4135
4136 if (!is_aer_supported(ha))
4137 return PCI_ERS_RESULT_NONE;
4138
4139 /* Restore the saved state of PCIe device -
4140 * BAR registers, PCI Config space, PCIX, MSI,
4141 * IOV states
4142 */
4143 pci_restore_state(pdev);
4144
4145 /* pci_restore_state() clears the device's saved_state flag, so
4146 * save the state again to set saved_state for any later restore.
4147 */
4148 pci_save_state(pdev);
4149
4150 /* Initialize device or resume if in suspended state */
4151 rc = pci_enable_device(pdev);
4152 if (rc) {
25985edc 4153 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
2232be0d
LC
4154 "device after reset\n", ha->host_no, __func__);
4155 goto exit_slot_reset;
4156 }
4157
7b3595df 4158 ha->isp_ops->disable_intrs(ha);
2232be0d
LC
4159
4160 if (is_qla8022(ha)) {
4161 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
4162 ret = PCI_ERS_RESULT_RECOVERED;
4163 goto exit_slot_reset;
4164 } else
4165 goto exit_slot_reset;
4166 }
4167
4168exit_slot_reset:
4169 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
4170 ha->host_no, __func__, ret);
4171 return ret;
4172}
4173
4174static void
4175qla4xxx_pci_resume(struct pci_dev *pdev)
4176{
4177 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
4178 int ret;
4179
4180 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
4181 ha->host_no, __func__);
4182
4183 ret = qla4xxx_wait_for_hba_online(ha);
4184 if (ret != QLA_SUCCESS) {
4185 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
4186 "resume I/O from slot/link_reset\n", ha->host_no,
4187 __func__);
4188 }
4189
4190 pci_cleanup_aer_uncorrect_error_status(pdev);
4191 clear_bit(AF_EEH_BUSY, &ha->flags);
4192}
4193
4194static struct pci_error_handlers qla4xxx_err_handler = {
4195 .error_detected = qla4xxx_pci_error_detected,
4196 .mmio_enabled = qla4xxx_pci_mmio_enabled,
4197 .slot_reset = qla4xxx_pci_slot_reset,
4198 .resume = qla4xxx_pci_resume,
4199};
4200
afaf5a2d
DS
4201static struct pci_device_id qla4xxx_pci_tbl[] = {
4202 {
4203 .vendor = PCI_VENDOR_ID_QLOGIC,
4204 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
4205 .subvendor = PCI_ANY_ID,
4206 .subdevice = PCI_ANY_ID,
4207 },
4208 {
4209 .vendor = PCI_VENDOR_ID_QLOGIC,
4210 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
4211 .subvendor = PCI_ANY_ID,
4212 .subdevice = PCI_ANY_ID,
4213 },
d915058f
DS
4214 {
4215 .vendor = PCI_VENDOR_ID_QLOGIC,
4216 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
4217 .subvendor = PCI_ANY_ID,
4218 .subdevice = PCI_ANY_ID,
4219 },
f4f5df23
VC
4220 {
4221 .vendor = PCI_VENDOR_ID_QLOGIC,
4222 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
4223 .subvendor = PCI_ANY_ID,
4224 .subdevice = PCI_ANY_ID,
4225 },
afaf5a2d
DS
4226 {0, 0},
4227};
4228MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
4229
47975477 4230static struct pci_driver qla4xxx_pci_driver = {
afaf5a2d
DS
4231 .name = DRIVER_NAME,
4232 .id_table = qla4xxx_pci_tbl,
4233 .probe = qla4xxx_probe_adapter,
4234 .remove = qla4xxx_remove_adapter,
2232be0d 4235 .err_handler = &qla4xxx_err_handler,
afaf5a2d
DS
4236};
4237
4238static int __init qla4xxx_module_init(void)
4239{
4240 int ret;
4241
4242 /* Allocate cache for SRBs. */
4243 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
20c2df83 4244 SLAB_HWCACHE_ALIGN, NULL);
afaf5a2d
DS
4245 if (srb_cachep == NULL) {
4246 printk(KERN_ERR
4247 "%s: Unable to allocate SRB cache..."
4248 "Failing load!\n", DRIVER_NAME);
4249 ret = -ENOMEM;
4250 goto no_srp_cache;
4251 }
4252
4253 /* Derive version string. */
4254 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
11010fec 4255 if (ql4xextended_error_logging)
afaf5a2d
DS
4256 strcat(qla4xxx_version_str, "-debug");
4257
4258 qla4xxx_scsi_transport =
4259 iscsi_register_transport(&qla4xxx_iscsi_transport);
4260 if (!qla4xxx_scsi_transport){
4261 ret = -ENODEV;
4262 goto release_srb_cache;
4263 }
4264
afaf5a2d
DS
4265 ret = pci_register_driver(&qla4xxx_pci_driver);
4266 if (ret)
4267 goto unregister_transport;
4268
4269 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
4270 return 0;
5ae16db3 4271
afaf5a2d
DS
4272unregister_transport:
4273 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
4274release_srb_cache:
4275 kmem_cache_destroy(srb_cachep);
4276no_srp_cache:
4277 return ret;
4278}
4279
4280static void __exit qla4xxx_module_exit(void)
4281{
4282 pci_unregister_driver(&qla4xxx_pci_driver);
4283 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
4284 kmem_cache_destroy(srb_cachep);
4285}
4286
4287module_init(qla4xxx_module_init);
4288module_exit(qla4xxx_module_exit);
4289
4290MODULE_AUTHOR("QLogic Corporation");
4291MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
4292MODULE_LICENSE("GPL");
4293MODULE_VERSION(QLA4XXX_DRIVER_VERSION);