]> git.proxmox.com Git - mirror_ubuntu-kernels.git/blob - drivers/scsi/qla4xxx/ql4_os.c
HID: logitech-dj: fix spelling in printk
[mirror_ubuntu-kernels.git] / drivers / scsi / qla4xxx / ql4_os.c
1 /*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
12
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
15
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 #include "ql4_83xx.h"
22
23 /*
24 * Driver version
25 */
26 static char qla4xxx_version_str[40];
27
28 /*
29 * SRB allocation cache
30 */
31 static struct kmem_cache *srb_cachep;
32
33 /*
34 * Module parameter information and variables
35 */
36 static int ql4xdisablesysfsboot = 1;
37 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
38 MODULE_PARM_DESC(ql4xdisablesysfsboot,
39 " Set to disable exporting boot targets to sysfs.\n"
40 "\t\t 0 - Export boot targets\n"
41 "\t\t 1 - Do not export boot targets (Default)");
42
43 int ql4xdontresethba;
44 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
45 MODULE_PARM_DESC(ql4xdontresethba,
46 " Don't reset the HBA for driver recovery.\n"
47 "\t\t 0 - It will reset HBA (Default)\n"
48 "\t\t 1 - It will NOT reset HBA");
49
50 int ql4xextended_error_logging;
51 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
52 MODULE_PARM_DESC(ql4xextended_error_logging,
53 " Option to enable extended error logging.\n"
54 "\t\t 0 - no logging (Default)\n"
55 "\t\t 2 - debug logging");
56
57 int ql4xenablemsix = 1;
58 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
59 MODULE_PARM_DESC(ql4xenablemsix,
60 " Set to enable MSI or MSI-X interrupt mechanism.\n"
61 "\t\t 0 = enable INTx interrupt mechanism.\n"
62 "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
63 "\t\t 2 = enable MSI interrupt mechanism.");
64
65 #define QL4_DEF_QDEPTH 32
66 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
67 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
68 MODULE_PARM_DESC(ql4xmaxqdepth,
69 " Maximum queue depth to report for target devices.\n"
70 "\t\t Default: 32.");
71
72 static int ql4xqfulltracking = 1;
73 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
74 MODULE_PARM_DESC(ql4xqfulltracking,
75 " Enable or disable dynamic tracking and adjustment of\n"
76 "\t\t scsi device queue depth.\n"
77 "\t\t 0 - Disable.\n"
78 "\t\t 1 - Enable. (Default)");
79
80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
82 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
83 " Target Session Recovery Timeout.\n"
84 "\t\t Default: 120 sec.");
85
86 int ql4xmdcapmask = 0;
87 module_param(ql4xmdcapmask, int, S_IRUGO);
88 MODULE_PARM_DESC(ql4xmdcapmask,
89 " Set the Minidump driver capture mask level.\n"
90 "\t\t Default is 0 (firmware default capture mask)\n"
91 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");
92
93 int ql4xenablemd = 1;
94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
95 MODULE_PARM_DESC(ql4xenablemd,
96 " Set to enable minidump.\n"
97 "\t\t 0 - disable minidump\n"
98 "\t\t 1 - enable minidump (Default)");
99
100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
101 /*
102 * SCSI host template entry points
103 */
104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
105
106 /*
107 * iSCSI template entry points
108 */
109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
110 enum iscsi_param param, char *buf);
111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
112 enum iscsi_param param, char *buf);
113 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
114 enum iscsi_host_param param, char *buf);
115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
116 uint32_t len);
117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
118 enum iscsi_param_type param_type,
119 int param, char *buf);
120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
122 struct sockaddr *dst_addr,
123 int non_blocking);
124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
127 enum iscsi_param param, char *buf);
128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
129 static struct iscsi_cls_conn *
130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
132 struct iscsi_cls_conn *cls_conn,
133 uint64_t transport_fd, int is_leading);
134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
135 static struct iscsi_cls_session *
136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
137 uint16_t qdepth, uint32_t initial_cmdsn);
138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
139 static void qla4xxx_task_work(struct work_struct *wdata);
140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
141 static int qla4xxx_task_xmit(struct iscsi_task *);
142 static void qla4xxx_task_cleanup(struct iscsi_task *);
143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
145 struct iscsi_stats *stats);
146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
147 uint32_t iface_type, uint32_t payload_size,
148 uint32_t pid, struct sockaddr *dst_addr);
149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
150 uint32_t *num_entries, char *buf);
151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
152 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data,
153 int len);
154 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len);
155
156 /*
157 * SCSI host template entry points
158 */
159 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
160 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
161 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
162 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
163 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
164 static int qla4xxx_slave_alloc(struct scsi_device *device);
165 static umode_t qla4_attr_is_visible(int param_type, int param);
166 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
167
168 /*
169 * iSCSI Flash DDB sysfs entry points
170 */
171 static int
172 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
173 struct iscsi_bus_flash_conn *fnode_conn,
174 void *data, int len);
175 static int
176 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
177 int param, char *buf);
178 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
179 int len);
180 static int
181 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
182 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
183 struct iscsi_bus_flash_conn *fnode_conn);
184 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
185 struct iscsi_bus_flash_conn *fnode_conn);
186 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
187
188 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
189 QLA82XX_LEGACY_INTR_CONFIG;
190
191 static struct scsi_host_template qla4xxx_driver_template = {
192 .module = THIS_MODULE,
193 .name = DRIVER_NAME,
194 .proc_name = DRIVER_NAME,
195 .queuecommand = qla4xxx_queuecommand,
196
197 .eh_abort_handler = qla4xxx_eh_abort,
198 .eh_device_reset_handler = qla4xxx_eh_device_reset,
199 .eh_target_reset_handler = qla4xxx_eh_target_reset,
200 .eh_host_reset_handler = qla4xxx_eh_host_reset,
201 .eh_timed_out = qla4xxx_eh_cmd_timed_out,
202
203 .slave_alloc = qla4xxx_slave_alloc,
204 .change_queue_depth = scsi_change_queue_depth,
205
206 .this_id = -1,
207 .cmd_per_lun = 3,
208 .sg_tablesize = SG_ALL,
209
210 .max_sectors = 0xFFFF,
211 .shost_attrs = qla4xxx_host_attrs,
212 .host_reset = qla4xxx_host_reset,
213 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
214 };
215
216 static struct iscsi_transport qla4xxx_iscsi_transport = {
217 .owner = THIS_MODULE,
218 .name = DRIVER_NAME,
219 .caps = CAP_TEXT_NEGO |
220 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
221 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
222 CAP_MULTI_R2T,
223 .attr_is_visible = qla4_attr_is_visible,
224 .create_session = qla4xxx_session_create,
225 .destroy_session = qla4xxx_session_destroy,
226 .start_conn = qla4xxx_conn_start,
227 .create_conn = qla4xxx_conn_create,
228 .bind_conn = qla4xxx_conn_bind,
229 .stop_conn = iscsi_conn_stop,
230 .destroy_conn = qla4xxx_conn_destroy,
231 .set_param = iscsi_set_param,
232 .get_conn_param = qla4xxx_conn_get_param,
233 .get_session_param = qla4xxx_session_get_param,
234 .get_ep_param = qla4xxx_get_ep_param,
235 .ep_connect = qla4xxx_ep_connect,
236 .ep_poll = qla4xxx_ep_poll,
237 .ep_disconnect = qla4xxx_ep_disconnect,
238 .get_stats = qla4xxx_conn_get_stats,
239 .send_pdu = iscsi_conn_send_pdu,
240 .xmit_task = qla4xxx_task_xmit,
241 .cleanup_task = qla4xxx_task_cleanup,
242 .alloc_pdu = qla4xxx_alloc_pdu,
243
244 .get_host_param = qla4xxx_host_get_param,
245 .set_iface_param = qla4xxx_iface_set_param,
246 .get_iface_param = qla4xxx_get_iface_param,
247 .bsg_request = qla4xxx_bsg_request,
248 .send_ping = qla4xxx_send_ping,
249 .get_chap = qla4xxx_get_chap_list,
250 .delete_chap = qla4xxx_delete_chap,
251 .set_chap = qla4xxx_set_chap_entry,
252 .get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
253 .set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
254 .new_flashnode = qla4xxx_sysfs_ddb_add,
255 .del_flashnode = qla4xxx_sysfs_ddb_delete,
256 .login_flashnode = qla4xxx_sysfs_ddb_login,
257 .logout_flashnode = qla4xxx_sysfs_ddb_logout,
258 .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid,
259 .get_host_stats = qla4xxx_get_host_stats,
260 };
261
262 static struct scsi_transport_template *qla4xxx_scsi_transport;
263
264 static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
265 {
266 u32 reg_val = 0;
267 int rval = QLA_SUCCESS;
268
269 if (is_qla8022(ha))
270 reg_val = readl(&ha->qla4_82xx_reg->host_status);
271 else if (is_qla8032(ha) || is_qla8042(ha))
272 reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
273 else
274 reg_val = readw(&ha->reg->ctrl_status);
275
276 if (reg_val == QL4_ISP_REG_DISCONNECT)
277 rval = QLA_ERROR;
278
279 return rval;
280 }
281
282 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
283 uint32_t iface_type, uint32_t payload_size,
284 uint32_t pid, struct sockaddr *dst_addr)
285 {
286 struct scsi_qla_host *ha = to_qla_host(shost);
287 struct sockaddr_in *addr;
288 struct sockaddr_in6 *addr6;
289 uint32_t options = 0;
290 uint8_t ipaddr[IPv6_ADDR_LEN];
291 int rval;
292
293 memset(ipaddr, 0, IPv6_ADDR_LEN);
294 /* IPv4 to IPv4 */
295 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
296 (dst_addr->sa_family == AF_INET)) {
297 addr = (struct sockaddr_in *)dst_addr;
298 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
299 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
300 "dest: %pI4\n", __func__,
301 &ha->ip_config.ip_address, ipaddr));
302 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
303 ipaddr);
304 if (rval)
305 rval = -EINVAL;
306 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
307 (dst_addr->sa_family == AF_INET6)) {
308 /* IPv6 to IPv6 */
309 addr6 = (struct sockaddr_in6 *)dst_addr;
310 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
311
312 options |= PING_IPV6_PROTOCOL_ENABLE;
313
314 /* Ping using LinkLocal address */
315 if ((iface_num == 0) || (iface_num == 1)) {
316 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
317 "src: %pI6 dest: %pI6\n", __func__,
318 &ha->ip_config.ipv6_link_local_addr,
319 ipaddr));
320 options |= PING_IPV6_LINKLOCAL_ADDR;
321 rval = qla4xxx_ping_iocb(ha, options, payload_size,
322 pid, ipaddr);
323 } else {
324 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
325 "not supported\n", __func__, iface_num);
326 rval = -ENOSYS;
327 goto exit_send_ping;
328 }
329
330 /*
331 * If ping using LinkLocal address fails, try ping using
332 * IPv6 address
333 */
334 if (rval != QLA_SUCCESS) {
335 options &= ~PING_IPV6_LINKLOCAL_ADDR;
336 if (iface_num == 0) {
337 options |= PING_IPV6_ADDR0;
338 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
339 "Ping src: %pI6 "
340 "dest: %pI6\n", __func__,
341 &ha->ip_config.ipv6_addr0,
342 ipaddr));
343 } else if (iface_num == 1) {
344 options |= PING_IPV6_ADDR1;
345 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
346 "Ping src: %pI6 "
347 "dest: %pI6\n", __func__,
348 &ha->ip_config.ipv6_addr1,
349 ipaddr));
350 }
351 rval = qla4xxx_ping_iocb(ha, options, payload_size,
352 pid, ipaddr);
353 if (rval)
354 rval = -EINVAL;
355 }
356 } else
357 rval = -ENOSYS;
358 exit_send_ping:
359 return rval;
360 }
361
362 static umode_t qla4_attr_is_visible(int param_type, int param)
363 {
364 switch (param_type) {
365 case ISCSI_HOST_PARAM:
366 switch (param) {
367 case ISCSI_HOST_PARAM_HWADDRESS:
368 case ISCSI_HOST_PARAM_IPADDRESS:
369 case ISCSI_HOST_PARAM_INITIATOR_NAME:
370 case ISCSI_HOST_PARAM_PORT_STATE:
371 case ISCSI_HOST_PARAM_PORT_SPEED:
372 return S_IRUGO;
373 default:
374 return 0;
375 }
376 case ISCSI_PARAM:
377 switch (param) {
378 case ISCSI_PARAM_PERSISTENT_ADDRESS:
379 case ISCSI_PARAM_PERSISTENT_PORT:
380 case ISCSI_PARAM_CONN_ADDRESS:
381 case ISCSI_PARAM_CONN_PORT:
382 case ISCSI_PARAM_TARGET_NAME:
383 case ISCSI_PARAM_TPGT:
384 case ISCSI_PARAM_TARGET_ALIAS:
385 case ISCSI_PARAM_MAX_BURST:
386 case ISCSI_PARAM_MAX_R2T:
387 case ISCSI_PARAM_FIRST_BURST:
388 case ISCSI_PARAM_MAX_RECV_DLENGTH:
389 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
390 case ISCSI_PARAM_IFACE_NAME:
391 case ISCSI_PARAM_CHAP_OUT_IDX:
392 case ISCSI_PARAM_CHAP_IN_IDX:
393 case ISCSI_PARAM_USERNAME:
394 case ISCSI_PARAM_PASSWORD:
395 case ISCSI_PARAM_USERNAME_IN:
396 case ISCSI_PARAM_PASSWORD_IN:
397 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
398 case ISCSI_PARAM_DISCOVERY_SESS:
399 case ISCSI_PARAM_PORTAL_TYPE:
400 case ISCSI_PARAM_CHAP_AUTH_EN:
401 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
402 case ISCSI_PARAM_BIDI_CHAP_EN:
403 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
404 case ISCSI_PARAM_DEF_TIME2WAIT:
405 case ISCSI_PARAM_DEF_TIME2RETAIN:
406 case ISCSI_PARAM_HDRDGST_EN:
407 case ISCSI_PARAM_DATADGST_EN:
408 case ISCSI_PARAM_INITIAL_R2T_EN:
409 case ISCSI_PARAM_IMM_DATA_EN:
410 case ISCSI_PARAM_PDU_INORDER_EN:
411 case ISCSI_PARAM_DATASEQ_INORDER_EN:
412 case ISCSI_PARAM_MAX_SEGMENT_SIZE:
413 case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
414 case ISCSI_PARAM_TCP_WSF_DISABLE:
415 case ISCSI_PARAM_TCP_NAGLE_DISABLE:
416 case ISCSI_PARAM_TCP_TIMER_SCALE:
417 case ISCSI_PARAM_TCP_TIMESTAMP_EN:
418 case ISCSI_PARAM_TCP_XMIT_WSF:
419 case ISCSI_PARAM_TCP_RECV_WSF:
420 case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
421 case ISCSI_PARAM_IPV4_TOS:
422 case ISCSI_PARAM_IPV6_TC:
423 case ISCSI_PARAM_IPV6_FLOW_LABEL:
424 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
425 case ISCSI_PARAM_KEEPALIVE_TMO:
426 case ISCSI_PARAM_LOCAL_PORT:
427 case ISCSI_PARAM_ISID:
428 case ISCSI_PARAM_TSID:
429 case ISCSI_PARAM_DEF_TASKMGMT_TMO:
430 case ISCSI_PARAM_ERL:
431 case ISCSI_PARAM_STATSN:
432 case ISCSI_PARAM_EXP_STATSN:
433 case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
434 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
435 case ISCSI_PARAM_LOCAL_IPADDR:
436 return S_IRUGO;
437 default:
438 return 0;
439 }
440 case ISCSI_NET_PARAM:
441 switch (param) {
442 case ISCSI_NET_PARAM_IPV4_ADDR:
443 case ISCSI_NET_PARAM_IPV4_SUBNET:
444 case ISCSI_NET_PARAM_IPV4_GW:
445 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
446 case ISCSI_NET_PARAM_IFACE_ENABLE:
447 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
448 case ISCSI_NET_PARAM_IPV6_ADDR:
449 case ISCSI_NET_PARAM_IPV6_ROUTER:
450 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
451 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
452 case ISCSI_NET_PARAM_VLAN_ID:
453 case ISCSI_NET_PARAM_VLAN_PRIORITY:
454 case ISCSI_NET_PARAM_VLAN_ENABLED:
455 case ISCSI_NET_PARAM_MTU:
456 case ISCSI_NET_PARAM_PORT:
457 case ISCSI_NET_PARAM_IPADDR_STATE:
458 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
459 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
460 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
461 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
462 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
463 case ISCSI_NET_PARAM_TCP_WSF:
464 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
465 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
466 case ISCSI_NET_PARAM_CACHE_ID:
467 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
468 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
469 case ISCSI_NET_PARAM_IPV4_TOS_EN:
470 case ISCSI_NET_PARAM_IPV4_TOS:
471 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
472 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
473 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
474 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
475 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
476 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
477 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
478 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
479 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
480 case ISCSI_NET_PARAM_REDIRECT_EN:
481 case ISCSI_NET_PARAM_IPV4_TTL:
482 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
483 case ISCSI_NET_PARAM_IPV6_MLD_EN:
484 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
485 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
486 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
487 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
488 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
489 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
490 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
491 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
492 return S_IRUGO;
493 default:
494 return 0;
495 }
496 case ISCSI_IFACE_PARAM:
497 switch (param) {
498 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
499 case ISCSI_IFACE_PARAM_HDRDGST_EN:
500 case ISCSI_IFACE_PARAM_DATADGST_EN:
501 case ISCSI_IFACE_PARAM_IMM_DATA_EN:
502 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
503 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
504 case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
505 case ISCSI_IFACE_PARAM_ERL:
506 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
507 case ISCSI_IFACE_PARAM_FIRST_BURST:
508 case ISCSI_IFACE_PARAM_MAX_R2T:
509 case ISCSI_IFACE_PARAM_MAX_BURST:
510 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
511 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
512 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
513 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
514 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
515 case ISCSI_IFACE_PARAM_INITIATOR_NAME:
516 return S_IRUGO;
517 default:
518 return 0;
519 }
520 case ISCSI_FLASHNODE_PARAM:
521 switch (param) {
522 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
523 case ISCSI_FLASHNODE_PORTAL_TYPE:
524 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
525 case ISCSI_FLASHNODE_DISCOVERY_SESS:
526 case ISCSI_FLASHNODE_ENTRY_EN:
527 case ISCSI_FLASHNODE_HDR_DGST_EN:
528 case ISCSI_FLASHNODE_DATA_DGST_EN:
529 case ISCSI_FLASHNODE_IMM_DATA_EN:
530 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
531 case ISCSI_FLASHNODE_DATASEQ_INORDER:
532 case ISCSI_FLASHNODE_PDU_INORDER:
533 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
534 case ISCSI_FLASHNODE_SNACK_REQ_EN:
535 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
536 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
537 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
538 case ISCSI_FLASHNODE_ERL:
539 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
540 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
541 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
542 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
543 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
544 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
545 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
546 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
547 case ISCSI_FLASHNODE_FIRST_BURST:
548 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
549 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
550 case ISCSI_FLASHNODE_MAX_R2T:
551 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
552 case ISCSI_FLASHNODE_ISID:
553 case ISCSI_FLASHNODE_TSID:
554 case ISCSI_FLASHNODE_PORT:
555 case ISCSI_FLASHNODE_MAX_BURST:
556 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
557 case ISCSI_FLASHNODE_IPADDR:
558 case ISCSI_FLASHNODE_ALIAS:
559 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
560 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
561 case ISCSI_FLASHNODE_LOCAL_PORT:
562 case ISCSI_FLASHNODE_IPV4_TOS:
563 case ISCSI_FLASHNODE_IPV6_TC:
564 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
565 case ISCSI_FLASHNODE_NAME:
566 case ISCSI_FLASHNODE_TPGT:
567 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
568 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
569 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
570 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
571 case ISCSI_FLASHNODE_TCP_RECV_WSF:
572 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
573 case ISCSI_FLASHNODE_USERNAME:
574 case ISCSI_FLASHNODE_PASSWORD:
575 case ISCSI_FLASHNODE_STATSN:
576 case ISCSI_FLASHNODE_EXP_STATSN:
577 case ISCSI_FLASHNODE_IS_BOOT_TGT:
578 return S_IRUGO;
579 default:
580 return 0;
581 }
582 }
583
584 return 0;
585 }
586
587 /**
588 * qla4xxx_create chap_list - Create CHAP list from FLASH
589 * @ha: pointer to adapter structure
590 *
591 * Read flash and make a list of CHAP entries, during login when a CHAP entry
592 * is received, it will be checked in this list. If entry exist then the CHAP
593 * entry index is set in the DDB. If CHAP entry does not exist in this list
594 * then a new entry is added in FLASH in CHAP table and the index obtained is
595 * used in the DDB.
596 **/
597 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
598 {
599 int rval = 0;
600 uint8_t *chap_flash_data = NULL;
601 uint32_t offset;
602 dma_addr_t chap_dma;
603 uint32_t chap_size = 0;
604
605 if (is_qla40XX(ha))
606 chap_size = MAX_CHAP_ENTRIES_40XX *
607 sizeof(struct ql4_chap_table);
608 else /* Single region contains CHAP info for both
609 * ports which is divided into half for each port.
610 */
611 chap_size = ha->hw.flt_chap_size / 2;
612
613 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
614 &chap_dma, GFP_KERNEL);
615 if (!chap_flash_data) {
616 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
617 return;
618 }
619
620 if (is_qla40XX(ha)) {
621 offset = FLASH_CHAP_OFFSET;
622 } else {
623 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
624 if (ha->port_num == 1)
625 offset += chap_size;
626 }
627
628 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
629 if (rval != QLA_SUCCESS)
630 goto exit_chap_list;
631
632 if (ha->chap_list == NULL)
633 ha->chap_list = vmalloc(chap_size);
634 if (ha->chap_list == NULL) {
635 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
636 goto exit_chap_list;
637 }
638
639 memset(ha->chap_list, 0, chap_size);
640 memcpy(ha->chap_list, chap_flash_data, chap_size);
641
642 exit_chap_list:
643 dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma);
644 }
645
646 static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
647 int16_t chap_index,
648 struct ql4_chap_table **chap_entry)
649 {
650 int rval = QLA_ERROR;
651 int max_chap_entries;
652
653 if (!ha->chap_list) {
654 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
655 rval = QLA_ERROR;
656 goto exit_get_chap;
657 }
658
659 if (is_qla80XX(ha))
660 max_chap_entries = (ha->hw.flt_chap_size / 2) /
661 sizeof(struct ql4_chap_table);
662 else
663 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
664
665 if (chap_index > max_chap_entries) {
666 ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
667 rval = QLA_ERROR;
668 goto exit_get_chap;
669 }
670
671 *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
672 if ((*chap_entry)->cookie !=
673 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
674 rval = QLA_ERROR;
675 *chap_entry = NULL;
676 } else {
677 rval = QLA_SUCCESS;
678 }
679
680 exit_get_chap:
681 return rval;
682 }
683
684 /**
685 * qla4xxx_find_free_chap_index - Find the first free chap index
686 * @ha: pointer to adapter structure
687 * @chap_index: CHAP index to be returned
688 *
689 * Find the first free chap index available in the chap table
690 *
691 * Note: Caller should acquire the chap lock before getting here.
692 **/
693 static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
694 uint16_t *chap_index)
695 {
696 int i, rval;
697 int free_index = -1;
698 int max_chap_entries = 0;
699 struct ql4_chap_table *chap_table;
700
701 if (is_qla80XX(ha))
702 max_chap_entries = (ha->hw.flt_chap_size / 2) /
703 sizeof(struct ql4_chap_table);
704 else
705 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
706
707 if (!ha->chap_list) {
708 ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
709 rval = QLA_ERROR;
710 goto exit_find_chap;
711 }
712
713 for (i = 0; i < max_chap_entries; i++) {
714 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
715
716 if ((chap_table->cookie !=
717 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
718 (i > MAX_RESRV_CHAP_IDX)) {
719 free_index = i;
720 break;
721 }
722 }
723
724 if (free_index != -1) {
725 *chap_index = free_index;
726 rval = QLA_SUCCESS;
727 } else {
728 rval = QLA_ERROR;
729 }
730
731 exit_find_chap:
732 return rval;
733 }
734
735 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
736 uint32_t *num_entries, char *buf)
737 {
738 struct scsi_qla_host *ha = to_qla_host(shost);
739 struct ql4_chap_table *chap_table;
740 struct iscsi_chap_rec *chap_rec;
741 int max_chap_entries = 0;
742 int valid_chap_entries = 0;
743 int ret = 0, i;
744
745 if (is_qla80XX(ha))
746 max_chap_entries = (ha->hw.flt_chap_size / 2) /
747 sizeof(struct ql4_chap_table);
748 else
749 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
750
751 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
752 __func__, *num_entries, chap_tbl_idx);
753
754 if (!buf) {
755 ret = -ENOMEM;
756 goto exit_get_chap_list;
757 }
758
759 qla4xxx_create_chap_list(ha);
760
761 chap_rec = (struct iscsi_chap_rec *) buf;
762 mutex_lock(&ha->chap_sem);
763 for (i = chap_tbl_idx; i < max_chap_entries; i++) {
764 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
765 if (chap_table->cookie !=
766 __constant_cpu_to_le16(CHAP_VALID_COOKIE))
767 continue;
768
769 chap_rec->chap_tbl_idx = i;
770 strlcpy(chap_rec->username, chap_table->name,
771 ISCSI_CHAP_AUTH_NAME_MAX_LEN);
772 strlcpy(chap_rec->password, chap_table->secret,
773 QL4_CHAP_MAX_SECRET_LEN);
774 chap_rec->password_length = chap_table->secret_len;
775
776 if (chap_table->flags & BIT_7) /* local */
777 chap_rec->chap_type = CHAP_TYPE_OUT;
778
779 if (chap_table->flags & BIT_6) /* peer */
780 chap_rec->chap_type = CHAP_TYPE_IN;
781
782 chap_rec++;
783
784 valid_chap_entries++;
785 if (valid_chap_entries == *num_entries)
786 break;
787 else
788 continue;
789 }
790 mutex_unlock(&ha->chap_sem);
791
792 exit_get_chap_list:
793 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
794 __func__, valid_chap_entries);
795 *num_entries = valid_chap_entries;
796 return ret;
797 }
798
799 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
800 {
801 int ret = 0;
802 uint16_t *chap_tbl_idx = (uint16_t *) data;
803 struct iscsi_cls_session *cls_session;
804 struct iscsi_session *sess;
805 struct ddb_entry *ddb_entry;
806
807 if (!iscsi_is_session_dev(dev))
808 goto exit_is_chap_active;
809
810 cls_session = iscsi_dev_to_session(dev);
811 sess = cls_session->dd_data;
812 ddb_entry = sess->dd_data;
813
814 if (iscsi_session_chkready(cls_session))
815 goto exit_is_chap_active;
816
817 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
818 ret = 1;
819
820 exit_is_chap_active:
821 return ret;
822 }
823
824 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
825 uint16_t chap_tbl_idx)
826 {
827 int ret = 0;
828
829 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
830 __qla4xxx_is_chap_active);
831
832 return ret;
833 }
834
835 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
836 {
837 struct scsi_qla_host *ha = to_qla_host(shost);
838 struct ql4_chap_table *chap_table;
839 dma_addr_t chap_dma;
840 int max_chap_entries = 0;
841 uint32_t offset = 0;
842 uint32_t chap_size;
843 int ret = 0;
844
845 chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
846 if (chap_table == NULL)
847 return -ENOMEM;
848
849 if (is_qla80XX(ha))
850 max_chap_entries = (ha->hw.flt_chap_size / 2) /
851 sizeof(struct ql4_chap_table);
852 else
853 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
854
855 if (chap_tbl_idx > max_chap_entries) {
856 ret = -EINVAL;
857 goto exit_delete_chap;
858 }
859
860 /* Check if chap index is in use.
861 * If chap is in use don't delet chap entry */
862 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
863 if (ret) {
864 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
865 "delete from flash\n", chap_tbl_idx);
866 ret = -EBUSY;
867 goto exit_delete_chap;
868 }
869
870 chap_size = sizeof(struct ql4_chap_table);
871 if (is_qla40XX(ha))
872 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
873 else {
874 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
875 /* flt_chap_size is CHAP table size for both ports
876 * so divide it by 2 to calculate the offset for second port
877 */
878 if (ha->port_num == 1)
879 offset += (ha->hw.flt_chap_size / 2);
880 offset += (chap_tbl_idx * chap_size);
881 }
882
883 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
884 if (ret != QLA_SUCCESS) {
885 ret = -EINVAL;
886 goto exit_delete_chap;
887 }
888
889 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
890 __le16_to_cpu(chap_table->cookie)));
891
892 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
893 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
894 goto exit_delete_chap;
895 }
896
897 chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
898
899 offset = FLASH_CHAP_OFFSET |
900 (chap_tbl_idx * sizeof(struct ql4_chap_table));
901 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
902 FLASH_OPT_RMW_COMMIT);
903 if (ret == QLA_SUCCESS && ha->chap_list) {
904 mutex_lock(&ha->chap_sem);
905 /* Update ha chap_list cache */
906 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
907 chap_table, sizeof(struct ql4_chap_table));
908 mutex_unlock(&ha->chap_sem);
909 }
910 if (ret != QLA_SUCCESS)
911 ret = -EINVAL;
912
913 exit_delete_chap:
914 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
915 return ret;
916 }
917
918 /**
919 * qla4xxx_set_chap_entry - Make chap entry with given information
920 * @shost: pointer to host
921 * @data: chap info - credentials, index and type to make chap entry
922 * @len: length of data
923 *
924 * Add or update chap entry with the given information
925 **/
926 static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
927 {
928 struct scsi_qla_host *ha = to_qla_host(shost);
929 struct iscsi_chap_rec chap_rec;
930 struct ql4_chap_table *chap_entry = NULL;
931 struct iscsi_param_info *param_info;
932 struct nlattr *attr;
933 int max_chap_entries = 0;
934 int type;
935 int rem = len;
936 int rc = 0;
937 int size;
938
939 memset(&chap_rec, 0, sizeof(chap_rec));
940
941 nla_for_each_attr(attr, data, len, rem) {
942 param_info = nla_data(attr);
943
944 switch (param_info->param) {
945 case ISCSI_CHAP_PARAM_INDEX:
946 chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
947 break;
948 case ISCSI_CHAP_PARAM_CHAP_TYPE:
949 chap_rec.chap_type = param_info->value[0];
950 break;
951 case ISCSI_CHAP_PARAM_USERNAME:
952 size = min_t(size_t, sizeof(chap_rec.username),
953 param_info->len);
954 memcpy(chap_rec.username, param_info->value, size);
955 break;
956 case ISCSI_CHAP_PARAM_PASSWORD:
957 size = min_t(size_t, sizeof(chap_rec.password),
958 param_info->len);
959 memcpy(chap_rec.password, param_info->value, size);
960 break;
961 case ISCSI_CHAP_PARAM_PASSWORD_LEN:
962 chap_rec.password_length = param_info->value[0];
963 break;
964 default:
965 ql4_printk(KERN_ERR, ha,
966 "%s: No such sysfs attribute\n", __func__);
967 rc = -ENOSYS;
968 goto exit_set_chap;
969 };
970 }
971
972 if (chap_rec.chap_type == CHAP_TYPE_IN)
973 type = BIDI_CHAP;
974 else
975 type = LOCAL_CHAP;
976
977 if (is_qla80XX(ha))
978 max_chap_entries = (ha->hw.flt_chap_size / 2) /
979 sizeof(struct ql4_chap_table);
980 else
981 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
982
983 mutex_lock(&ha->chap_sem);
984 if (chap_rec.chap_tbl_idx < max_chap_entries) {
985 rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
986 &chap_entry);
987 if (!rc) {
988 if (!(type == qla4xxx_get_chap_type(chap_entry))) {
989 ql4_printk(KERN_INFO, ha,
990 "Type mismatch for CHAP entry %d\n",
991 chap_rec.chap_tbl_idx);
992 rc = -EINVAL;
993 goto exit_unlock_chap;
994 }
995
996 /* If chap index is in use then don't modify it */
997 rc = qla4xxx_is_chap_active(shost,
998 chap_rec.chap_tbl_idx);
999 if (rc) {
1000 ql4_printk(KERN_INFO, ha,
1001 "CHAP entry %d is in use\n",
1002 chap_rec.chap_tbl_idx);
1003 rc = -EBUSY;
1004 goto exit_unlock_chap;
1005 }
1006 }
1007 } else {
1008 rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
1009 if (rc) {
1010 ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
1011 rc = -EBUSY;
1012 goto exit_unlock_chap;
1013 }
1014 }
1015
1016 rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
1017 chap_rec.chap_tbl_idx, type);
1018
1019 exit_unlock_chap:
1020 mutex_unlock(&ha->chap_sem);
1021
1022 exit_set_chap:
1023 return rc;
1024 }
1025
1026
1027 static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
1028 {
1029 struct scsi_qla_host *ha = to_qla_host(shost);
1030 struct iscsi_offload_host_stats *host_stats = NULL;
1031 int host_stats_size;
1032 int ret = 0;
1033 int ddb_idx = 0;
1034 struct ql_iscsi_stats *ql_iscsi_stats = NULL;
1035 int stats_size;
1036 dma_addr_t iscsi_stats_dma;
1037
1038 DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__));
1039
1040 host_stats_size = sizeof(struct iscsi_offload_host_stats);
1041
1042 if (host_stats_size != len) {
1043 ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n",
1044 __func__, len, host_stats_size);
1045 ret = -EINVAL;
1046 goto exit_host_stats;
1047 }
1048 host_stats = (struct iscsi_offload_host_stats *)buf;
1049
1050 if (!buf) {
1051 ret = -ENOMEM;
1052 goto exit_host_stats;
1053 }
1054
1055 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
1056
1057 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
1058 &iscsi_stats_dma, GFP_KERNEL);
1059 if (!ql_iscsi_stats) {
1060 ql4_printk(KERN_ERR, ha,
1061 "Unable to allocate memory for iscsi stats\n");
1062 ret = -ENOMEM;
1063 goto exit_host_stats;
1064 }
1065
1066 ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size,
1067 iscsi_stats_dma);
1068 if (ret != QLA_SUCCESS) {
1069 ql4_printk(KERN_ERR, ha,
1070 "Unable to retrieve iscsi stats\n");
1071 ret = -EIO;
1072 goto exit_host_stats;
1073 }
1074 host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
1075 host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes);
1076 host_stats->mactx_multicast_frames =
1077 le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames);
1078 host_stats->mactx_broadcast_frames =
1079 le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames);
1080 host_stats->mactx_pause_frames =
1081 le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames);
1082 host_stats->mactx_control_frames =
1083 le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames);
1084 host_stats->mactx_deferral =
1085 le64_to_cpu(ql_iscsi_stats->mac_tx_deferral);
1086 host_stats->mactx_excess_deferral =
1087 le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral);
1088 host_stats->mactx_late_collision =
1089 le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision);
1090 host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort);
1091 host_stats->mactx_single_collision =
1092 le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision);
1093 host_stats->mactx_multiple_collision =
1094 le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision);
1095 host_stats->mactx_collision =
1096 le64_to_cpu(ql_iscsi_stats->mac_tx_collision);
1097 host_stats->mactx_frames_dropped =
1098 le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped);
1099 host_stats->mactx_jumbo_frames =
1100 le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames);
1101 host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames);
1102 host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes);
1103 host_stats->macrx_unknown_control_frames =
1104 le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames);
1105 host_stats->macrx_pause_frames =
1106 le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames);
1107 host_stats->macrx_control_frames =
1108 le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames);
1109 host_stats->macrx_dribble =
1110 le64_to_cpu(ql_iscsi_stats->mac_rx_dribble);
1111 host_stats->macrx_frame_length_error =
1112 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error);
1113 host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber);
1114 host_stats->macrx_carrier_sense_error =
1115 le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error);
1116 host_stats->macrx_frame_discarded =
1117 le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded);
1118 host_stats->macrx_frames_dropped =
1119 le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped);
1120 host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error);
1121 host_stats->mac_encoding_error =
1122 le64_to_cpu(ql_iscsi_stats->mac_encoding_error);
1123 host_stats->macrx_length_error_large =
1124 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large);
1125 host_stats->macrx_length_error_small =
1126 le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small);
1127 host_stats->macrx_multicast_frames =
1128 le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames);
1129 host_stats->macrx_broadcast_frames =
1130 le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames);
1131 host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets);
1132 host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes);
1133 host_stats->iptx_fragments =
1134 le64_to_cpu(ql_iscsi_stats->ip_tx_fragments);
1135 host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets);
1136 host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes);
1137 host_stats->iprx_fragments =
1138 le64_to_cpu(ql_iscsi_stats->ip_rx_fragments);
1139 host_stats->ip_datagram_reassembly =
1140 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly);
1141 host_stats->ip_invalid_address_error =
1142 le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error);
1143 host_stats->ip_error_packets =
1144 le64_to_cpu(ql_iscsi_stats->ip_error_packets);
1145 host_stats->ip_fragrx_overlap =
1146 le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap);
1147 host_stats->ip_fragrx_outoforder =
1148 le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder);
1149 host_stats->ip_datagram_reassembly_timeout =
1150 le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout);
1151 host_stats->ipv6tx_packets =
1152 le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets);
1153 host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes);
1154 host_stats->ipv6tx_fragments =
1155 le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments);
1156 host_stats->ipv6rx_packets =
1157 le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets);
1158 host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes);
1159 host_stats->ipv6rx_fragments =
1160 le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments);
1161 host_stats->ipv6_datagram_reassembly =
1162 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly);
1163 host_stats->ipv6_invalid_address_error =
1164 le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error);
1165 host_stats->ipv6_error_packets =
1166 le64_to_cpu(ql_iscsi_stats->ipv6_error_packets);
1167 host_stats->ipv6_fragrx_overlap =
1168 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap);
1169 host_stats->ipv6_fragrx_outoforder =
1170 le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder);
1171 host_stats->ipv6_datagram_reassembly_timeout =
1172 le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout);
1173 host_stats->tcptx_segments =
1174 le64_to_cpu(ql_iscsi_stats->tcp_tx_segments);
1175 host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes);
1176 host_stats->tcprx_segments =
1177 le64_to_cpu(ql_iscsi_stats->tcp_rx_segments);
1178 host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte);
1179 host_stats->tcp_duplicate_ack_retx =
1180 le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx);
1181 host_stats->tcp_retx_timer_expired =
1182 le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired);
1183 host_stats->tcprx_duplicate_ack =
1184 le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack);
1185 host_stats->tcprx_pure_ackr =
1186 le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr);
1187 host_stats->tcptx_delayed_ack =
1188 le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack);
1189 host_stats->tcptx_pure_ack =
1190 le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack);
1191 host_stats->tcprx_segment_error =
1192 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error);
1193 host_stats->tcprx_segment_outoforder =
1194 le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder);
1195 host_stats->tcprx_window_probe =
1196 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe);
1197 host_stats->tcprx_window_update =
1198 le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update);
1199 host_stats->tcptx_window_probe_persist =
1200 le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist);
1201 host_stats->ecc_error_correction =
1202 le64_to_cpu(ql_iscsi_stats->ecc_error_correction);
1203 host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx);
1204 host_stats->iscsi_data_bytes_tx =
1205 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx);
1206 host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx);
1207 host_stats->iscsi_data_bytes_rx =
1208 le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx);
1209 host_stats->iscsi_io_completed =
1210 le64_to_cpu(ql_iscsi_stats->iscsi_io_completed);
1211 host_stats->iscsi_unexpected_io_rx =
1212 le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx);
1213 host_stats->iscsi_format_error =
1214 le64_to_cpu(ql_iscsi_stats->iscsi_format_error);
1215 host_stats->iscsi_hdr_digest_error =
1216 le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error);
1217 host_stats->iscsi_data_digest_error =
1218 le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error);
1219 host_stats->iscsi_sequence_error =
1220 le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
1221 exit_host_stats:
1222 if (ql_iscsi_stats)
1223 dma_free_coherent(&ha->pdev->dev, host_stats_size,
1224 ql_iscsi_stats, iscsi_stats_dma);
1225
1226 ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
1227 __func__);
1228 return ret;
1229 }
1230
1231 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
1232 enum iscsi_param_type param_type,
1233 int param, char *buf)
1234 {
1235 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
1236 struct scsi_qla_host *ha = to_qla_host(shost);
1237 int ival;
1238 char *pval = NULL;
1239 int len = -ENOSYS;
1240
1241 if (param_type == ISCSI_NET_PARAM) {
1242 switch (param) {
1243 case ISCSI_NET_PARAM_IPV4_ADDR:
1244 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1245 break;
1246 case ISCSI_NET_PARAM_IPV4_SUBNET:
1247 len = sprintf(buf, "%pI4\n",
1248 &ha->ip_config.subnet_mask);
1249 break;
1250 case ISCSI_NET_PARAM_IPV4_GW:
1251 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
1252 break;
1253 case ISCSI_NET_PARAM_IFACE_ENABLE:
1254 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1255 OP_STATE(ha->ip_config.ipv4_options,
1256 IPOPT_IPV4_PROTOCOL_ENABLE, pval);
1257 } else {
1258 OP_STATE(ha->ip_config.ipv6_options,
1259 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval);
1260 }
1261
1262 len = sprintf(buf, "%s\n", pval);
1263 break;
1264 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1265 len = sprintf(buf, "%s\n",
1266 (ha->ip_config.tcp_options &
1267 TCPOPT_DHCP_ENABLE) ?
1268 "dhcp" : "static");
1269 break;
1270 case ISCSI_NET_PARAM_IPV6_ADDR:
1271 if (iface->iface_num == 0)
1272 len = sprintf(buf, "%pI6\n",
1273 &ha->ip_config.ipv6_addr0);
1274 if (iface->iface_num == 1)
1275 len = sprintf(buf, "%pI6\n",
1276 &ha->ip_config.ipv6_addr1);
1277 break;
1278 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1279 len = sprintf(buf, "%pI6\n",
1280 &ha->ip_config.ipv6_link_local_addr);
1281 break;
1282 case ISCSI_NET_PARAM_IPV6_ROUTER:
1283 len = sprintf(buf, "%pI6\n",
1284 &ha->ip_config.ipv6_default_router_addr);
1285 break;
1286 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1287 pval = (ha->ip_config.ipv6_addl_options &
1288 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
1289 "nd" : "static";
1290
1291 len = sprintf(buf, "%s\n", pval);
1292 break;
1293 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1294 pval = (ha->ip_config.ipv6_addl_options &
1295 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
1296 "auto" : "static";
1297
1298 len = sprintf(buf, "%s\n", pval);
1299 break;
1300 case ISCSI_NET_PARAM_VLAN_ID:
1301 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1302 ival = ha->ip_config.ipv4_vlan_tag &
1303 ISCSI_MAX_VLAN_ID;
1304 else
1305 ival = ha->ip_config.ipv6_vlan_tag &
1306 ISCSI_MAX_VLAN_ID;
1307
1308 len = sprintf(buf, "%d\n", ival);
1309 break;
1310 case ISCSI_NET_PARAM_VLAN_PRIORITY:
1311 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1312 ival = (ha->ip_config.ipv4_vlan_tag >> 13) &
1313 ISCSI_MAX_VLAN_PRIORITY;
1314 else
1315 ival = (ha->ip_config.ipv6_vlan_tag >> 13) &
1316 ISCSI_MAX_VLAN_PRIORITY;
1317
1318 len = sprintf(buf, "%d\n", ival);
1319 break;
1320 case ISCSI_NET_PARAM_VLAN_ENABLED:
1321 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1322 OP_STATE(ha->ip_config.ipv4_options,
1323 IPOPT_VLAN_TAGGING_ENABLE, pval);
1324 } else {
1325 OP_STATE(ha->ip_config.ipv6_options,
1326 IPV6_OPT_VLAN_TAGGING_ENABLE, pval);
1327 }
1328 len = sprintf(buf, "%s\n", pval);
1329 break;
1330 case ISCSI_NET_PARAM_MTU:
1331 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
1332 break;
1333 case ISCSI_NET_PARAM_PORT:
1334 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1335 len = sprintf(buf, "%d\n",
1336 ha->ip_config.ipv4_port);
1337 else
1338 len = sprintf(buf, "%d\n",
1339 ha->ip_config.ipv6_port);
1340 break;
1341 case ISCSI_NET_PARAM_IPADDR_STATE:
1342 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1343 pval = iscsi_get_ipaddress_state_name(
1344 ha->ip_config.ipv4_addr_state);
1345 } else {
1346 if (iface->iface_num == 0)
1347 pval = iscsi_get_ipaddress_state_name(
1348 ha->ip_config.ipv6_addr0_state);
1349 else if (iface->iface_num == 1)
1350 pval = iscsi_get_ipaddress_state_name(
1351 ha->ip_config.ipv6_addr1_state);
1352 }
1353
1354 len = sprintf(buf, "%s\n", pval);
1355 break;
1356 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE:
1357 pval = iscsi_get_ipaddress_state_name(
1358 ha->ip_config.ipv6_link_local_state);
1359 len = sprintf(buf, "%s\n", pval);
1360 break;
1361 case ISCSI_NET_PARAM_IPV6_ROUTER_STATE:
1362 pval = iscsi_get_router_state_name(
1363 ha->ip_config.ipv6_default_router_state);
1364 len = sprintf(buf, "%s\n", pval);
1365 break;
1366 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
1367 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1368 OP_STATE(~ha->ip_config.tcp_options,
1369 TCPOPT_DELAYED_ACK_DISABLE, pval);
1370 } else {
1371 OP_STATE(~ha->ip_config.ipv6_tcp_options,
1372 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval);
1373 }
1374 len = sprintf(buf, "%s\n", pval);
1375 break;
1376 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
1377 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1378 OP_STATE(~ha->ip_config.tcp_options,
1379 TCPOPT_NAGLE_ALGO_DISABLE, pval);
1380 } else {
1381 OP_STATE(~ha->ip_config.ipv6_tcp_options,
1382 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval);
1383 }
1384 len = sprintf(buf, "%s\n", pval);
1385 break;
1386 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
1387 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1388 OP_STATE(~ha->ip_config.tcp_options,
1389 TCPOPT_WINDOW_SCALE_DISABLE, pval);
1390 } else {
1391 OP_STATE(~ha->ip_config.ipv6_tcp_options,
1392 IPV6_TCPOPT_WINDOW_SCALE_DISABLE,
1393 pval);
1394 }
1395 len = sprintf(buf, "%s\n", pval);
1396 break;
1397 case ISCSI_NET_PARAM_TCP_WSF:
1398 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1399 len = sprintf(buf, "%d\n",
1400 ha->ip_config.tcp_wsf);
1401 else
1402 len = sprintf(buf, "%d\n",
1403 ha->ip_config.ipv6_tcp_wsf);
1404 break;
1405 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
1406 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1407 ival = (ha->ip_config.tcp_options &
1408 TCPOPT_TIMER_SCALE) >> 1;
1409 else
1410 ival = (ha->ip_config.ipv6_tcp_options &
1411 IPV6_TCPOPT_TIMER_SCALE) >> 1;
1412
1413 len = sprintf(buf, "%d\n", ival);
1414 break;
1415 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
1416 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1417 OP_STATE(ha->ip_config.tcp_options,
1418 TCPOPT_TIMESTAMP_ENABLE, pval);
1419 } else {
1420 OP_STATE(ha->ip_config.ipv6_tcp_options,
1421 IPV6_TCPOPT_TIMESTAMP_EN, pval);
1422 }
1423 len = sprintf(buf, "%s\n", pval);
1424 break;
1425 case ISCSI_NET_PARAM_CACHE_ID:
1426 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
1427 len = sprintf(buf, "%d\n",
1428 ha->ip_config.ipv4_cache_id);
1429 else
1430 len = sprintf(buf, "%d\n",
1431 ha->ip_config.ipv6_cache_id);
1432 break;
1433 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
1434 OP_STATE(ha->ip_config.tcp_options,
1435 TCPOPT_DNS_SERVER_IP_EN, pval);
1436
1437 len = sprintf(buf, "%s\n", pval);
1438 break;
1439 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
1440 OP_STATE(ha->ip_config.tcp_options,
1441 TCPOPT_SLP_DA_INFO_EN, pval);
1442
1443 len = sprintf(buf, "%s\n", pval);
1444 break;
1445 case ISCSI_NET_PARAM_IPV4_TOS_EN:
1446 OP_STATE(ha->ip_config.ipv4_options,
1447 IPOPT_IPV4_TOS_EN, pval);
1448
1449 len = sprintf(buf, "%s\n", pval);
1450 break;
1451 case ISCSI_NET_PARAM_IPV4_TOS:
1452 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos);
1453 break;
1454 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
1455 OP_STATE(ha->ip_config.ipv4_options,
1456 IPOPT_GRAT_ARP_EN, pval);
1457
1458 len = sprintf(buf, "%s\n", pval);
1459 break;
1460 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
1461 OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN,
1462 pval);
1463
1464 len = sprintf(buf, "%s\n", pval);
1465 break;
1466 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
1467 pval = (ha->ip_config.ipv4_alt_cid_len) ?
1468 (char *)ha->ip_config.ipv4_alt_cid : "";
1469
1470 len = sprintf(buf, "%s\n", pval);
1471 break;
1472 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
1473 OP_STATE(ha->ip_config.ipv4_options,
1474 IPOPT_REQ_VID_EN, pval);
1475
1476 len = sprintf(buf, "%s\n", pval);
1477 break;
1478 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
1479 OP_STATE(ha->ip_config.ipv4_options,
1480 IPOPT_USE_VID_EN, pval);
1481
1482 len = sprintf(buf, "%s\n", pval);
1483 break;
1484 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
1485 pval = (ha->ip_config.ipv4_vid_len) ?
1486 (char *)ha->ip_config.ipv4_vid : "";
1487
1488 len = sprintf(buf, "%s\n", pval);
1489 break;
1490 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
1491 OP_STATE(ha->ip_config.ipv4_options,
1492 IPOPT_LEARN_IQN_EN, pval);
1493
1494 len = sprintf(buf, "%s\n", pval);
1495 break;
1496 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
1497 OP_STATE(~ha->ip_config.ipv4_options,
1498 IPOPT_FRAGMENTATION_DISABLE, pval);
1499
1500 len = sprintf(buf, "%s\n", pval);
1501 break;
1502 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
1503 OP_STATE(ha->ip_config.ipv4_options,
1504 IPOPT_IN_FORWARD_EN, pval);
1505
1506 len = sprintf(buf, "%s\n", pval);
1507 break;
1508 case ISCSI_NET_PARAM_REDIRECT_EN:
1509 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
1510 OP_STATE(ha->ip_config.ipv4_options,
1511 IPOPT_ARP_REDIRECT_EN, pval);
1512 } else {
1513 OP_STATE(ha->ip_config.ipv6_options,
1514 IPV6_OPT_REDIRECT_EN, pval);
1515 }
1516 len = sprintf(buf, "%s\n", pval);
1517 break;
1518 case ISCSI_NET_PARAM_IPV4_TTL:
1519 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl);
1520 break;
1521 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
1522 OP_STATE(ha->ip_config.ipv6_options,
1523 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval);
1524
1525 len = sprintf(buf, "%s\n", pval);
1526 break;
1527 case ISCSI_NET_PARAM_IPV6_MLD_EN:
1528 OP_STATE(ha->ip_config.ipv6_addl_options,
1529 IPV6_ADDOPT_MLD_EN, pval);
1530
1531 len = sprintf(buf, "%s\n", pval);
1532 break;
1533 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
1534 len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl);
1535 break;
1536 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
1537 len = sprintf(buf, "%d\n",
1538 ha->ip_config.ipv6_traffic_class);
1539 break;
1540 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
1541 len = sprintf(buf, "%d\n",
1542 ha->ip_config.ipv6_hop_limit);
1543 break;
1544 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
1545 len = sprintf(buf, "%d\n",
1546 ha->ip_config.ipv6_nd_reach_time);
1547 break;
1548 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
1549 len = sprintf(buf, "%d\n",
1550 ha->ip_config.ipv6_nd_rexmit_timer);
1551 break;
1552 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
1553 len = sprintf(buf, "%d\n",
1554 ha->ip_config.ipv6_nd_stale_timeout);
1555 break;
1556 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
1557 len = sprintf(buf, "%d\n",
1558 ha->ip_config.ipv6_dup_addr_detect_count);
1559 break;
1560 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
1561 len = sprintf(buf, "%d\n",
1562 ha->ip_config.ipv6_gw_advrt_mtu);
1563 break;
1564 default:
1565 len = -ENOSYS;
1566 }
1567 } else if (param_type == ISCSI_IFACE_PARAM) {
1568 switch (param) {
1569 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
1570 len = sprintf(buf, "%d\n", ha->ip_config.def_timeout);
1571 break;
1572 case ISCSI_IFACE_PARAM_HDRDGST_EN:
1573 OP_STATE(ha->ip_config.iscsi_options,
1574 ISCSIOPTS_HEADER_DIGEST_EN, pval);
1575
1576 len = sprintf(buf, "%s\n", pval);
1577 break;
1578 case ISCSI_IFACE_PARAM_DATADGST_EN:
1579 OP_STATE(ha->ip_config.iscsi_options,
1580 ISCSIOPTS_DATA_DIGEST_EN, pval);
1581
1582 len = sprintf(buf, "%s\n", pval);
1583 break;
1584 case ISCSI_IFACE_PARAM_IMM_DATA_EN:
1585 OP_STATE(ha->ip_config.iscsi_options,
1586 ISCSIOPTS_IMMEDIATE_DATA_EN, pval);
1587
1588 len = sprintf(buf, "%s\n", pval);
1589 break;
1590 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
1591 OP_STATE(ha->ip_config.iscsi_options,
1592 ISCSIOPTS_INITIAL_R2T_EN, pval);
1593
1594 len = sprintf(buf, "%s\n", pval);
1595 break;
1596 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
1597 OP_STATE(ha->ip_config.iscsi_options,
1598 ISCSIOPTS_DATA_SEQ_INORDER_EN, pval);
1599
1600 len = sprintf(buf, "%s\n", pval);
1601 break;
1602 case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
1603 OP_STATE(ha->ip_config.iscsi_options,
1604 ISCSIOPTS_DATA_PDU_INORDER_EN, pval);
1605
1606 len = sprintf(buf, "%s\n", pval);
1607 break;
1608 case ISCSI_IFACE_PARAM_ERL:
1609 len = sprintf(buf, "%d\n",
1610 (ha->ip_config.iscsi_options &
1611 ISCSIOPTS_ERL));
1612 break;
1613 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
1614 len = sprintf(buf, "%u\n",
1615 ha->ip_config.iscsi_max_pdu_size *
1616 BYTE_UNITS);
1617 break;
1618 case ISCSI_IFACE_PARAM_FIRST_BURST:
1619 len = sprintf(buf, "%u\n",
1620 ha->ip_config.iscsi_first_burst_len *
1621 BYTE_UNITS);
1622 break;
1623 case ISCSI_IFACE_PARAM_MAX_R2T:
1624 len = sprintf(buf, "%d\n",
1625 ha->ip_config.iscsi_max_outstnd_r2t);
1626 break;
1627 case ISCSI_IFACE_PARAM_MAX_BURST:
1628 len = sprintf(buf, "%u\n",
1629 ha->ip_config.iscsi_max_burst_len *
1630 BYTE_UNITS);
1631 break;
1632 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
1633 OP_STATE(ha->ip_config.iscsi_options,
1634 ISCSIOPTS_CHAP_AUTH_EN, pval);
1635
1636 len = sprintf(buf, "%s\n", pval);
1637 break;
1638 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
1639 OP_STATE(ha->ip_config.iscsi_options,
1640 ISCSIOPTS_BIDI_CHAP_EN, pval);
1641
1642 len = sprintf(buf, "%s\n", pval);
1643 break;
1644 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
1645 OP_STATE(ha->ip_config.iscsi_options,
1646 ISCSIOPTS_DISCOVERY_AUTH_EN, pval);
1647
1648 len = sprintf(buf, "%s\n", pval);
1649 break;
1650 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
1651 OP_STATE(ha->ip_config.iscsi_options,
1652 ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval);
1653
1654 len = sprintf(buf, "%s\n", pval);
1655 break;
1656 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
1657 OP_STATE(ha->ip_config.iscsi_options,
1658 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval);
1659
1660 len = sprintf(buf, "%s\n", pval);
1661 break;
1662 case ISCSI_IFACE_PARAM_INITIATOR_NAME:
1663 len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name);
1664 break;
1665 default:
1666 len = -ENOSYS;
1667 }
1668 }
1669
1670 return len;
1671 }
1672
1673 static struct iscsi_endpoint *
1674 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1675 int non_blocking)
1676 {
1677 int ret;
1678 struct iscsi_endpoint *ep;
1679 struct qla_endpoint *qla_ep;
1680 struct scsi_qla_host *ha;
1681 struct sockaddr_in *addr;
1682 struct sockaddr_in6 *addr6;
1683
1684 if (!shost) {
1685 ret = -ENXIO;
1686 pr_err("%s: shost is NULL\n", __func__);
1687 return ERR_PTR(ret);
1688 }
1689
1690 ha = iscsi_host_priv(shost);
1691 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
1692 if (!ep) {
1693 ret = -ENOMEM;
1694 return ERR_PTR(ret);
1695 }
1696
1697 qla_ep = ep->dd_data;
1698 memset(qla_ep, 0, sizeof(struct qla_endpoint));
1699 if (dst_addr->sa_family == AF_INET) {
1700 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
1701 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
1702 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
1703 (char *)&addr->sin_addr));
1704 } else if (dst_addr->sa_family == AF_INET6) {
1705 memcpy(&qla_ep->dst_addr, dst_addr,
1706 sizeof(struct sockaddr_in6));
1707 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
1708 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
1709 (char *)&addr6->sin6_addr));
1710 } else {
1711 ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n",
1712 __func__);
1713 }
1714
1715 qla_ep->host = shost;
1716
1717 return ep;
1718 }
1719
1720 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1721 {
1722 struct qla_endpoint *qla_ep;
1723 struct scsi_qla_host *ha;
1724 int ret = 0;
1725
1726 qla_ep = ep->dd_data;
1727 ha = to_qla_host(qla_ep->host);
1728 DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));
1729
1730 if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
1731 ret = 1;
1732
1733 return ret;
1734 }
1735
1736 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
1737 {
1738 struct qla_endpoint *qla_ep;
1739 struct scsi_qla_host *ha;
1740
1741 qla_ep = ep->dd_data;
1742 ha = to_qla_host(qla_ep->host);
1743 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1744 ha->host_no));
1745 iscsi_destroy_endpoint(ep);
1746 }
1747
1748 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
1749 enum iscsi_param param,
1750 char *buf)
1751 {
1752 struct qla_endpoint *qla_ep = ep->dd_data;
1753 struct sockaddr *dst_addr;
1754 struct scsi_qla_host *ha;
1755
1756 if (!qla_ep)
1757 return -ENOTCONN;
1758
1759 ha = to_qla_host(qla_ep->host);
1760 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1761 ha->host_no));
1762
1763 switch (param) {
1764 case ISCSI_PARAM_CONN_PORT:
1765 case ISCSI_PARAM_CONN_ADDRESS:
1766 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1767 if (!dst_addr)
1768 return -ENOTCONN;
1769
1770 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1771 &qla_ep->dst_addr, param, buf);
1772 default:
1773 return -ENOSYS;
1774 }
1775 }
1776
1777 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1778 struct iscsi_stats *stats)
1779 {
1780 struct iscsi_session *sess;
1781 struct iscsi_cls_session *cls_sess;
1782 struct ddb_entry *ddb_entry;
1783 struct scsi_qla_host *ha;
1784 struct ql_iscsi_stats *ql_iscsi_stats;
1785 int stats_size;
1786 int ret;
1787 dma_addr_t iscsi_stats_dma;
1788
1789 cls_sess = iscsi_conn_to_session(cls_conn);
1790 sess = cls_sess->dd_data;
1791 ddb_entry = sess->dd_data;
1792 ha = ddb_entry->ha;
1793
1794 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
1795 ha->host_no));
1796 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
1797 /* Allocate memory */
1798 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
1799 &iscsi_stats_dma, GFP_KERNEL);
1800 if (!ql_iscsi_stats) {
1801 ql4_printk(KERN_ERR, ha,
1802 "Unable to allocate memory for iscsi stats\n");
1803 goto exit_get_stats;
1804 }
1805
1806 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
1807 iscsi_stats_dma);
1808 if (ret != QLA_SUCCESS) {
1809 ql4_printk(KERN_ERR, ha,
1810 "Unable to retrieve iscsi stats\n");
1811 goto free_stats;
1812 }
1813
1814 /* octets */
1815 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
1816 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
1817 /* xmit pdus */
1818 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
1819 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
1820 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
1821 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
1822 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
1823 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
1824 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
1825 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
1826 /* recv pdus */
1827 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
1828 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
1829 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
1830 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
1831 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
1832 stats->logoutrsp_pdus =
1833 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
1834 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
1835 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
1836 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
1837
1838 free_stats:
1839 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
1840 iscsi_stats_dma);
1841 exit_get_stats:
1842 return;
1843 }
1844
1845 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
1846 {
1847 struct iscsi_cls_session *session;
1848 struct iscsi_session *sess;
1849 unsigned long flags;
1850 enum blk_eh_timer_return ret = BLK_EH_DONE;
1851
1852 session = starget_to_session(scsi_target(sc->device));
1853 sess = session->dd_data;
1854
1855 spin_lock_irqsave(&session->lock, flags);
1856 if (session->state == ISCSI_SESSION_FAILED)
1857 ret = BLK_EH_RESET_TIMER;
1858 spin_unlock_irqrestore(&session->lock, flags);
1859
1860 return ret;
1861 }
1862
1863 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
1864 {
1865 struct scsi_qla_host *ha = to_qla_host(shost);
1866 struct iscsi_cls_host *ihost = shost->shost_data;
1867 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
1868
1869 qla4xxx_get_firmware_state(ha);
1870
1871 switch (ha->addl_fw_state & 0x0F00) {
1872 case FW_ADDSTATE_LINK_SPEED_10MBPS:
1873 speed = ISCSI_PORT_SPEED_10MBPS;
1874 break;
1875 case FW_ADDSTATE_LINK_SPEED_100MBPS:
1876 speed = ISCSI_PORT_SPEED_100MBPS;
1877 break;
1878 case FW_ADDSTATE_LINK_SPEED_1GBPS:
1879 speed = ISCSI_PORT_SPEED_1GBPS;
1880 break;
1881 case FW_ADDSTATE_LINK_SPEED_10GBPS:
1882 speed = ISCSI_PORT_SPEED_10GBPS;
1883 break;
1884 }
1885 ihost->port_speed = speed;
1886 }
1887
1888 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
1889 {
1890 struct scsi_qla_host *ha = to_qla_host(shost);
1891 struct iscsi_cls_host *ihost = shost->shost_data;
1892 uint32_t state = ISCSI_PORT_STATE_DOWN;
1893
1894 if (test_bit(AF_LINK_UP, &ha->flags))
1895 state = ISCSI_PORT_STATE_UP;
1896
1897 ihost->port_state = state;
1898 }
1899
1900 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
1901 enum iscsi_host_param param, char *buf)
1902 {
1903 struct scsi_qla_host *ha = to_qla_host(shost);
1904 int len;
1905
1906 switch (param) {
1907 case ISCSI_HOST_PARAM_HWADDRESS:
1908 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
1909 break;
1910 case ISCSI_HOST_PARAM_IPADDRESS:
1911 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
1912 break;
1913 case ISCSI_HOST_PARAM_INITIATOR_NAME:
1914 len = sprintf(buf, "%s\n", ha->name_string);
1915 break;
1916 case ISCSI_HOST_PARAM_PORT_STATE:
1917 qla4xxx_set_port_state(shost);
1918 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
1919 break;
1920 case ISCSI_HOST_PARAM_PORT_SPEED:
1921 qla4xxx_set_port_speed(shost);
1922 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
1923 break;
1924 default:
1925 return -ENOSYS;
1926 }
1927
1928 return len;
1929 }
1930
1931 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
1932 {
1933 if (ha->iface_ipv4)
1934 return;
1935
1936 /* IPv4 */
1937 ha->iface_ipv4 = iscsi_create_iface(ha->host,
1938 &qla4xxx_iscsi_transport,
1939 ISCSI_IFACE_TYPE_IPV4, 0, 0);
1940 if (!ha->iface_ipv4)
1941 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
1942 "iface0.\n");
1943 }
1944
1945 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
1946 {
1947 if (!ha->iface_ipv6_0)
1948 /* IPv6 iface-0 */
1949 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
1950 &qla4xxx_iscsi_transport,
1951 ISCSI_IFACE_TYPE_IPV6, 0,
1952 0);
1953 if (!ha->iface_ipv6_0)
1954 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1955 "iface0.\n");
1956
1957 if (!ha->iface_ipv6_1)
1958 /* IPv6 iface-1 */
1959 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
1960 &qla4xxx_iscsi_transport,
1961 ISCSI_IFACE_TYPE_IPV6, 1,
1962 0);
1963 if (!ha->iface_ipv6_1)
1964 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1965 "iface1.\n");
1966 }
1967
1968 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
1969 {
1970 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
1971 qla4xxx_create_ipv4_iface(ha);
1972
1973 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
1974 qla4xxx_create_ipv6_iface(ha);
1975 }
1976
1977 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
1978 {
1979 if (ha->iface_ipv4) {
1980 iscsi_destroy_iface(ha->iface_ipv4);
1981 ha->iface_ipv4 = NULL;
1982 }
1983 }
1984
1985 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
1986 {
1987 if (ha->iface_ipv6_0) {
1988 iscsi_destroy_iface(ha->iface_ipv6_0);
1989 ha->iface_ipv6_0 = NULL;
1990 }
1991 if (ha->iface_ipv6_1) {
1992 iscsi_destroy_iface(ha->iface_ipv6_1);
1993 ha->iface_ipv6_1 = NULL;
1994 }
1995 }
1996
1997 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
1998 {
1999 qla4xxx_destroy_ipv4_iface(ha);
2000 qla4xxx_destroy_ipv6_iface(ha);
2001 }
2002
2003 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
2004 struct iscsi_iface_param_info *iface_param,
2005 struct addr_ctrl_blk *init_fw_cb)
2006 {
2007 /*
2008 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
2009 * iface_num 1 is valid only for IPv6 Addr.
2010 */
2011 switch (iface_param->param) {
2012 case ISCSI_NET_PARAM_IPV6_ADDR:
2013 if (iface_param->iface_num & 0x1)
2014 /* IPv6 Addr 1 */
2015 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
2016 sizeof(init_fw_cb->ipv6_addr1));
2017 else
2018 /* IPv6 Addr 0 */
2019 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
2020 sizeof(init_fw_cb->ipv6_addr0));
2021 break;
2022 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
2023 if (iface_param->iface_num & 0x1)
2024 break;
2025 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
2026 sizeof(init_fw_cb->ipv6_if_id));
2027 break;
2028 case ISCSI_NET_PARAM_IPV6_ROUTER:
2029 if (iface_param->iface_num & 0x1)
2030 break;
2031 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
2032 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
2033 break;
2034 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
2035 /* Autocfg applies to even interface */
2036 if (iface_param->iface_num & 0x1)
2037 break;
2038
2039 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
2040 init_fw_cb->ipv6_addtl_opts &=
2041 cpu_to_le16(
2042 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
2043 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
2044 init_fw_cb->ipv6_addtl_opts |=
2045 cpu_to_le16(
2046 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
2047 else
2048 ql4_printk(KERN_ERR, ha,
2049 "Invalid autocfg setting for IPv6 addr\n");
2050 break;
2051 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
2052 /* Autocfg applies to even interface */
2053 if (iface_param->iface_num & 0x1)
2054 break;
2055
2056 if (iface_param->value[0] ==
2057 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
2058 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
2059 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
2060 else if (iface_param->value[0] ==
2061 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
2062 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
2063 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
2064 else
2065 ql4_printk(KERN_ERR, ha,
2066 "Invalid autocfg setting for IPv6 linklocal addr\n");
2067 break;
2068 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
2069 /* Autocfg applies to even interface */
2070 if (iface_param->iface_num & 0x1)
2071 break;
2072
2073 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
2074 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
2075 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
2076 break;
2077 case ISCSI_NET_PARAM_IFACE_ENABLE:
2078 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
2079 init_fw_cb->ipv6_opts |=
2080 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
2081 qla4xxx_create_ipv6_iface(ha);
2082 } else {
2083 init_fw_cb->ipv6_opts &=
2084 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
2085 0xFFFF);
2086 qla4xxx_destroy_ipv6_iface(ha);
2087 }
2088 break;
2089 case ISCSI_NET_PARAM_VLAN_TAG:
2090 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
2091 break;
2092 init_fw_cb->ipv6_vlan_tag =
2093 cpu_to_be16(*(uint16_t *)iface_param->value);
2094 break;
2095 case ISCSI_NET_PARAM_VLAN_ENABLED:
2096 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
2097 init_fw_cb->ipv6_opts |=
2098 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
2099 else
2100 init_fw_cb->ipv6_opts &=
2101 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
2102 break;
2103 case ISCSI_NET_PARAM_MTU:
2104 init_fw_cb->eth_mtu_size =
2105 cpu_to_le16(*(uint16_t *)iface_param->value);
2106 break;
2107 case ISCSI_NET_PARAM_PORT:
2108 /* Autocfg applies to even interface */
2109 if (iface_param->iface_num & 0x1)
2110 break;
2111
2112 init_fw_cb->ipv6_port =
2113 cpu_to_le16(*(uint16_t *)iface_param->value);
2114 break;
2115 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
2116 if (iface_param->iface_num & 0x1)
2117 break;
2118 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2119 init_fw_cb->ipv6_tcp_opts |=
2120 cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE);
2121 else
2122 init_fw_cb->ipv6_tcp_opts &=
2123 cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE &
2124 0xFFFF);
2125 break;
2126 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
2127 if (iface_param->iface_num & 0x1)
2128 break;
2129 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2130 init_fw_cb->ipv6_tcp_opts |=
2131 cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
2132 else
2133 init_fw_cb->ipv6_tcp_opts &=
2134 cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE);
2135 break;
2136 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
2137 if (iface_param->iface_num & 0x1)
2138 break;
2139 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2140 init_fw_cb->ipv6_tcp_opts |=
2141 cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
2142 else
2143 init_fw_cb->ipv6_tcp_opts &=
2144 cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE);
2145 break;
2146 case ISCSI_NET_PARAM_TCP_WSF:
2147 if (iface_param->iface_num & 0x1)
2148 break;
2149 init_fw_cb->ipv6_tcp_wsf = iface_param->value[0];
2150 break;
2151 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
2152 if (iface_param->iface_num & 0x1)
2153 break;
2154 init_fw_cb->ipv6_tcp_opts &=
2155 cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE);
2156 init_fw_cb->ipv6_tcp_opts |=
2157 cpu_to_le16((iface_param->value[0] << 1) &
2158 IPV6_TCPOPT_TIMER_SCALE);
2159 break;
2160 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
2161 if (iface_param->iface_num & 0x1)
2162 break;
2163 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2164 init_fw_cb->ipv6_tcp_opts |=
2165 cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN);
2166 else
2167 init_fw_cb->ipv6_tcp_opts &=
2168 cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN);
2169 break;
2170 case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN:
2171 if (iface_param->iface_num & 0x1)
2172 break;
2173 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2174 init_fw_cb->ipv6_opts |=
2175 cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
2176 else
2177 init_fw_cb->ipv6_opts &=
2178 cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN);
2179 break;
2180 case ISCSI_NET_PARAM_REDIRECT_EN:
2181 if (iface_param->iface_num & 0x1)
2182 break;
2183 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2184 init_fw_cb->ipv6_opts |=
2185 cpu_to_le16(IPV6_OPT_REDIRECT_EN);
2186 else
2187 init_fw_cb->ipv6_opts &=
2188 cpu_to_le16(~IPV6_OPT_REDIRECT_EN);
2189 break;
2190 case ISCSI_NET_PARAM_IPV6_MLD_EN:
2191 if (iface_param->iface_num & 0x1)
2192 break;
2193 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2194 init_fw_cb->ipv6_addtl_opts |=
2195 cpu_to_le16(IPV6_ADDOPT_MLD_EN);
2196 else
2197 init_fw_cb->ipv6_addtl_opts &=
2198 cpu_to_le16(~IPV6_ADDOPT_MLD_EN);
2199 break;
2200 case ISCSI_NET_PARAM_IPV6_FLOW_LABEL:
2201 if (iface_param->iface_num & 0x1)
2202 break;
2203 init_fw_cb->ipv6_flow_lbl =
2204 cpu_to_le16(*(uint16_t *)iface_param->value);
2205 break;
2206 case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:
2207 if (iface_param->iface_num & 0x1)
2208 break;
2209 init_fw_cb->ipv6_traffic_class = iface_param->value[0];
2210 break;
2211 case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:
2212 if (iface_param->iface_num & 0x1)
2213 break;
2214 init_fw_cb->ipv6_hop_limit = iface_param->value[0];
2215 break;
2216 case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:
2217 if (iface_param->iface_num & 0x1)
2218 break;
2219 init_fw_cb->ipv6_nd_reach_time =
2220 cpu_to_le32(*(uint32_t *)iface_param->value);
2221 break;
2222 case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:
2223 if (iface_param->iface_num & 0x1)
2224 break;
2225 init_fw_cb->ipv6_nd_rexmit_timer =
2226 cpu_to_le32(*(uint32_t *)iface_param->value);
2227 break;
2228 case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO:
2229 if (iface_param->iface_num & 0x1)
2230 break;
2231 init_fw_cb->ipv6_nd_stale_timeout =
2232 cpu_to_le32(*(uint32_t *)iface_param->value);
2233 break;
2234 case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT:
2235 if (iface_param->iface_num & 0x1)
2236 break;
2237 init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0];
2238 break;
2239 case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU:
2240 if (iface_param->iface_num & 0x1)
2241 break;
2242 init_fw_cb->ipv6_gw_advrt_mtu =
2243 cpu_to_le32(*(uint32_t *)iface_param->value);
2244 break;
2245 default:
2246 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
2247 iface_param->param);
2248 break;
2249 }
2250 }
2251
2252 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
2253 struct iscsi_iface_param_info *iface_param,
2254 struct addr_ctrl_blk *init_fw_cb)
2255 {
2256 switch (iface_param->param) {
2257 case ISCSI_NET_PARAM_IPV4_ADDR:
2258 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
2259 sizeof(init_fw_cb->ipv4_addr));
2260 break;
2261 case ISCSI_NET_PARAM_IPV4_SUBNET:
2262 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
2263 sizeof(init_fw_cb->ipv4_subnet));
2264 break;
2265 case ISCSI_NET_PARAM_IPV4_GW:
2266 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
2267 sizeof(init_fw_cb->ipv4_gw_addr));
2268 break;
2269 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
2270 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
2271 init_fw_cb->ipv4_tcp_opts |=
2272 cpu_to_le16(TCPOPT_DHCP_ENABLE);
2273 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
2274 init_fw_cb->ipv4_tcp_opts &=
2275 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
2276 else
2277 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
2278 break;
2279 case ISCSI_NET_PARAM_IFACE_ENABLE:
2280 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
2281 init_fw_cb->ipv4_ip_opts |=
2282 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
2283 qla4xxx_create_ipv4_iface(ha);
2284 } else {
2285 init_fw_cb->ipv4_ip_opts &=
2286 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
2287 0xFFFF);
2288 qla4xxx_destroy_ipv4_iface(ha);
2289 }
2290 break;
2291 case ISCSI_NET_PARAM_VLAN_TAG:
2292 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
2293 break;
2294 init_fw_cb->ipv4_vlan_tag =
2295 cpu_to_be16(*(uint16_t *)iface_param->value);
2296 break;
2297 case ISCSI_NET_PARAM_VLAN_ENABLED:
2298 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
2299 init_fw_cb->ipv4_ip_opts |=
2300 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
2301 else
2302 init_fw_cb->ipv4_ip_opts &=
2303 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
2304 break;
2305 case ISCSI_NET_PARAM_MTU:
2306 init_fw_cb->eth_mtu_size =
2307 cpu_to_le16(*(uint16_t *)iface_param->value);
2308 break;
2309 case ISCSI_NET_PARAM_PORT:
2310 init_fw_cb->ipv4_port =
2311 cpu_to_le16(*(uint16_t *)iface_param->value);
2312 break;
2313 case ISCSI_NET_PARAM_DELAYED_ACK_EN:
2314 if (iface_param->iface_num & 0x1)
2315 break;
2316 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2317 init_fw_cb->ipv4_tcp_opts |=
2318 cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE);
2319 else
2320 init_fw_cb->ipv4_tcp_opts &=
2321 cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE &
2322 0xFFFF);
2323 break;
2324 case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE:
2325 if (iface_param->iface_num & 0x1)
2326 break;
2327 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2328 init_fw_cb->ipv4_tcp_opts |=
2329 cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE);
2330 else
2331 init_fw_cb->ipv4_tcp_opts &=
2332 cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE);
2333 break;
2334 case ISCSI_NET_PARAM_TCP_WSF_DISABLE:
2335 if (iface_param->iface_num & 0x1)
2336 break;
2337 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2338 init_fw_cb->ipv4_tcp_opts |=
2339 cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE);
2340 else
2341 init_fw_cb->ipv4_tcp_opts &=
2342 cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE);
2343 break;
2344 case ISCSI_NET_PARAM_TCP_WSF:
2345 if (iface_param->iface_num & 0x1)
2346 break;
2347 init_fw_cb->ipv4_tcp_wsf = iface_param->value[0];
2348 break;
2349 case ISCSI_NET_PARAM_TCP_TIMER_SCALE:
2350 if (iface_param->iface_num & 0x1)
2351 break;
2352 init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE);
2353 init_fw_cb->ipv4_tcp_opts |=
2354 cpu_to_le16((iface_param->value[0] << 1) &
2355 TCPOPT_TIMER_SCALE);
2356 break;
2357 case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN:
2358 if (iface_param->iface_num & 0x1)
2359 break;
2360 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2361 init_fw_cb->ipv4_tcp_opts |=
2362 cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE);
2363 else
2364 init_fw_cb->ipv4_tcp_opts &=
2365 cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE);
2366 break;
2367 case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN:
2368 if (iface_param->iface_num & 0x1)
2369 break;
2370 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2371 init_fw_cb->ipv4_tcp_opts |=
2372 cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN);
2373 else
2374 init_fw_cb->ipv4_tcp_opts &=
2375 cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN);
2376 break;
2377 case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN:
2378 if (iface_param->iface_num & 0x1)
2379 break;
2380 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2381 init_fw_cb->ipv4_tcp_opts |=
2382 cpu_to_le16(TCPOPT_SLP_DA_INFO_EN);
2383 else
2384 init_fw_cb->ipv4_tcp_opts &=
2385 cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN);
2386 break;
2387 case ISCSI_NET_PARAM_IPV4_TOS_EN:
2388 if (iface_param->iface_num & 0x1)
2389 break;
2390 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2391 init_fw_cb->ipv4_ip_opts |=
2392 cpu_to_le16(IPOPT_IPV4_TOS_EN);
2393 else
2394 init_fw_cb->ipv4_ip_opts &=
2395 cpu_to_le16(~IPOPT_IPV4_TOS_EN);
2396 break;
2397 case ISCSI_NET_PARAM_IPV4_TOS:
2398 if (iface_param->iface_num & 0x1)
2399 break;
2400 init_fw_cb->ipv4_tos = iface_param->value[0];
2401 break;
2402 case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN:
2403 if (iface_param->iface_num & 0x1)
2404 break;
2405 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2406 init_fw_cb->ipv4_ip_opts |=
2407 cpu_to_le16(IPOPT_GRAT_ARP_EN);
2408 else
2409 init_fw_cb->ipv4_ip_opts &=
2410 cpu_to_le16(~IPOPT_GRAT_ARP_EN);
2411 break;
2412 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN:
2413 if (iface_param->iface_num & 0x1)
2414 break;
2415 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2416 init_fw_cb->ipv4_ip_opts |=
2417 cpu_to_le16(IPOPT_ALT_CID_EN);
2418 else
2419 init_fw_cb->ipv4_ip_opts &=
2420 cpu_to_le16(~IPOPT_ALT_CID_EN);
2421 break;
2422 case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID:
2423 if (iface_param->iface_num & 0x1)
2424 break;
2425 memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value,
2426 (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1));
2427 init_fw_cb->ipv4_dhcp_alt_cid_len =
2428 strlen(init_fw_cb->ipv4_dhcp_alt_cid);
2429 break;
2430 case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN:
2431 if (iface_param->iface_num & 0x1)
2432 break;
2433 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2434 init_fw_cb->ipv4_ip_opts |=
2435 cpu_to_le16(IPOPT_REQ_VID_EN);
2436 else
2437 init_fw_cb->ipv4_ip_opts &=
2438 cpu_to_le16(~IPOPT_REQ_VID_EN);
2439 break;
2440 case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN:
2441 if (iface_param->iface_num & 0x1)
2442 break;
2443 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2444 init_fw_cb->ipv4_ip_opts |=
2445 cpu_to_le16(IPOPT_USE_VID_EN);
2446 else
2447 init_fw_cb->ipv4_ip_opts &=
2448 cpu_to_le16(~IPOPT_USE_VID_EN);
2449 break;
2450 case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID:
2451 if (iface_param->iface_num & 0x1)
2452 break;
2453 memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value,
2454 (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1));
2455 init_fw_cb->ipv4_dhcp_vid_len =
2456 strlen(init_fw_cb->ipv4_dhcp_vid);
2457 break;
2458 case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN:
2459 if (iface_param->iface_num & 0x1)
2460 break;
2461 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2462 init_fw_cb->ipv4_ip_opts |=
2463 cpu_to_le16(IPOPT_LEARN_IQN_EN);
2464 else
2465 init_fw_cb->ipv4_ip_opts &=
2466 cpu_to_le16(~IPOPT_LEARN_IQN_EN);
2467 break;
2468 case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE:
2469 if (iface_param->iface_num & 0x1)
2470 break;
2471 if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE)
2472 init_fw_cb->ipv4_ip_opts |=
2473 cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE);
2474 else
2475 init_fw_cb->ipv4_ip_opts &=
2476 cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE);
2477 break;
2478 case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN:
2479 if (iface_param->iface_num & 0x1)
2480 break;
2481 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2482 init_fw_cb->ipv4_ip_opts |=
2483 cpu_to_le16(IPOPT_IN_FORWARD_EN);
2484 else
2485 init_fw_cb->ipv4_ip_opts &=
2486 cpu_to_le16(~IPOPT_IN_FORWARD_EN);
2487 break;
2488 case ISCSI_NET_PARAM_REDIRECT_EN:
2489 if (iface_param->iface_num & 0x1)
2490 break;
2491 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2492 init_fw_cb->ipv4_ip_opts |=
2493 cpu_to_le16(IPOPT_ARP_REDIRECT_EN);
2494 else
2495 init_fw_cb->ipv4_ip_opts &=
2496 cpu_to_le16(~IPOPT_ARP_REDIRECT_EN);
2497 break;
2498 case ISCSI_NET_PARAM_IPV4_TTL:
2499 if (iface_param->iface_num & 0x1)
2500 break;
2501 init_fw_cb->ipv4_ttl = iface_param->value[0];
2502 break;
2503 default:
2504 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
2505 iface_param->param);
2506 break;
2507 }
2508 }
2509
2510 static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha,
2511 struct iscsi_iface_param_info *iface_param,
2512 struct addr_ctrl_blk *init_fw_cb)
2513 {
2514 switch (iface_param->param) {
2515 case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO:
2516 if (iface_param->iface_num & 0x1)
2517 break;
2518 init_fw_cb->def_timeout =
2519 cpu_to_le16(*(uint16_t *)iface_param->value);
2520 break;
2521 case ISCSI_IFACE_PARAM_HDRDGST_EN:
2522 if (iface_param->iface_num & 0x1)
2523 break;
2524 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2525 init_fw_cb->iscsi_opts |=
2526 cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN);
2527 else
2528 init_fw_cb->iscsi_opts &=
2529 cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN);
2530 break;
2531 case ISCSI_IFACE_PARAM_DATADGST_EN:
2532 if (iface_param->iface_num & 0x1)
2533 break;
2534 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2535 init_fw_cb->iscsi_opts |=
2536 cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN);
2537 else
2538 init_fw_cb->iscsi_opts &=
2539 cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN);
2540 break;
2541 case ISCSI_IFACE_PARAM_IMM_DATA_EN:
2542 if (iface_param->iface_num & 0x1)
2543 break;
2544 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2545 init_fw_cb->iscsi_opts |=
2546 cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN);
2547 else
2548 init_fw_cb->iscsi_opts &=
2549 cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN);
2550 break;
2551 case ISCSI_IFACE_PARAM_INITIAL_R2T_EN:
2552 if (iface_param->iface_num & 0x1)
2553 break;
2554 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2555 init_fw_cb->iscsi_opts |=
2556 cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN);
2557 else
2558 init_fw_cb->iscsi_opts &=
2559 cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN);
2560 break;
2561 case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN:
2562 if (iface_param->iface_num & 0x1)
2563 break;
2564 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2565 init_fw_cb->iscsi_opts |=
2566 cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN);
2567 else
2568 init_fw_cb->iscsi_opts &=
2569 cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN);
2570 break;
2571 case ISCSI_IFACE_PARAM_PDU_INORDER_EN:
2572 if (iface_param->iface_num & 0x1)
2573 break;
2574 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2575 init_fw_cb->iscsi_opts |=
2576 cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN);
2577 else
2578 init_fw_cb->iscsi_opts &=
2579 cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN);
2580 break;
2581 case ISCSI_IFACE_PARAM_ERL:
2582 if (iface_param->iface_num & 0x1)
2583 break;
2584 init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL);
2585 init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] &
2586 ISCSIOPTS_ERL);
2587 break;
2588 case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH:
2589 if (iface_param->iface_num & 0x1)
2590 break;
2591 init_fw_cb->iscsi_max_pdu_size =
2592 cpu_to_le32(*(uint32_t *)iface_param->value) /
2593 BYTE_UNITS;
2594 break;
2595 case ISCSI_IFACE_PARAM_FIRST_BURST:
2596 if (iface_param->iface_num & 0x1)
2597 break;
2598 init_fw_cb->iscsi_fburst_len =
2599 cpu_to_le32(*(uint32_t *)iface_param->value) /
2600 BYTE_UNITS;
2601 break;
2602 case ISCSI_IFACE_PARAM_MAX_R2T:
2603 if (iface_param->iface_num & 0x1)
2604 break;
2605 init_fw_cb->iscsi_max_outstnd_r2t =
2606 cpu_to_le16(*(uint16_t *)iface_param->value);
2607 break;
2608 case ISCSI_IFACE_PARAM_MAX_BURST:
2609 if (iface_param->iface_num & 0x1)
2610 break;
2611 init_fw_cb->iscsi_max_burst_len =
2612 cpu_to_le32(*(uint32_t *)iface_param->value) /
2613 BYTE_UNITS;
2614 break;
2615 case ISCSI_IFACE_PARAM_CHAP_AUTH_EN:
2616 if (iface_param->iface_num & 0x1)
2617 break;
2618 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2619 init_fw_cb->iscsi_opts |=
2620 cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN);
2621 else
2622 init_fw_cb->iscsi_opts &=
2623 cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN);
2624 break;
2625 case ISCSI_IFACE_PARAM_BIDI_CHAP_EN:
2626 if (iface_param->iface_num & 0x1)
2627 break;
2628 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2629 init_fw_cb->iscsi_opts |=
2630 cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN);
2631 else
2632 init_fw_cb->iscsi_opts &=
2633 cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN);
2634 break;
2635 case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL:
2636 if (iface_param->iface_num & 0x1)
2637 break;
2638 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2639 init_fw_cb->iscsi_opts |=
2640 cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN);
2641 else
2642 init_fw_cb->iscsi_opts &=
2643 cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN);
2644 break;
2645 case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN:
2646 if (iface_param->iface_num & 0x1)
2647 break;
2648 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2649 init_fw_cb->iscsi_opts |=
2650 cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN);
2651 else
2652 init_fw_cb->iscsi_opts &=
2653 cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN);
2654 break;
2655 case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN:
2656 if (iface_param->iface_num & 0x1)
2657 break;
2658 if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE)
2659 init_fw_cb->iscsi_opts |=
2660 cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN);
2661 else
2662 init_fw_cb->iscsi_opts &=
2663 cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN);
2664 break;
2665 default:
2666 ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n",
2667 iface_param->param);
2668 break;
2669 }
2670 }
2671
2672 static void
2673 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
2674 {
2675 struct addr_ctrl_blk_def *acb;
2676 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
2677 memset(acb->reserved1, 0, sizeof(acb->reserved1));
2678 memset(acb->reserved2, 0, sizeof(acb->reserved2));
2679 memset(acb->reserved3, 0, sizeof(acb->reserved3));
2680 memset(acb->reserved4, 0, sizeof(acb->reserved4));
2681 memset(acb->reserved5, 0, sizeof(acb->reserved5));
2682 memset(acb->reserved6, 0, sizeof(acb->reserved6));
2683 memset(acb->reserved7, 0, sizeof(acb->reserved7));
2684 memset(acb->reserved8, 0, sizeof(acb->reserved8));
2685 memset(acb->reserved9, 0, sizeof(acb->reserved9));
2686 memset(acb->reserved10, 0, sizeof(acb->reserved10));
2687 memset(acb->reserved11, 0, sizeof(acb->reserved11));
2688 memset(acb->reserved12, 0, sizeof(acb->reserved12));
2689 memset(acb->reserved13, 0, sizeof(acb->reserved13));
2690 memset(acb->reserved14, 0, sizeof(acb->reserved14));
2691 memset(acb->reserved15, 0, sizeof(acb->reserved15));
2692 }
2693
2694 static int
2695 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
2696 {
2697 struct scsi_qla_host *ha = to_qla_host(shost);
2698 int rval = 0;
2699 struct iscsi_iface_param_info *iface_param = NULL;
2700 struct addr_ctrl_blk *init_fw_cb = NULL;
2701 dma_addr_t init_fw_cb_dma;
2702 uint32_t mbox_cmd[MBOX_REG_COUNT];
2703 uint32_t mbox_sts[MBOX_REG_COUNT];
2704 uint32_t rem = len;
2705 struct nlattr *attr;
2706
2707 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
2708 sizeof(struct addr_ctrl_blk),
2709 &init_fw_cb_dma, GFP_KERNEL);
2710 if (!init_fw_cb) {
2711 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
2712 __func__);
2713 return -ENOMEM;
2714 }
2715
2716 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
2717 memset(&mbox_sts, 0, sizeof(mbox_sts));
2718
2719 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
2720 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
2721 rval = -EIO;
2722 goto exit_init_fw_cb;
2723 }
2724
2725 nla_for_each_attr(attr, data, len, rem) {
2726 iface_param = nla_data(attr);
2727
2728 if (iface_param->param_type == ISCSI_NET_PARAM) {
2729 switch (iface_param->iface_type) {
2730 case ISCSI_IFACE_TYPE_IPV4:
2731 switch (iface_param->iface_num) {
2732 case 0:
2733 qla4xxx_set_ipv4(ha, iface_param,
2734 init_fw_cb);
2735 break;
2736 default:
2737 /* Cannot have more than one IPv4 interface */
2738 ql4_printk(KERN_ERR, ha,
2739 "Invalid IPv4 iface number = %d\n",
2740 iface_param->iface_num);
2741 break;
2742 }
2743 break;
2744 case ISCSI_IFACE_TYPE_IPV6:
2745 switch (iface_param->iface_num) {
2746 case 0:
2747 case 1:
2748 qla4xxx_set_ipv6(ha, iface_param,
2749 init_fw_cb);
2750 break;
2751 default:
2752 /* Cannot have more than two IPv6 interface */
2753 ql4_printk(KERN_ERR, ha,
2754 "Invalid IPv6 iface number = %d\n",
2755 iface_param->iface_num);
2756 break;
2757 }
2758 break;
2759 default:
2760 ql4_printk(KERN_ERR, ha,
2761 "Invalid iface type\n");
2762 break;
2763 }
2764 } else if (iface_param->param_type == ISCSI_IFACE_PARAM) {
2765 qla4xxx_set_iscsi_param(ha, iface_param,
2766 init_fw_cb);
2767 } else {
2768 continue;
2769 }
2770 }
2771
2772 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
2773
2774 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
2775 sizeof(struct addr_ctrl_blk),
2776 FLASH_OPT_RMW_COMMIT);
2777 if (rval != QLA_SUCCESS) {
2778 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
2779 __func__);
2780 rval = -EIO;
2781 goto exit_init_fw_cb;
2782 }
2783
2784 rval = qla4xxx_disable_acb(ha);
2785 if (rval != QLA_SUCCESS) {
2786 ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
2787 __func__);
2788 rval = -EIO;
2789 goto exit_init_fw_cb;
2790 }
2791
2792 wait_for_completion_timeout(&ha->disable_acb_comp,
2793 DISABLE_ACB_TOV * HZ);
2794
2795 qla4xxx_initcb_to_acb(init_fw_cb);
2796
2797 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
2798 if (rval != QLA_SUCCESS) {
2799 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
2800 __func__);
2801 rval = -EIO;
2802 goto exit_init_fw_cb;
2803 }
2804
2805 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
2806 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
2807 init_fw_cb_dma);
2808
2809 exit_init_fw_cb:
2810 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
2811 init_fw_cb, init_fw_cb_dma);
2812
2813 return rval;
2814 }
2815
2816 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
2817 enum iscsi_param param, char *buf)
2818 {
2819 struct iscsi_session *sess = cls_sess->dd_data;
2820 struct ddb_entry *ddb_entry = sess->dd_data;
2821 struct scsi_qla_host *ha = ddb_entry->ha;
2822 struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
2823 struct ql4_chap_table chap_tbl;
2824 int rval, len;
2825 uint16_t idx;
2826
2827 memset(&chap_tbl, 0, sizeof(chap_tbl));
2828 switch (param) {
2829 case ISCSI_PARAM_CHAP_IN_IDX:
2830 rval = qla4xxx_get_chap_index(ha, sess->username_in,
2831 sess->password_in, BIDI_CHAP,
2832 &idx);
2833 if (rval)
2834 len = sprintf(buf, "\n");
2835 else
2836 len = sprintf(buf, "%hu\n", idx);
2837 break;
2838 case ISCSI_PARAM_CHAP_OUT_IDX:
2839 if (ddb_entry->ddb_type == FLASH_DDB) {
2840 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
2841 idx = ddb_entry->chap_tbl_idx;
2842 rval = QLA_SUCCESS;
2843 } else {
2844 rval = QLA_ERROR;
2845 }
2846 } else {
2847 rval = qla4xxx_get_chap_index(ha, sess->username,
2848 sess->password,
2849 LOCAL_CHAP, &idx);
2850 }
2851 if (rval)
2852 len = sprintf(buf, "\n");
2853 else
2854 len = sprintf(buf, "%hu\n", idx);
2855 break;
2856 case ISCSI_PARAM_USERNAME:
2857 case ISCSI_PARAM_PASSWORD:
2858 /* First, populate session username and password for FLASH DDB,
2859 * if not already done. This happens when session login fails
2860 * for a FLASH DDB.
2861 */
2862 if (ddb_entry->ddb_type == FLASH_DDB &&
2863 ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
2864 !sess->username && !sess->password) {
2865 idx = ddb_entry->chap_tbl_idx;
2866 rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
2867 chap_tbl.secret,
2868 idx);
2869 if (!rval) {
2870 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
2871 (char *)chap_tbl.name,
2872 strlen((char *)chap_tbl.name));
2873 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
2874 (char *)chap_tbl.secret,
2875 chap_tbl.secret_len);
2876 }
2877 }
2878 /* allow fall-through */
2879 default:
2880 return iscsi_session_get_param(cls_sess, param, buf);
2881 }
2882
2883 return len;
2884 }
2885
2886 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
2887 enum iscsi_param param, char *buf)
2888 {
2889 struct iscsi_conn *conn;
2890 struct qla_conn *qla_conn;
2891 struct sockaddr *dst_addr;
2892
2893 conn = cls_conn->dd_data;
2894 qla_conn = conn->dd_data;
2895 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
2896
2897 switch (param) {
2898 case ISCSI_PARAM_CONN_PORT:
2899 case ISCSI_PARAM_CONN_ADDRESS:
2900 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
2901 dst_addr, param, buf);
2902 default:
2903 return iscsi_conn_get_param(cls_conn, param, buf);
2904 }
2905 }
2906
2907 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
2908 {
2909 uint32_t mbx_sts = 0;
2910 uint16_t tmp_ddb_index;
2911 int ret;
2912
2913 get_ddb_index:
2914 tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
2915
2916 if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
2917 DEBUG2(ql4_printk(KERN_INFO, ha,
2918 "Free DDB index not available\n"));
2919 ret = QLA_ERROR;
2920 goto exit_get_ddb_index;
2921 }
2922
2923 if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
2924 goto get_ddb_index;
2925
2926 DEBUG2(ql4_printk(KERN_INFO, ha,
2927 "Found a free DDB index at %d\n", tmp_ddb_index));
2928 ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
2929 if (ret == QLA_ERROR) {
2930 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
2931 ql4_printk(KERN_INFO, ha,
2932 "DDB index = %d not available trying next\n",
2933 tmp_ddb_index);
2934 goto get_ddb_index;
2935 }
2936 DEBUG2(ql4_printk(KERN_INFO, ha,
2937 "Free FW DDB not available\n"));
2938 }
2939
2940 *ddb_index = tmp_ddb_index;
2941
2942 exit_get_ddb_index:
2943 return ret;
2944 }
2945
2946 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
2947 struct ddb_entry *ddb_entry,
2948 char *existing_ipaddr,
2949 char *user_ipaddr)
2950 {
2951 uint8_t dst_ipaddr[IPv6_ADDR_LEN];
2952 char formatted_ipaddr[DDB_IPADDR_LEN];
2953 int status = QLA_SUCCESS, ret = 0;
2954
2955 if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
2956 ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
2957 '\0', NULL);
2958 if (ret == 0) {
2959 status = QLA_ERROR;
2960 goto out_match;
2961 }
2962 ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
2963 } else {
2964 ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
2965 '\0', NULL);
2966 if (ret == 0) {
2967 status = QLA_ERROR;
2968 goto out_match;
2969 }
2970 ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
2971 }
2972
2973 if (strcmp(existing_ipaddr, formatted_ipaddr))
2974 status = QLA_ERROR;
2975
2976 out_match:
2977 return status;
2978 }
2979
2980 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
2981 struct iscsi_cls_conn *cls_conn)
2982 {
2983 int idx = 0, max_ddbs, rval;
2984 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
2985 struct iscsi_session *sess, *existing_sess;
2986 struct iscsi_conn *conn, *existing_conn;
2987 struct ddb_entry *ddb_entry;
2988
2989 sess = cls_sess->dd_data;
2990 conn = cls_conn->dd_data;
2991
2992 if (sess->targetname == NULL ||
2993 conn->persistent_address == NULL ||
2994 conn->persistent_port == 0)
2995 return QLA_ERROR;
2996
2997 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
2998 MAX_DEV_DB_ENTRIES;
2999
3000 for (idx = 0; idx < max_ddbs; idx++) {
3001 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
3002 if (ddb_entry == NULL)
3003 continue;
3004
3005 if (ddb_entry->ddb_type != FLASH_DDB)
3006 continue;
3007
3008 existing_sess = ddb_entry->sess->dd_data;
3009 existing_conn = ddb_entry->conn->dd_data;
3010
3011 if (existing_sess->targetname == NULL ||
3012 existing_conn->persistent_address == NULL ||
3013 existing_conn->persistent_port == 0)
3014 continue;
3015
3016 DEBUG2(ql4_printk(KERN_INFO, ha,
3017 "IQN = %s User IQN = %s\n",
3018 existing_sess->targetname,
3019 sess->targetname));
3020
3021 DEBUG2(ql4_printk(KERN_INFO, ha,
3022 "IP = %s User IP = %s\n",
3023 existing_conn->persistent_address,
3024 conn->persistent_address));
3025
3026 DEBUG2(ql4_printk(KERN_INFO, ha,
3027 "Port = %d User Port = %d\n",
3028 existing_conn->persistent_port,
3029 conn->persistent_port));
3030
3031 if (strcmp(existing_sess->targetname, sess->targetname))
3032 continue;
3033 rval = qla4xxx_match_ipaddress(ha, ddb_entry,
3034 existing_conn->persistent_address,
3035 conn->persistent_address);
3036 if (rval == QLA_ERROR)
3037 continue;
3038 if (existing_conn->persistent_port != conn->persistent_port)
3039 continue;
3040 break;
3041 }
3042
3043 if (idx == max_ddbs)
3044 return QLA_ERROR;
3045
3046 DEBUG2(ql4_printk(KERN_INFO, ha,
3047 "Match found in fwdb sessions\n"));
3048 return QLA_SUCCESS;
3049 }
3050
3051 static struct iscsi_cls_session *
3052 qla4xxx_session_create(struct iscsi_endpoint *ep,
3053 uint16_t cmds_max, uint16_t qdepth,
3054 uint32_t initial_cmdsn)
3055 {
3056 struct iscsi_cls_session *cls_sess;
3057 struct scsi_qla_host *ha;
3058 struct qla_endpoint *qla_ep;
3059 struct ddb_entry *ddb_entry;
3060 uint16_t ddb_index;
3061 struct iscsi_session *sess;
3062 struct sockaddr *dst_addr;
3063 int ret;
3064
3065 if (!ep) {
3066 printk(KERN_ERR "qla4xxx: missing ep.\n");
3067 return NULL;
3068 }
3069
3070 qla_ep = ep->dd_data;
3071 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
3072 ha = to_qla_host(qla_ep->host);
3073 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
3074 ha->host_no));
3075
3076 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
3077 if (ret == QLA_ERROR)
3078 return NULL;
3079
3080 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
3081 cmds_max, sizeof(struct ddb_entry),
3082 sizeof(struct ql4_task_data),
3083 initial_cmdsn, ddb_index);
3084 if (!cls_sess)
3085 return NULL;
3086
3087 sess = cls_sess->dd_data;
3088 ddb_entry = sess->dd_data;
3089 ddb_entry->fw_ddb_index = ddb_index;
3090 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
3091 ddb_entry->ha = ha;
3092 ddb_entry->sess = cls_sess;
3093 ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
3094 ddb_entry->ddb_change = qla4xxx_ddb_change;
3095 clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);
3096 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
3097 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
3098 ha->tot_ddbs++;
3099
3100 return cls_sess;
3101 }
3102
3103 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
3104 {
3105 struct iscsi_session *sess;
3106 struct ddb_entry *ddb_entry;
3107 struct scsi_qla_host *ha;
3108 unsigned long flags, wtime;
3109 struct dev_db_entry *fw_ddb_entry = NULL;
3110 dma_addr_t fw_ddb_entry_dma;
3111 uint32_t ddb_state;
3112 int ret;
3113
3114 sess = cls_sess->dd_data;
3115 ddb_entry = sess->dd_data;
3116 ha = ddb_entry->ha;
3117 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
3118 ha->host_no));
3119
3120 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3121 &fw_ddb_entry_dma, GFP_KERNEL);
3122 if (!fw_ddb_entry) {
3123 ql4_printk(KERN_ERR, ha,
3124 "%s: Unable to allocate dma buffer\n", __func__);
3125 goto destroy_session;
3126 }
3127
3128 wtime = jiffies + (HZ * LOGOUT_TOV);
3129 do {
3130 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
3131 fw_ddb_entry, fw_ddb_entry_dma,
3132 NULL, NULL, &ddb_state, NULL,
3133 NULL, NULL);
3134 if (ret == QLA_ERROR)
3135 goto destroy_session;
3136
3137 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
3138 (ddb_state == DDB_DS_SESSION_FAILED))
3139 goto destroy_session;
3140
3141 schedule_timeout_uninterruptible(HZ);
3142 } while ((time_after(wtime, jiffies)));
3143
3144 destroy_session:
3145 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
3146 if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags))
3147 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
3148 spin_lock_irqsave(&ha->hardware_lock, flags);
3149 qla4xxx_free_ddb(ha, ddb_entry);
3150 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3151
3152 iscsi_session_teardown(cls_sess);
3153
3154 if (fw_ddb_entry)
3155 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3156 fw_ddb_entry, fw_ddb_entry_dma);
3157 }
3158
3159 static struct iscsi_cls_conn *
3160 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
3161 {
3162 struct iscsi_cls_conn *cls_conn;
3163 struct iscsi_session *sess;
3164 struct ddb_entry *ddb_entry;
3165 struct scsi_qla_host *ha;
3166
3167 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
3168 conn_idx);
3169 if (!cls_conn) {
3170 pr_info("%s: Can not create connection for conn_idx = %u\n",
3171 __func__, conn_idx);
3172 return NULL;
3173 }
3174
3175 sess = cls_sess->dd_data;
3176 ddb_entry = sess->dd_data;
3177 ddb_entry->conn = cls_conn;
3178
3179 ha = ddb_entry->ha;
3180 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__,
3181 conn_idx));
3182 return cls_conn;
3183 }
3184
3185 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
3186 struct iscsi_cls_conn *cls_conn,
3187 uint64_t transport_fd, int is_leading)
3188 {
3189 struct iscsi_conn *conn;
3190 struct qla_conn *qla_conn;
3191 struct iscsi_endpoint *ep;
3192 struct ddb_entry *ddb_entry;
3193 struct scsi_qla_host *ha;
3194 struct iscsi_session *sess;
3195
3196 sess = cls_session->dd_data;
3197 ddb_entry = sess->dd_data;
3198 ha = ddb_entry->ha;
3199
3200 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
3201 cls_session->sid, cls_conn->cid));
3202
3203 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
3204 return -EINVAL;
3205 ep = iscsi_lookup_endpoint(transport_fd);
3206 conn = cls_conn->dd_data;
3207 qla_conn = conn->dd_data;
3208 qla_conn->qla_ep = ep->dd_data;
3209 return 0;
3210 }
3211
3212 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
3213 {
3214 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
3215 struct iscsi_session *sess;
3216 struct ddb_entry *ddb_entry;
3217 struct scsi_qla_host *ha;
3218 struct dev_db_entry *fw_ddb_entry = NULL;
3219 dma_addr_t fw_ddb_entry_dma;
3220 uint32_t mbx_sts = 0;
3221 int ret = 0;
3222 int status = QLA_SUCCESS;
3223
3224 sess = cls_sess->dd_data;
3225 ddb_entry = sess->dd_data;
3226 ha = ddb_entry->ha;
3227 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__,
3228 cls_sess->sid, cls_conn->cid));
3229
3230 /* Check if we have matching FW DDB, if yes then do not
3231 * login to this target. This could cause target to logout previous
3232 * connection
3233 */
3234 ret = qla4xxx_match_fwdb_session(ha, cls_conn);
3235 if (ret == QLA_SUCCESS) {
3236 ql4_printk(KERN_INFO, ha,
3237 "Session already exist in FW.\n");
3238 ret = -EEXIST;
3239 goto exit_conn_start;
3240 }
3241
3242 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3243 &fw_ddb_entry_dma, GFP_KERNEL);
3244 if (!fw_ddb_entry) {
3245 ql4_printk(KERN_ERR, ha,
3246 "%s: Unable to allocate dma buffer\n", __func__);
3247 ret = -ENOMEM;
3248 goto exit_conn_start;
3249 }
3250
3251 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
3252 if (ret) {
3253 /* If iscsid is stopped and started then no need to do
3254 * set param again since ddb state will be already
3255 * active and FW does not allow set ddb to an
3256 * active session.
3257 */
3258 if (mbx_sts)
3259 if (ddb_entry->fw_ddb_device_state ==
3260 DDB_DS_SESSION_ACTIVE) {
3261 ddb_entry->unblock_sess(ddb_entry->sess);
3262 goto exit_set_param;
3263 }
3264
3265 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
3266 __func__, ddb_entry->fw_ddb_index);
3267 goto exit_conn_start;
3268 }
3269
3270 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
3271 if (status == QLA_ERROR) {
3272 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
3273 sess->targetname);
3274 ret = -EINVAL;
3275 goto exit_conn_start;
3276 }
3277
3278 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
3279 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
3280
3281 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
3282 ddb_entry->fw_ddb_device_state));
3283
3284 exit_set_param:
3285 ret = 0;
3286
3287 exit_conn_start:
3288 if (fw_ddb_entry)
3289 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3290 fw_ddb_entry, fw_ddb_entry_dma);
3291 return ret;
3292 }
3293
3294 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
3295 {
3296 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
3297 struct iscsi_session *sess;
3298 struct scsi_qla_host *ha;
3299 struct ddb_entry *ddb_entry;
3300 int options;
3301
3302 sess = cls_sess->dd_data;
3303 ddb_entry = sess->dd_data;
3304 ha = ddb_entry->ha;
3305 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__,
3306 cls_conn->cid));
3307
3308 options = LOGOUT_OPTION_CLOSE_SESSION;
3309 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
3310 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
3311 }
3312
3313 static void qla4xxx_task_work(struct work_struct *wdata)
3314 {
3315 struct ql4_task_data *task_data;
3316 struct scsi_qla_host *ha;
3317 struct passthru_status *sts;
3318 struct iscsi_task *task;
3319 struct iscsi_hdr *hdr;
3320 uint8_t *data;
3321 uint32_t data_len;
3322 struct iscsi_conn *conn;
3323 int hdr_len;
3324 itt_t itt;
3325
3326 task_data = container_of(wdata, struct ql4_task_data, task_work);
3327 ha = task_data->ha;
3328 task = task_data->task;
3329 sts = &task_data->sts;
3330 hdr_len = sizeof(struct iscsi_hdr);
3331
3332 DEBUG3(printk(KERN_INFO "Status returned\n"));
3333 DEBUG3(qla4xxx_dump_buffer(sts, 64));
3334 DEBUG3(printk(KERN_INFO "Response buffer"));
3335 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
3336
3337 conn = task->conn;
3338
3339 switch (sts->completionStatus) {
3340 case PASSTHRU_STATUS_COMPLETE:
3341 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
3342 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
3343 itt = sts->handle;
3344 hdr->itt = itt;
3345 data = task_data->resp_buffer + hdr_len;
3346 data_len = task_data->resp_len - hdr_len;
3347 iscsi_complete_pdu(conn, hdr, data, data_len);
3348 break;
3349 default:
3350 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
3351 sts->completionStatus);
3352 break;
3353 }
3354 return;
3355 }
3356
3357 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3358 {
3359 struct ql4_task_data *task_data;
3360 struct iscsi_session *sess;
3361 struct ddb_entry *ddb_entry;
3362 struct scsi_qla_host *ha;
3363 int hdr_len;
3364
3365 sess = task->conn->session;
3366 ddb_entry = sess->dd_data;
3367 ha = ddb_entry->ha;
3368 task_data = task->dd_data;
3369 memset(task_data, 0, sizeof(struct ql4_task_data));
3370
3371 if (task->sc) {
3372 ql4_printk(KERN_INFO, ha,
3373 "%s: SCSI Commands not implemented\n", __func__);
3374 return -EINVAL;
3375 }
3376
3377 hdr_len = sizeof(struct iscsi_hdr);
3378 task_data->ha = ha;
3379 task_data->task = task;
3380
3381 if (task->data_count) {
3382 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
3383 task->data_count,
3384 DMA_TO_DEVICE);
3385 }
3386
3387 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
3388 __func__, task->conn->max_recv_dlength, hdr_len));
3389
3390 task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
3391 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
3392 task_data->resp_len,
3393 &task_data->resp_dma,
3394 GFP_ATOMIC);
3395 if (!task_data->resp_buffer)
3396 goto exit_alloc_pdu;
3397
3398 task_data->req_len = task->data_count + hdr_len;
3399 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
3400 task_data->req_len,
3401 &task_data->req_dma,
3402 GFP_ATOMIC);
3403 if (!task_data->req_buffer)
3404 goto exit_alloc_pdu;
3405
3406 task->hdr = task_data->req_buffer;
3407
3408 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
3409
3410 return 0;
3411
3412 exit_alloc_pdu:
3413 if (task_data->resp_buffer)
3414 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
3415 task_data->resp_buffer, task_data->resp_dma);
3416
3417 if (task_data->req_buffer)
3418 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
3419 task_data->req_buffer, task_data->req_dma);
3420 return -ENOMEM;
3421 }
3422
3423 static void qla4xxx_task_cleanup(struct iscsi_task *task)
3424 {
3425 struct ql4_task_data *task_data;
3426 struct iscsi_session *sess;
3427 struct ddb_entry *ddb_entry;
3428 struct scsi_qla_host *ha;
3429 int hdr_len;
3430
3431 hdr_len = sizeof(struct iscsi_hdr);
3432 sess = task->conn->session;
3433 ddb_entry = sess->dd_data;
3434 ha = ddb_entry->ha;
3435 task_data = task->dd_data;
3436
3437 if (task->data_count) {
3438 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
3439 task->data_count, DMA_TO_DEVICE);
3440 }
3441
3442 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
3443 __func__, task->conn->max_recv_dlength, hdr_len));
3444
3445 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
3446 task_data->resp_buffer, task_data->resp_dma);
3447 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
3448 task_data->req_buffer, task_data->req_dma);
3449 return;
3450 }
3451
3452 static int qla4xxx_task_xmit(struct iscsi_task *task)
3453 {
3454 struct scsi_cmnd *sc = task->sc;
3455 struct iscsi_session *sess = task->conn->session;
3456 struct ddb_entry *ddb_entry = sess->dd_data;
3457 struct scsi_qla_host *ha = ddb_entry->ha;
3458
3459 if (!sc)
3460 return qla4xxx_send_passthru0(task);
3461
3462 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
3463 __func__);
3464 return -ENOSYS;
3465 }
3466
3467 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
3468 struct iscsi_bus_flash_conn *conn,
3469 struct dev_db_entry *fw_ddb_entry)
3470 {
3471 unsigned long options = 0;
3472 int rc = 0;
3473
3474 options = le16_to_cpu(fw_ddb_entry->options);
3475 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
3476 if (test_bit(OPT_IPV6_DEVICE, &options)) {
3477 rc = iscsi_switch_str_param(&sess->portal_type,
3478 PORTAL_TYPE_IPV6);
3479 if (rc)
3480 goto exit_copy;
3481 } else {
3482 rc = iscsi_switch_str_param(&sess->portal_type,
3483 PORTAL_TYPE_IPV4);
3484 if (rc)
3485 goto exit_copy;
3486 }
3487
3488 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
3489 &options);
3490 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
3491 sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
3492
3493 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3494 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
3495 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
3496 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
3497 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
3498 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
3499 &options);
3500 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
3501 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
3502 conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
3503 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
3504 &options);
3505 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
3506 sess->discovery_auth_optional =
3507 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
3508 if (test_bit(ISCSIOPT_ERL1, &options))
3509 sess->erl |= BIT_1;
3510 if (test_bit(ISCSIOPT_ERL0, &options))
3511 sess->erl |= BIT_0;
3512
3513 options = le16_to_cpu(fw_ddb_entry->tcp_options);
3514 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
3515 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
3516 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
3517 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
3518 conn->tcp_timer_scale |= BIT_3;
3519 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
3520 conn->tcp_timer_scale |= BIT_2;
3521 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
3522 conn->tcp_timer_scale |= BIT_1;
3523
3524 conn->tcp_timer_scale >>= 1;
3525 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
3526
3527 options = le16_to_cpu(fw_ddb_entry->ip_options);
3528 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
3529
3530 conn->max_recv_dlength = BYTE_UNITS *
3531 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
3532 conn->max_xmit_dlength = BYTE_UNITS *
3533 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
3534 sess->first_burst = BYTE_UNITS *
3535 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
3536 sess->max_burst = BYTE_UNITS *
3537 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
3538 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
3539 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3540 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
3541 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
3542 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
3543 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
3544 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
3545 conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
3546 conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
3547 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
3548 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
3549 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
3550 sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
3551 sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
3552 sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3553 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
3554
3555 sess->default_taskmgmt_timeout =
3556 le16_to_cpu(fw_ddb_entry->def_timeout);
3557 conn->port = le16_to_cpu(fw_ddb_entry->port);
3558
3559 options = le16_to_cpu(fw_ddb_entry->options);
3560 conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
3561 if (!conn->ipaddress) {
3562 rc = -ENOMEM;
3563 goto exit_copy;
3564 }
3565
3566 conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
3567 if (!conn->redirect_ipaddr) {
3568 rc = -ENOMEM;
3569 goto exit_copy;
3570 }
3571
3572 memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
3573 memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
3574
3575 if (test_bit(OPT_IPV6_DEVICE, &options)) {
3576 conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
3577
3578 conn->link_local_ipv6_addr = kmemdup(
3579 fw_ddb_entry->link_local_ipv6_addr,
3580 IPv6_ADDR_LEN, GFP_KERNEL);
3581 if (!conn->link_local_ipv6_addr) {
3582 rc = -ENOMEM;
3583 goto exit_copy;
3584 }
3585 } else {
3586 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
3587 }
3588
3589 if (fw_ddb_entry->iscsi_name[0]) {
3590 rc = iscsi_switch_str_param(&sess->targetname,
3591 (char *)fw_ddb_entry->iscsi_name);
3592 if (rc)
3593 goto exit_copy;
3594 }
3595
3596 if (fw_ddb_entry->iscsi_alias[0]) {
3597 rc = iscsi_switch_str_param(&sess->targetalias,
3598 (char *)fw_ddb_entry->iscsi_alias);
3599 if (rc)
3600 goto exit_copy;
3601 }
3602
3603 COPY_ISID(sess->isid, fw_ddb_entry->isid);
3604
3605 exit_copy:
3606 return rc;
3607 }
3608
3609 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
3610 struct iscsi_bus_flash_conn *conn,
3611 struct dev_db_entry *fw_ddb_entry)
3612 {
3613 uint16_t options;
3614 int rc = 0;
3615
3616 options = le16_to_cpu(fw_ddb_entry->options);
3617 SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
3618 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
3619 options |= BIT_8;
3620 else
3621 options &= ~BIT_8;
3622
3623 SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
3624 SET_BITVAL(sess->discovery_sess, options, BIT_4);
3625 SET_BITVAL(sess->entry_state, options, BIT_3);
3626 fw_ddb_entry->options = cpu_to_le16(options);
3627
3628 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3629 SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
3630 SET_BITVAL(conn->datadgst_en, options, BIT_12);
3631 SET_BITVAL(sess->imm_data_en, options, BIT_11);
3632 SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
3633 SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
3634 SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
3635 SET_BITVAL(sess->chap_auth_en, options, BIT_7);
3636 SET_BITVAL(conn->snack_req_en, options, BIT_6);
3637 SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
3638 SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
3639 SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
3640 SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
3641 SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
3642 fw_ddb_entry->iscsi_options = cpu_to_le16(options);
3643
3644 options = le16_to_cpu(fw_ddb_entry->tcp_options);
3645 SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
3646 SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
3647 SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
3648 SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
3649 SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
3650 SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
3651 SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
3652 fw_ddb_entry->tcp_options = cpu_to_le16(options);
3653
3654 options = le16_to_cpu(fw_ddb_entry->ip_options);
3655 SET_BITVAL(conn->fragment_disable, options, BIT_4);
3656 fw_ddb_entry->ip_options = cpu_to_le16(options);
3657
3658 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
3659 fw_ddb_entry->iscsi_max_rcv_data_seg_len =
3660 cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
3661 fw_ddb_entry->iscsi_max_snd_data_seg_len =
3662 cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
3663 fw_ddb_entry->iscsi_first_burst_len =
3664 cpu_to_le16(sess->first_burst / BYTE_UNITS);
3665 fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
3666 BYTE_UNITS);
3667 fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
3668 fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
3669 fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
3670 fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
3671 fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
3672 fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
3673 fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
3674 fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
3675 fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
3676 fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
3677 fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
3678 fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
3679 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
3680 fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
3681 fw_ddb_entry->port = cpu_to_le16(conn->port);
3682 fw_ddb_entry->def_timeout =
3683 cpu_to_le16(sess->default_taskmgmt_timeout);
3684
3685 if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
3686 fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
3687 else
3688 fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
3689
3690 if (conn->ipaddress)
3691 memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
3692 sizeof(fw_ddb_entry->ip_addr));
3693
3694 if (conn->redirect_ipaddr)
3695 memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
3696 sizeof(fw_ddb_entry->tgt_addr));
3697
3698 if (conn->link_local_ipv6_addr)
3699 memcpy(fw_ddb_entry->link_local_ipv6_addr,
3700 conn->link_local_ipv6_addr,
3701 sizeof(fw_ddb_entry->link_local_ipv6_addr));
3702
3703 if (sess->targetname)
3704 memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
3705 sizeof(fw_ddb_entry->iscsi_name));
3706
3707 if (sess->targetalias)
3708 memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
3709 sizeof(fw_ddb_entry->iscsi_alias));
3710
3711 COPY_ISID(fw_ddb_entry->isid, sess->isid);
3712
3713 return rc;
3714 }
3715
3716 static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
3717 struct iscsi_session *sess,
3718 struct dev_db_entry *fw_ddb_entry)
3719 {
3720 unsigned long options = 0;
3721 uint16_t ddb_link;
3722 uint16_t disc_parent;
3723 char ip_addr[DDB_IPADDR_LEN];
3724
3725 options = le16_to_cpu(fw_ddb_entry->options);
3726 conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
3727 sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
3728 &options);
3729 sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
3730
3731 options = le16_to_cpu(fw_ddb_entry->iscsi_options);
3732 conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
3733 conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
3734 sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
3735 sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
3736 sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
3737 &options);
3738 sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
3739 sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
3740 sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
3741 &options);
3742 sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
3743 sess->discovery_auth_optional =
3744 test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
3745 if (test_bit(ISCSIOPT_ERL1, &options))
3746 sess->erl |= BIT_1;
3747 if (test_bit(ISCSIOPT_ERL0, &options))
3748 sess->erl |= BIT_0;
3749
3750 options = le16_to_cpu(fw_ddb_entry->tcp_options);
3751 conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
3752 conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
3753 conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
3754 if (test_bit(TCPOPT_TIMER_SCALE3, &options))
3755 conn->tcp_timer_scale |= BIT_3;
3756 if (test_bit(TCPOPT_TIMER_SCALE2, &options))
3757 conn->tcp_timer_scale |= BIT_2;
3758 if (test_bit(TCPOPT_TIMER_SCALE1, &options))
3759 conn->tcp_timer_scale |= BIT_1;
3760
3761 conn->tcp_timer_scale >>= 1;
3762 conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
3763
3764 options = le16_to_cpu(fw_ddb_entry->ip_options);
3765 conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
3766
3767 conn->max_recv_dlength = BYTE_UNITS *
3768 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
3769 conn->max_xmit_dlength = BYTE_UNITS *
3770 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
3771 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
3772 sess->first_burst = BYTE_UNITS *
3773 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
3774 sess->max_burst = BYTE_UNITS *
3775 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
3776 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3777 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
3778 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
3779 conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
3780 conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
3781 conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
3782 conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
3783 conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
3784 conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
3785 conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
3786 conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
3787 sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
3788 COPY_ISID(sess->isid, fw_ddb_entry->isid);
3789
3790 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
3791 if (ddb_link == DDB_ISNS)
3792 disc_parent = ISCSI_DISC_PARENT_ISNS;
3793 else if (ddb_link == DDB_NO_LINK)
3794 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
3795 else if (ddb_link < MAX_DDB_ENTRIES)
3796 disc_parent = ISCSI_DISC_PARENT_SENDTGT;
3797 else
3798 disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
3799
3800 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
3801 iscsi_get_discovery_parent_name(disc_parent), 0);
3802
3803 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
3804 (char *)fw_ddb_entry->iscsi_alias, 0);
3805
3806 options = le16_to_cpu(fw_ddb_entry->options);
3807 if (options & DDB_OPT_IPV6_DEVICE) {
3808 memset(ip_addr, 0, sizeof(ip_addr));
3809 sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr);
3810 iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR,
3811 (char *)ip_addr, 0);
3812 }
3813 }
3814
3815 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
3816 struct dev_db_entry *fw_ddb_entry,
3817 struct iscsi_cls_session *cls_sess,
3818 struct iscsi_cls_conn *cls_conn)
3819 {
3820 int buflen = 0;
3821 struct iscsi_session *sess;
3822 struct ddb_entry *ddb_entry;
3823 struct ql4_chap_table chap_tbl;
3824 struct iscsi_conn *conn;
3825 char ip_addr[DDB_IPADDR_LEN];
3826 uint16_t options = 0;
3827
3828 sess = cls_sess->dd_data;
3829 ddb_entry = sess->dd_data;
3830 conn = cls_conn->dd_data;
3831 memset(&chap_tbl, 0, sizeof(chap_tbl));
3832
3833 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3834
3835 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
3836
3837 sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
3838 conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
3839
3840 memset(ip_addr, 0, sizeof(ip_addr));
3841 options = le16_to_cpu(fw_ddb_entry->options);
3842 if (options & DDB_OPT_IPV6_DEVICE) {
3843 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
3844
3845 memset(ip_addr, 0, sizeof(ip_addr));
3846 sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
3847 } else {
3848 iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
3849 sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
3850 }
3851
3852 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
3853 (char *)ip_addr, buflen);
3854 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
3855 (char *)fw_ddb_entry->iscsi_name, buflen);
3856 iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
3857 (char *)ha->name_string, buflen);
3858
3859 if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
3860 if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
3861 chap_tbl.secret,
3862 ddb_entry->chap_tbl_idx)) {
3863 iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
3864 (char *)chap_tbl.name,
3865 strlen((char *)chap_tbl.name));
3866 iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
3867 (char *)chap_tbl.secret,
3868 chap_tbl.secret_len);
3869 }
3870 }
3871 }
3872
3873 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
3874 struct ddb_entry *ddb_entry)
3875 {
3876 struct iscsi_cls_session *cls_sess;
3877 struct iscsi_cls_conn *cls_conn;
3878 uint32_t ddb_state;
3879 dma_addr_t fw_ddb_entry_dma;
3880 struct dev_db_entry *fw_ddb_entry;
3881
3882 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3883 &fw_ddb_entry_dma, GFP_KERNEL);
3884 if (!fw_ddb_entry) {
3885 ql4_printk(KERN_ERR, ha,
3886 "%s: Unable to allocate dma buffer\n", __func__);
3887 goto exit_session_conn_fwddb_param;
3888 }
3889
3890 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
3891 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
3892 NULL, NULL, NULL) == QLA_ERROR) {
3893 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
3894 "get_ddb_entry for fw_ddb_index %d\n",
3895 ha->host_no, __func__,
3896 ddb_entry->fw_ddb_index));
3897 goto exit_session_conn_fwddb_param;
3898 }
3899
3900 cls_sess = ddb_entry->sess;
3901
3902 cls_conn = ddb_entry->conn;
3903
3904 /* Update params */
3905 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
3906
3907 exit_session_conn_fwddb_param:
3908 if (fw_ddb_entry)
3909 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3910 fw_ddb_entry, fw_ddb_entry_dma);
3911 }
3912
3913 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
3914 struct ddb_entry *ddb_entry)
3915 {
3916 struct iscsi_cls_session *cls_sess;
3917 struct iscsi_cls_conn *cls_conn;
3918 struct iscsi_session *sess;
3919 struct iscsi_conn *conn;
3920 uint32_t ddb_state;
3921 dma_addr_t fw_ddb_entry_dma;
3922 struct dev_db_entry *fw_ddb_entry;
3923
3924 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3925 &fw_ddb_entry_dma, GFP_KERNEL);
3926 if (!fw_ddb_entry) {
3927 ql4_printk(KERN_ERR, ha,
3928 "%s: Unable to allocate dma buffer\n", __func__);
3929 goto exit_session_conn_param;
3930 }
3931
3932 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
3933 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
3934 NULL, NULL, NULL) == QLA_ERROR) {
3935 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
3936 "get_ddb_entry for fw_ddb_index %d\n",
3937 ha->host_no, __func__,
3938 ddb_entry->fw_ddb_index));
3939 goto exit_session_conn_param;
3940 }
3941
3942 cls_sess = ddb_entry->sess;
3943 sess = cls_sess->dd_data;
3944
3945 cls_conn = ddb_entry->conn;
3946 conn = cls_conn->dd_data;
3947
3948 /* Update timers after login */
3949 ddb_entry->default_relogin_timeout =
3950 (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
3951 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
3952 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
3953 ddb_entry->default_time2wait =
3954 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
3955
3956 /* Update params */
3957 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
3958 qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
3959
3960 memcpy(sess->initiatorname, ha->name_string,
3961 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
3962
3963 exit_session_conn_param:
3964 if (fw_ddb_entry)
3965 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3966 fw_ddb_entry, fw_ddb_entry_dma);
3967 }
3968
3969 /*
3970 * Timer routines
3971 */
3972 static void qla4xxx_timer(struct timer_list *t);
3973
3974 static void qla4xxx_start_timer(struct scsi_qla_host *ha,
3975 unsigned long interval)
3976 {
3977 DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
3978 __func__, ha->host->host_no));
3979 timer_setup(&ha->timer, qla4xxx_timer, 0);
3980 ha->timer.expires = jiffies + interval * HZ;
3981 add_timer(&ha->timer);
3982 ha->timer_active = 1;
3983 }
3984
3985 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
3986 {
3987 del_timer_sync(&ha->timer);
3988 ha->timer_active = 0;
3989 }
3990
3991 /***
3992 * qla4xxx_mark_device_missing - blocks the session
3993 * @cls_session: Pointer to the session to be blocked
3994 * @ddb_entry: Pointer to device database entry
3995 *
3996 * This routine marks a device missing and close connection.
3997 **/
3998 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
3999 {
4000 iscsi_block_session(cls_session);
4001 }
4002
4003 /**
4004 * qla4xxx_mark_all_devices_missing - mark all devices as missing.
4005 * @ha: Pointer to host adapter structure.
4006 *
4007 * This routine marks a device missing and resets the relogin retry count.
4008 **/
4009 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
4010 {
4011 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
4012 }
4013
4014 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
4015 struct ddb_entry *ddb_entry,
4016 struct scsi_cmnd *cmd)
4017 {
4018 struct srb *srb;
4019
4020 srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
4021 if (!srb)
4022 return srb;
4023
4024 kref_init(&srb->srb_ref);
4025 srb->ha = ha;
4026 srb->ddb = ddb_entry;
4027 srb->cmd = cmd;
4028 srb->flags = 0;
4029 CMD_SP(cmd) = (void *)srb;
4030
4031 return srb;
4032 }
4033
4034 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
4035 {
4036 struct scsi_cmnd *cmd = srb->cmd;
4037
4038 if (srb->flags & SRB_DMA_VALID) {
4039 scsi_dma_unmap(cmd);
4040 srb->flags &= ~SRB_DMA_VALID;
4041 }
4042 CMD_SP(cmd) = NULL;
4043 }
4044
4045 void qla4xxx_srb_compl(struct kref *ref)
4046 {
4047 struct srb *srb = container_of(ref, struct srb, srb_ref);
4048 struct scsi_cmnd *cmd = srb->cmd;
4049 struct scsi_qla_host *ha = srb->ha;
4050
4051 qla4xxx_srb_free_dma(ha, srb);
4052
4053 mempool_free(srb, ha->srb_mempool);
4054
4055 cmd->scsi_done(cmd);
4056 }
4057
4058 /**
4059 * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
4060 * @host: scsi host
4061 * @cmd: Pointer to Linux's SCSI command structure
4062 *
4063 * Remarks:
4064 * This routine is invoked by Linux to send a SCSI command to the driver.
4065 * The mid-level driver tries to ensure that queuecommand never gets
4066 * invoked concurrently with itself or the interrupt handler (although
4067 * the interrupt handler may call this routine as part of request-
4068 * completion handling). Unfortunely, it sometimes calls the scheduler
4069 * in interrupt context which is a big NO! NO!.
4070 **/
4071 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
4072 {
4073 struct scsi_qla_host *ha = to_qla_host(host);
4074 struct ddb_entry *ddb_entry = cmd->device->hostdata;
4075 struct iscsi_cls_session *sess = ddb_entry->sess;
4076 struct srb *srb;
4077 int rval;
4078
4079 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
4080 if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
4081 cmd->result = DID_NO_CONNECT << 16;
4082 else
4083 cmd->result = DID_REQUEUE << 16;
4084 goto qc_fail_command;
4085 }
4086
4087 if (!sess) {
4088 cmd->result = DID_IMM_RETRY << 16;
4089 goto qc_fail_command;
4090 }
4091
4092 rval = iscsi_session_chkready(sess);
4093 if (rval) {
4094 cmd->result = rval;
4095 goto qc_fail_command;
4096 }
4097
4098 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
4099 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
4100 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4101 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
4102 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
4103 !test_bit(AF_ONLINE, &ha->flags) ||
4104 !test_bit(AF_LINK_UP, &ha->flags) ||
4105 test_bit(AF_LOOPBACK, &ha->flags) ||
4106 test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
4107 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
4108 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
4109 goto qc_host_busy;
4110
4111 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
4112 if (!srb)
4113 goto qc_host_busy;
4114
4115 rval = qla4xxx_send_command_to_isp(ha, srb);
4116 if (rval != QLA_SUCCESS)
4117 goto qc_host_busy_free_sp;
4118
4119 return 0;
4120
4121 qc_host_busy_free_sp:
4122 qla4xxx_srb_free_dma(ha, srb);
4123 mempool_free(srb, ha->srb_mempool);
4124
4125 qc_host_busy:
4126 return SCSI_MLQUEUE_HOST_BUSY;
4127
4128 qc_fail_command:
4129 cmd->scsi_done(cmd);
4130
4131 return 0;
4132 }
4133
4134 /**
4135 * qla4xxx_mem_free - frees memory allocated to adapter
4136 * @ha: Pointer to host adapter structure.
4137 *
4138 * Frees memory previously allocated by qla4xxx_mem_alloc
4139 **/
4140 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
4141 {
4142 if (ha->queues)
4143 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
4144 ha->queues_dma);
4145
4146 if (ha->fw_dump)
4147 vfree(ha->fw_dump);
4148
4149 ha->queues_len = 0;
4150 ha->queues = NULL;
4151 ha->queues_dma = 0;
4152 ha->request_ring = NULL;
4153 ha->request_dma = 0;
4154 ha->response_ring = NULL;
4155 ha->response_dma = 0;
4156 ha->shadow_regs = NULL;
4157 ha->shadow_regs_dma = 0;
4158 ha->fw_dump = NULL;
4159 ha->fw_dump_size = 0;
4160
4161 /* Free srb pool. */
4162 mempool_destroy(ha->srb_mempool);
4163 ha->srb_mempool = NULL;
4164
4165 dma_pool_destroy(ha->chap_dma_pool);
4166
4167 if (ha->chap_list)
4168 vfree(ha->chap_list);
4169 ha->chap_list = NULL;
4170
4171 dma_pool_destroy(ha->fw_ddb_dma_pool);
4172
4173 /* release io space registers */
4174 if (is_qla8022(ha)) {
4175 if (ha->nx_pcibase)
4176 iounmap(
4177 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
4178 } else if (is_qla8032(ha) || is_qla8042(ha)) {
4179 if (ha->nx_pcibase)
4180 iounmap(
4181 (struct device_reg_83xx __iomem *)ha->nx_pcibase);
4182 } else if (ha->reg) {
4183 iounmap(ha->reg);
4184 }
4185
4186 if (ha->reset_tmplt.buff)
4187 vfree(ha->reset_tmplt.buff);
4188
4189 pci_release_regions(ha->pdev);
4190 }
4191
4192 /**
4193 * qla4xxx_mem_alloc - allocates memory for use by adapter.
4194 * @ha: Pointer to host adapter structure
4195 *
4196 * Allocates DMA memory for request and response queues. Also allocates memory
4197 * for srbs.
4198 **/
4199 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
4200 {
4201 unsigned long align;
4202
4203 /* Allocate contiguous block of DMA memory for queues. */
4204 ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
4205 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
4206 sizeof(struct shadow_regs) +
4207 MEM_ALIGN_VALUE +
4208 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4209 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
4210 &ha->queues_dma, GFP_KERNEL);
4211 if (ha->queues == NULL) {
4212 ql4_printk(KERN_WARNING, ha,
4213 "Memory Allocation failed - queues.\n");
4214
4215 goto mem_alloc_error_exit;
4216 }
4217
4218 /*
4219 * As per RISC alignment requirements -- the bus-address must be a
4220 * multiple of the request-ring size (in bytes).
4221 */
4222 align = 0;
4223 if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
4224 align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
4225 (MEM_ALIGN_VALUE - 1));
4226
4227 /* Update request and response queue pointers. */
4228 ha->request_dma = ha->queues_dma + align;
4229 ha->request_ring = (struct queue_entry *) (ha->queues + align);
4230 ha->response_dma = ha->queues_dma + align +
4231 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
4232 ha->response_ring = (struct queue_entry *) (ha->queues + align +
4233 (REQUEST_QUEUE_DEPTH *
4234 QUEUE_SIZE));
4235 ha->shadow_regs_dma = ha->queues_dma + align +
4236 (REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
4237 (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
4238 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
4239 (REQUEST_QUEUE_DEPTH *
4240 QUEUE_SIZE) +
4241 (RESPONSE_QUEUE_DEPTH *
4242 QUEUE_SIZE));
4243
4244 /* Allocate memory for srb pool. */
4245 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
4246 mempool_free_slab, srb_cachep);
4247 if (ha->srb_mempool == NULL) {
4248 ql4_printk(KERN_WARNING, ha,
4249 "Memory Allocation failed - SRB Pool.\n");
4250
4251 goto mem_alloc_error_exit;
4252 }
4253
4254 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
4255 CHAP_DMA_BLOCK_SIZE, 8, 0);
4256
4257 if (ha->chap_dma_pool == NULL) {
4258 ql4_printk(KERN_WARNING, ha,
4259 "%s: chap_dma_pool allocation failed..\n", __func__);
4260 goto mem_alloc_error_exit;
4261 }
4262
4263 ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
4264 DDB_DMA_BLOCK_SIZE, 8, 0);
4265
4266 if (ha->fw_ddb_dma_pool == NULL) {
4267 ql4_printk(KERN_WARNING, ha,
4268 "%s: fw_ddb_dma_pool allocation failed..\n",
4269 __func__);
4270 goto mem_alloc_error_exit;
4271 }
4272
4273 return QLA_SUCCESS;
4274
4275 mem_alloc_error_exit:
4276 qla4xxx_mem_free(ha);
4277 return QLA_ERROR;
4278 }
4279
4280 /**
4281 * qla4_8xxx_check_temp - Check the ISP82XX temperature.
4282 * @ha: adapter block pointer.
4283 *
4284 * Note: The caller should not hold the idc lock.
4285 **/
4286 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
4287 {
4288 uint32_t temp, temp_state, temp_val;
4289 int status = QLA_SUCCESS;
4290
4291 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
4292
4293 temp_state = qla82xx_get_temp_state(temp);
4294 temp_val = qla82xx_get_temp_val(temp);
4295
4296 if (temp_state == QLA82XX_TEMP_PANIC) {
4297 ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
4298 " exceeds maximum allowed. Hardware has been shut"
4299 " down.\n", temp_val);
4300 status = QLA_ERROR;
4301 } else if (temp_state == QLA82XX_TEMP_WARN) {
4302 if (ha->temperature == QLA82XX_TEMP_NORMAL)
4303 ql4_printk(KERN_WARNING, ha, "Device temperature %d"
4304 " degrees C exceeds operating range."
4305 " Immediate action needed.\n", temp_val);
4306 } else {
4307 if (ha->temperature == QLA82XX_TEMP_WARN)
4308 ql4_printk(KERN_INFO, ha, "Device temperature is"
4309 " now %d degrees C in normal range.\n",
4310 temp_val);
4311 }
4312 ha->temperature = temp_state;
4313 return status;
4314 }
4315
4316 /**
4317 * qla4_8xxx_check_fw_alive - Check firmware health
4318 * @ha: Pointer to host adapter structure.
4319 *
4320 * Context: Interrupt
4321 **/
4322 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
4323 {
4324 uint32_t fw_heartbeat_counter;
4325 int status = QLA_SUCCESS;
4326
4327 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
4328 QLA8XXX_PEG_ALIVE_COUNTER);
4329 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
4330 if (fw_heartbeat_counter == 0xffffffff) {
4331 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
4332 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
4333 ha->host_no, __func__));
4334 return status;
4335 }
4336
4337 if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
4338 ha->seconds_since_last_heartbeat++;
4339 /* FW not alive after 2 seconds */
4340 if (ha->seconds_since_last_heartbeat == 2) {
4341 ha->seconds_since_last_heartbeat = 0;
4342 qla4_8xxx_dump_peg_reg(ha);
4343 status = QLA_ERROR;
4344 }
4345 } else
4346 ha->seconds_since_last_heartbeat = 0;
4347
4348 ha->fw_heartbeat_counter = fw_heartbeat_counter;
4349 return status;
4350 }
4351
4352 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
4353 {
4354 uint32_t halt_status;
4355 int halt_status_unrecoverable = 0;
4356
4357 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
4358
4359 if (is_qla8022(ha)) {
4360 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
4361 __func__);
4362 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
4363 CRB_NIU_XG_PAUSE_CTL_P0 |
4364 CRB_NIU_XG_PAUSE_CTL_P1);
4365
4366 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
4367 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
4368 __func__);
4369 if (halt_status & HALT_STATUS_UNRECOVERABLE)
4370 halt_status_unrecoverable = 1;
4371 } else if (is_qla8032(ha) || is_qla8042(ha)) {
4372 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
4373 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
4374 __func__);
4375 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
4376 halt_status_unrecoverable = 1;
4377 }
4378
4379 /*
4380 * Since we cannot change dev_state in interrupt context,
4381 * set appropriate DPC flag then wakeup DPC
4382 */
4383 if (halt_status_unrecoverable) {
4384 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
4385 } else {
4386 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
4387 __func__);
4388 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4389 }
4390 qla4xxx_mailbox_premature_completion(ha);
4391 qla4xxx_wake_dpc(ha);
4392 }
4393
4394 /**
4395 * qla4_8xxx_watchdog - Poll dev state
4396 * @ha: Pointer to host adapter structure.
4397 *
4398 * Context: Interrupt
4399 **/
4400 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
4401 {
4402 uint32_t dev_state;
4403 uint32_t idc_ctrl;
4404
4405 if (is_qla8032(ha) &&
4406 (qla4_83xx_is_detached(ha) == QLA_SUCCESS))
4407 WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n",
4408 __func__, ha->func_num);
4409
4410 /* don't poll if reset is going on */
4411 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
4412 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4413 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
4414 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
4415
4416 if (qla4_8xxx_check_temp(ha)) {
4417 if (is_qla8022(ha)) {
4418 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
4419 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
4420 CRB_NIU_XG_PAUSE_CTL_P0 |
4421 CRB_NIU_XG_PAUSE_CTL_P1);
4422 }
4423 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
4424 qla4xxx_wake_dpc(ha);
4425 } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
4426 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
4427
4428 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
4429 __func__);
4430
4431 if (is_qla8032(ha) || is_qla8042(ha)) {
4432 idc_ctrl = qla4_83xx_rd_reg(ha,
4433 QLA83XX_IDC_DRV_CTRL);
4434 if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
4435 ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
4436 __func__);
4437 qla4xxx_mailbox_premature_completion(
4438 ha);
4439 }
4440 }
4441
4442 if ((is_qla8032(ha) || is_qla8042(ha)) ||
4443 (is_qla8022(ha) && !ql4xdontresethba)) {
4444 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4445 qla4xxx_wake_dpc(ha);
4446 }
4447 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
4448 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
4449 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
4450 __func__);
4451 set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
4452 qla4xxx_wake_dpc(ha);
4453 } else {
4454 /* Check firmware health */
4455 if (qla4_8xxx_check_fw_alive(ha))
4456 qla4_8xxx_process_fw_error(ha);
4457 }
4458 }
4459 }
4460
4461 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
4462 {
4463 struct iscsi_session *sess;
4464 struct ddb_entry *ddb_entry;
4465 struct scsi_qla_host *ha;
4466
4467 sess = cls_sess->dd_data;
4468 ddb_entry = sess->dd_data;
4469 ha = ddb_entry->ha;
4470
4471 if (!(ddb_entry->ddb_type == FLASH_DDB))
4472 return;
4473
4474 if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
4475 !iscsi_is_session_online(cls_sess)) {
4476 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
4477 INVALID_ENTRY) {
4478 if (atomic_read(&ddb_entry->retry_relogin_timer) ==
4479 0) {
4480 atomic_set(&ddb_entry->retry_relogin_timer,
4481 INVALID_ENTRY);
4482 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4483 set_bit(DF_RELOGIN, &ddb_entry->flags);
4484 DEBUG2(ql4_printk(KERN_INFO, ha,
4485 "%s: index [%d] login device\n",
4486 __func__, ddb_entry->fw_ddb_index));
4487 } else
4488 atomic_dec(&ddb_entry->retry_relogin_timer);
4489 }
4490 }
4491
4492 /* Wait for relogin to timeout */
4493 if (atomic_read(&ddb_entry->relogin_timer) &&
4494 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
4495 /*
4496 * If the relogin times out and the device is
4497 * still NOT ONLINE then try and relogin again.
4498 */
4499 if (!iscsi_is_session_online(cls_sess)) {
4500 /* Reset retry relogin timer */
4501 atomic_inc(&ddb_entry->relogin_retry_count);
4502 DEBUG2(ql4_printk(KERN_INFO, ha,
4503 "%s: index[%d] relogin timed out-retrying"
4504 " relogin (%d), retry (%d)\n", __func__,
4505 ddb_entry->fw_ddb_index,
4506 atomic_read(&ddb_entry->relogin_retry_count),
4507 ddb_entry->default_time2wait + 4));
4508 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
4509 atomic_set(&ddb_entry->retry_relogin_timer,
4510 ddb_entry->default_time2wait + 4);
4511 }
4512 }
4513 }
4514
4515 /**
4516 * qla4xxx_timer - checks every second for work to do.
4517 * @ha: Pointer to host adapter structure.
4518 **/
4519 static void qla4xxx_timer(struct timer_list *t)
4520 {
4521 struct scsi_qla_host *ha = from_timer(ha, t, timer);
4522 int start_dpc = 0;
4523 uint16_t w;
4524
4525 iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
4526
4527 /* If we are in the middle of AER/EEH processing
4528 * skip any processing and reschedule the timer
4529 */
4530 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
4531 mod_timer(&ha->timer, jiffies + HZ);
4532 return;
4533 }
4534
4535 /* Hardware read to trigger an EEH error during mailbox waits. */
4536 if (!pci_channel_offline(ha->pdev))
4537 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
4538
4539 if (is_qla80XX(ha))
4540 qla4_8xxx_watchdog(ha);
4541
4542 if (is_qla40XX(ha)) {
4543 /* Check for heartbeat interval. */
4544 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
4545 ha->heartbeat_interval != 0) {
4546 ha->seconds_since_last_heartbeat++;
4547 if (ha->seconds_since_last_heartbeat >
4548 ha->heartbeat_interval + 2)
4549 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4550 }
4551 }
4552
4553 /* Process any deferred work. */
4554 if (!list_empty(&ha->work_list))
4555 start_dpc++;
4556
4557 /* Wakeup the dpc routine for this adapter, if needed. */
4558 if (start_dpc ||
4559 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
4560 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
4561 test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
4562 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
4563 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
4564 test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
4565 test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
4566 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
4567 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
4568 test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) ||
4569 test_bit(DPC_AEN, &ha->dpc_flags)) {
4570 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
4571 " - dpc flags = 0x%lx\n",
4572 ha->host_no, __func__, ha->dpc_flags));
4573 qla4xxx_wake_dpc(ha);
4574 }
4575
4576 /* Reschedule timer thread to call us back in one second */
4577 mod_timer(&ha->timer, jiffies + HZ);
4578
4579 DEBUG2(ha->seconds_since_last_intr++);
4580 }
4581
4582 /**
4583 * qla4xxx_cmd_wait - waits for all outstanding commands to complete
4584 * @ha: Pointer to host adapter structure.
4585 *
4586 * This routine stalls the driver until all outstanding commands are returned.
4587 * Caller must release the Hardware Lock prior to calling this routine.
4588 **/
4589 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
4590 {
4591 uint32_t index = 0;
4592 unsigned long flags;
4593 struct scsi_cmnd *cmd;
4594 unsigned long wtime;
4595 uint32_t wtmo;
4596
4597 if (is_qla40XX(ha))
4598 wtmo = WAIT_CMD_TOV;
4599 else
4600 wtmo = ha->nx_reset_timeout / 2;
4601
4602 wtime = jiffies + (wtmo * HZ);
4603
4604 DEBUG2(ql4_printk(KERN_INFO, ha,
4605 "Wait up to %u seconds for cmds to complete\n",
4606 wtmo));
4607
4608 while (!time_after_eq(jiffies, wtime)) {
4609 spin_lock_irqsave(&ha->hardware_lock, flags);
4610 /* Find a command that hasn't completed. */
4611 for (index = 0; index < ha->host->can_queue; index++) {
4612 cmd = scsi_host_find_tag(ha->host, index);
4613 /*
4614 * We cannot just check if the index is valid,
4615 * becase if we are run from the scsi eh, then
4616 * the scsi/block layer is going to prevent
4617 * the tag from being released.
4618 */
4619 if (cmd != NULL && CMD_SP(cmd))
4620 break;
4621 }
4622 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4623
4624 /* If No Commands are pending, wait is complete */
4625 if (index == ha->host->can_queue)
4626 return QLA_SUCCESS;
4627
4628 msleep(1000);
4629 }
4630 /* If we timed out on waiting for commands to come back
4631 * return ERROR. */
4632 return QLA_ERROR;
4633 }
4634
4635 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
4636 {
4637 uint32_t ctrl_status;
4638 unsigned long flags = 0;
4639
4640 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
4641
4642 if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
4643 return QLA_ERROR;
4644
4645 spin_lock_irqsave(&ha->hardware_lock, flags);
4646
4647 /*
4648 * If the SCSI Reset Interrupt bit is set, clear it.
4649 * Otherwise, the Soft Reset won't work.
4650 */
4651 ctrl_status = readw(&ha->reg->ctrl_status);
4652 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
4653 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
4654
4655 /* Issue Soft Reset */
4656 writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
4657 readl(&ha->reg->ctrl_status);
4658
4659 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4660 return QLA_SUCCESS;
4661 }
4662
4663 /**
4664 * qla4xxx_soft_reset - performs soft reset.
4665 * @ha: Pointer to host adapter structure.
4666 **/
4667 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
4668 {
4669 uint32_t max_wait_time;
4670 unsigned long flags = 0;
4671 int status;
4672 uint32_t ctrl_status;
4673
4674 status = qla4xxx_hw_reset(ha);
4675 if (status != QLA_SUCCESS)
4676 return status;
4677
4678 status = QLA_ERROR;
4679 /* Wait until the Network Reset Intr bit is cleared */
4680 max_wait_time = RESET_INTR_TOV;
4681 do {
4682 spin_lock_irqsave(&ha->hardware_lock, flags);
4683 ctrl_status = readw(&ha->reg->ctrl_status);
4684 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4685
4686 if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
4687 break;
4688
4689 msleep(1000);
4690 } while ((--max_wait_time));
4691
4692 if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
4693 DEBUG2(printk(KERN_WARNING
4694 "scsi%ld: Network Reset Intr not cleared by "
4695 "Network function, clearing it now!\n",
4696 ha->host_no));
4697 spin_lock_irqsave(&ha->hardware_lock, flags);
4698 writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
4699 readl(&ha->reg->ctrl_status);
4700 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4701 }
4702
4703 /* Wait until the firmware tells us the Soft Reset is done */
4704 max_wait_time = SOFT_RESET_TOV;
4705 do {
4706 spin_lock_irqsave(&ha->hardware_lock, flags);
4707 ctrl_status = readw(&ha->reg->ctrl_status);
4708 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4709
4710 if ((ctrl_status & CSR_SOFT_RESET) == 0) {
4711 status = QLA_SUCCESS;
4712 break;
4713 }
4714
4715 msleep(1000);
4716 } while ((--max_wait_time));
4717
4718 /*
4719 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
4720 * after the soft reset has taken place.
4721 */
4722 spin_lock_irqsave(&ha->hardware_lock, flags);
4723 ctrl_status = readw(&ha->reg->ctrl_status);
4724 if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
4725 writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
4726 readl(&ha->reg->ctrl_status);
4727 }
4728 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4729
4730 /* If soft reset fails then most probably the bios on other
4731 * function is also enabled.
4732 * Since the initialization is sequential the other fn
4733 * wont be able to acknowledge the soft reset.
4734 * Issue a force soft reset to workaround this scenario.
4735 */
4736 if (max_wait_time == 0) {
4737 /* Issue Force Soft Reset */
4738 spin_lock_irqsave(&ha->hardware_lock, flags);
4739 writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
4740 readl(&ha->reg->ctrl_status);
4741 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4742 /* Wait until the firmware tells us the Soft Reset is done */
4743 max_wait_time = SOFT_RESET_TOV;
4744 do {
4745 spin_lock_irqsave(&ha->hardware_lock, flags);
4746 ctrl_status = readw(&ha->reg->ctrl_status);
4747 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4748
4749 if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
4750 status = QLA_SUCCESS;
4751 break;
4752 }
4753
4754 msleep(1000);
4755 } while ((--max_wait_time));
4756 }
4757
4758 return status;
4759 }
4760
4761 /**
4762 * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
4763 * @ha: Pointer to host adapter structure.
4764 * @res: returned scsi status
4765 *
4766 * This routine is called just prior to a HARD RESET to return all
4767 * outstanding commands back to the Operating System.
4768 * Caller should make sure that the following locks are released
4769 * before this calling routine: Hardware lock, and io_request_lock.
4770 **/
4771 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
4772 {
4773 struct srb *srb;
4774 int i;
4775 unsigned long flags;
4776
4777 spin_lock_irqsave(&ha->hardware_lock, flags);
4778 for (i = 0; i < ha->host->can_queue; i++) {
4779 srb = qla4xxx_del_from_active_array(ha, i);
4780 if (srb != NULL) {
4781 srb->cmd->result = res;
4782 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
4783 }
4784 }
4785 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4786 }
4787
4788 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
4789 {
4790 clear_bit(AF_ONLINE, &ha->flags);
4791
4792 /* Disable the board */
4793 ql4_printk(KERN_INFO, ha, "Disabling the board\n");
4794
4795 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
4796 qla4xxx_mark_all_devices_missing(ha);
4797 clear_bit(AF_INIT_DONE, &ha->flags);
4798 }
4799
4800 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
4801 {
4802 struct iscsi_session *sess;
4803 struct ddb_entry *ddb_entry;
4804
4805 sess = cls_session->dd_data;
4806 ddb_entry = sess->dd_data;
4807 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
4808
4809 if (ddb_entry->ddb_type == FLASH_DDB)
4810 iscsi_block_session(ddb_entry->sess);
4811 else
4812 iscsi_session_failure(cls_session->dd_data,
4813 ISCSI_ERR_CONN_FAILED);
4814 }
4815
4816 /**
4817 * qla4xxx_recover_adapter - recovers adapter after a fatal error
4818 * @ha: Pointer to host adapter structure.
4819 **/
4820 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
4821 {
4822 int status = QLA_ERROR;
4823 uint8_t reset_chip = 0;
4824 uint32_t dev_state;
4825 unsigned long wait;
4826
4827 /* Stall incoming I/O until we are done */
4828 scsi_block_requests(ha->host);
4829 clear_bit(AF_ONLINE, &ha->flags);
4830 clear_bit(AF_LINK_UP, &ha->flags);
4831
4832 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
4833
4834 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
4835
4836 if ((is_qla8032(ha) || is_qla8042(ha)) &&
4837 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
4838 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
4839 __func__);
4840 /* disable pause frame for ISP83xx */
4841 qla4_83xx_disable_pause(ha);
4842 }
4843
4844 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
4845
4846 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
4847 reset_chip = 1;
4848
4849 /* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
4850 * do not reset adapter, jump to initialize_adapter */
4851 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
4852 status = QLA_SUCCESS;
4853 goto recover_ha_init_adapter;
4854 }
4855
4856 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked
4857 * from eh_host_reset or ioctl module */
4858 if (is_qla80XX(ha) && !reset_chip &&
4859 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
4860
4861 DEBUG2(ql4_printk(KERN_INFO, ha,
4862 "scsi%ld: %s - Performing stop_firmware...\n",
4863 ha->host_no, __func__));
4864 status = ha->isp_ops->reset_firmware(ha);
4865 if (status == QLA_SUCCESS) {
4866 ha->isp_ops->disable_intrs(ha);
4867 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4868 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4869 } else {
4870 /* If the stop_firmware fails then
4871 * reset the entire chip */
4872 reset_chip = 1;
4873 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
4874 set_bit(DPC_RESET_HA, &ha->dpc_flags);
4875 }
4876 }
4877
4878 /* Issue full chip reset if recovering from a catastrophic error,
4879 * or if stop_firmware fails for ISP-8xxx.
4880 * This is the default case for ISP-4xxx */
4881 if (is_qla40XX(ha) || reset_chip) {
4882 if (is_qla40XX(ha))
4883 goto chip_reset;
4884
4885 /* Check if 8XXX firmware is alive or not
4886 * We may have arrived here from NEED_RESET
4887 * detection only */
4888 if (test_bit(AF_FW_RECOVERY, &ha->flags))
4889 goto chip_reset;
4890
4891 wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
4892 while (time_before(jiffies, wait)) {
4893 if (qla4_8xxx_check_fw_alive(ha)) {
4894 qla4xxx_mailbox_premature_completion(ha);
4895 break;
4896 }
4897
4898 set_current_state(TASK_UNINTERRUPTIBLE);
4899 schedule_timeout(HZ);
4900 }
4901 chip_reset:
4902 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
4903 qla4xxx_cmd_wait(ha);
4904
4905 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4906 DEBUG2(ql4_printk(KERN_INFO, ha,
4907 "scsi%ld: %s - Performing chip reset..\n",
4908 ha->host_no, __func__));
4909 status = ha->isp_ops->reset_chip(ha);
4910 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
4911 }
4912
4913 /* Flush any pending ddb changed AENs */
4914 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
4915
4916 recover_ha_init_adapter:
4917 /* Upon successful firmware/chip reset, re-initialize the adapter */
4918 if (status == QLA_SUCCESS) {
4919 /* For ISP-4xxx, force function 1 to always initialize
4920 * before function 3 to prevent both funcions from
4921 * stepping on top of the other */
4922 if (is_qla40XX(ha) && (ha->mac_index == 3))
4923 ssleep(6);
4924
4925 /* NOTE: AF_ONLINE flag set upon successful completion of
4926 * qla4xxx_initialize_adapter */
4927 status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
4928 if (is_qla80XX(ha) && (status == QLA_ERROR)) {
4929 status = qla4_8xxx_check_init_adapter_retry(ha);
4930 if (status == QLA_ERROR) {
4931 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n",
4932 ha->host_no, __func__);
4933 qla4xxx_dead_adapter_cleanup(ha);
4934 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4935 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
4936 clear_bit(DPC_RESET_HA_FW_CONTEXT,
4937 &ha->dpc_flags);
4938 goto exit_recover;
4939 }
4940 }
4941 }
4942
4943 /* Retry failed adapter initialization, if necessary
4944 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
4945 * case to prevent ping-pong resets between functions */
4946 if (!test_bit(AF_ONLINE, &ha->flags) &&
4947 !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
4948 /* Adapter initialization failed, see if we can retry
4949 * resetting the ha.
4950 * Since we don't want to block the DPC for too long
4951 * with multiple resets in the same thread,
4952 * utilize DPC to retry */
4953 if (is_qla80XX(ha)) {
4954 ha->isp_ops->idc_lock(ha);
4955 dev_state = qla4_8xxx_rd_direct(ha,
4956 QLA8XXX_CRB_DEV_STATE);
4957 ha->isp_ops->idc_unlock(ha);
4958 if (dev_state == QLA8XXX_DEV_FAILED) {
4959 ql4_printk(KERN_INFO, ha, "%s: don't retry "
4960 "recover adapter. H/W is in Failed "
4961 "state\n", __func__);
4962 qla4xxx_dead_adapter_cleanup(ha);
4963 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4964 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
4965 clear_bit(DPC_RESET_HA_FW_CONTEXT,
4966 &ha->dpc_flags);
4967 status = QLA_ERROR;
4968
4969 goto exit_recover;
4970 }
4971 }
4972
4973 if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
4974 ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
4975 DEBUG2(printk("scsi%ld: recover adapter - retrying "
4976 "(%d) more times\n", ha->host_no,
4977 ha->retry_reset_ha_cnt));
4978 set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4979 status = QLA_ERROR;
4980 } else {
4981 if (ha->retry_reset_ha_cnt > 0) {
4982 /* Schedule another Reset HA--DPC will retry */
4983 ha->retry_reset_ha_cnt--;
4984 DEBUG2(printk("scsi%ld: recover adapter - "
4985 "retry remaining %d\n",
4986 ha->host_no,
4987 ha->retry_reset_ha_cnt));
4988 status = QLA_ERROR;
4989 }
4990
4991 if (ha->retry_reset_ha_cnt == 0) {
4992 /* Recover adapter retries have been exhausted.
4993 * Adapter DEAD */
4994 DEBUG2(printk("scsi%ld: recover adapter "
4995 "failed - board disabled\n",
4996 ha->host_no));
4997 qla4xxx_dead_adapter_cleanup(ha);
4998 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
4999 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
5000 clear_bit(DPC_RESET_HA_FW_CONTEXT,
5001 &ha->dpc_flags);
5002 status = QLA_ERROR;
5003 }
5004 }
5005 } else {
5006 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
5007 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5008 clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
5009 }
5010
5011 exit_recover:
5012 ha->adapter_error_count++;
5013
5014 if (test_bit(AF_ONLINE, &ha->flags))
5015 ha->isp_ops->enable_intrs(ha);
5016
5017 scsi_unblock_requests(ha->host);
5018
5019 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
5020 DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
5021 status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
5022
5023 return status;
5024 }
5025
5026 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
5027 {
5028 struct iscsi_session *sess;
5029 struct ddb_entry *ddb_entry;
5030 struct scsi_qla_host *ha;
5031
5032 sess = cls_session->dd_data;
5033 ddb_entry = sess->dd_data;
5034 ha = ddb_entry->ha;
5035 if (!iscsi_is_session_online(cls_session)) {
5036 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
5037 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5038 " unblock session\n", ha->host_no, __func__,
5039 ddb_entry->fw_ddb_index);
5040 iscsi_unblock_session(ddb_entry->sess);
5041 } else {
5042 /* Trigger relogin */
5043 if (ddb_entry->ddb_type == FLASH_DDB) {
5044 if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
5045 test_bit(DF_DISABLE_RELOGIN,
5046 &ddb_entry->flags)))
5047 qla4xxx_arm_relogin_timer(ddb_entry);
5048 } else
5049 iscsi_session_failure(cls_session->dd_data,
5050 ISCSI_ERR_CONN_FAILED);
5051 }
5052 }
5053 }
5054
5055 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
5056 {
5057 struct iscsi_session *sess;
5058 struct ddb_entry *ddb_entry;
5059 struct scsi_qla_host *ha;
5060
5061 sess = cls_session->dd_data;
5062 ddb_entry = sess->dd_data;
5063 ha = ddb_entry->ha;
5064 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5065 " unblock session\n", ha->host_no, __func__,
5066 ddb_entry->fw_ddb_index);
5067
5068 iscsi_unblock_session(ddb_entry->sess);
5069
5070 /* Start scan target */
5071 if (test_bit(AF_ONLINE, &ha->flags)) {
5072 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5073 " start scan\n", ha->host_no, __func__,
5074 ddb_entry->fw_ddb_index);
5075 scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
5076 }
5077 return QLA_SUCCESS;
5078 }
5079
5080 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
5081 {
5082 struct iscsi_session *sess;
5083 struct ddb_entry *ddb_entry;
5084 struct scsi_qla_host *ha;
5085 int status = QLA_SUCCESS;
5086
5087 sess = cls_session->dd_data;
5088 ddb_entry = sess->dd_data;
5089 ha = ddb_entry->ha;
5090 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
5091 " unblock user space session\n", ha->host_no, __func__,
5092 ddb_entry->fw_ddb_index);
5093
5094 if (!iscsi_is_session_online(cls_session)) {
5095 iscsi_conn_start(ddb_entry->conn);
5096 iscsi_conn_login_event(ddb_entry->conn,
5097 ISCSI_CONN_STATE_LOGGED_IN);
5098 } else {
5099 ql4_printk(KERN_INFO, ha,
5100 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
5101 ha->host_no, __func__, ddb_entry->fw_ddb_index,
5102 cls_session->sid);
5103 status = QLA_ERROR;
5104 }
5105
5106 return status;
5107 }
5108
5109 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
5110 {
5111 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
5112 }
5113
5114 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
5115 {
5116 uint16_t relogin_timer;
5117 struct iscsi_session *sess;
5118 struct ddb_entry *ddb_entry;
5119 struct scsi_qla_host *ha;
5120
5121 sess = cls_sess->dd_data;
5122 ddb_entry = sess->dd_data;
5123 ha = ddb_entry->ha;
5124
5125 relogin_timer = max(ddb_entry->default_relogin_timeout,
5126 (uint16_t)RELOGIN_TOV);
5127 atomic_set(&ddb_entry->relogin_timer, relogin_timer);
5128
5129 DEBUG2(ql4_printk(KERN_INFO, ha,
5130 "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
5131 ddb_entry->fw_ddb_index, relogin_timer));
5132
5133 qla4xxx_login_flash_ddb(cls_sess);
5134 }
5135
5136 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
5137 {
5138 struct iscsi_session *sess;
5139 struct ddb_entry *ddb_entry;
5140 struct scsi_qla_host *ha;
5141
5142 sess = cls_sess->dd_data;
5143 ddb_entry = sess->dd_data;
5144 ha = ddb_entry->ha;
5145
5146 if (!(ddb_entry->ddb_type == FLASH_DDB))
5147 return;
5148
5149 if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
5150 return;
5151
5152 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
5153 !iscsi_is_session_online(cls_sess)) {
5154 DEBUG2(ql4_printk(KERN_INFO, ha,
5155 "relogin issued\n"));
5156 qla4xxx_relogin_flash_ddb(cls_sess);
5157 }
5158 }
5159
5160 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
5161 {
5162 if (ha->dpc_thread)
5163 queue_work(ha->dpc_thread, &ha->dpc_work);
5164 }
5165
5166 static struct qla4_work_evt *
5167 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
5168 enum qla4_work_type type)
5169 {
5170 struct qla4_work_evt *e;
5171 uint32_t size = sizeof(struct qla4_work_evt) + data_size;
5172
5173 e = kzalloc(size, GFP_ATOMIC);
5174 if (!e)
5175 return NULL;
5176
5177 INIT_LIST_HEAD(&e->list);
5178 e->type = type;
5179 return e;
5180 }
5181
5182 static void qla4xxx_post_work(struct scsi_qla_host *ha,
5183 struct qla4_work_evt *e)
5184 {
5185 unsigned long flags;
5186
5187 spin_lock_irqsave(&ha->work_lock, flags);
5188 list_add_tail(&e->list, &ha->work_list);
5189 spin_unlock_irqrestore(&ha->work_lock, flags);
5190 qla4xxx_wake_dpc(ha);
5191 }
5192
5193 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
5194 enum iscsi_host_event_code aen_code,
5195 uint32_t data_size, uint8_t *data)
5196 {
5197 struct qla4_work_evt *e;
5198
5199 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
5200 if (!e)
5201 return QLA_ERROR;
5202
5203 e->u.aen.code = aen_code;
5204 e->u.aen.data_size = data_size;
5205 memcpy(e->u.aen.data, data, data_size);
5206
5207 qla4xxx_post_work(ha, e);
5208
5209 return QLA_SUCCESS;
5210 }
5211
5212 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
5213 uint32_t status, uint32_t pid,
5214 uint32_t data_size, uint8_t *data)
5215 {
5216 struct qla4_work_evt *e;
5217
5218 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
5219 if (!e)
5220 return QLA_ERROR;
5221
5222 e->u.ping.status = status;
5223 e->u.ping.pid = pid;
5224 e->u.ping.data_size = data_size;
5225 memcpy(e->u.ping.data, data, data_size);
5226
5227 qla4xxx_post_work(ha, e);
5228
5229 return QLA_SUCCESS;
5230 }
5231
5232 static void qla4xxx_do_work(struct scsi_qla_host *ha)
5233 {
5234 struct qla4_work_evt *e, *tmp;
5235 unsigned long flags;
5236 LIST_HEAD(work);
5237
5238 spin_lock_irqsave(&ha->work_lock, flags);
5239 list_splice_init(&ha->work_list, &work);
5240 spin_unlock_irqrestore(&ha->work_lock, flags);
5241
5242 list_for_each_entry_safe(e, tmp, &work, list) {
5243 list_del_init(&e->list);
5244
5245 switch (e->type) {
5246 case QLA4_EVENT_AEN:
5247 iscsi_post_host_event(ha->host_no,
5248 &qla4xxx_iscsi_transport,
5249 e->u.aen.code,
5250 e->u.aen.data_size,
5251 e->u.aen.data);
5252 break;
5253 case QLA4_EVENT_PING_STATUS:
5254 iscsi_ping_comp_event(ha->host_no,
5255 &qla4xxx_iscsi_transport,
5256 e->u.ping.status,
5257 e->u.ping.pid,
5258 e->u.ping.data_size,
5259 e->u.ping.data);
5260 break;
5261 default:
5262 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
5263 "supported", e->type);
5264 }
5265 kfree(e);
5266 }
5267 }
5268
5269 /**
5270 * qla4xxx_do_dpc - dpc routine
5271 * @data: in our case pointer to adapter structure
5272 *
5273 * This routine is a task that is schedule by the interrupt handler
5274 * to perform the background processing for interrupts. We put it
5275 * on a task queue that is consumed whenever the scheduler runs; that's
5276 * so you can do anything (i.e. put the process to sleep etc). In fact,
5277 * the mid-level tries to sleep when it reaches the driver threshold
5278 * "host->can_queue". This can cause a panic if we were in our interrupt code.
5279 **/
5280 static void qla4xxx_do_dpc(struct work_struct *work)
5281 {
5282 struct scsi_qla_host *ha =
5283 container_of(work, struct scsi_qla_host, dpc_work);
5284 int status = QLA_ERROR;
5285
5286 DEBUG2(ql4_printk(KERN_INFO, ha,
5287 "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n",
5288 ha->host_no, __func__, ha->flags, ha->dpc_flags));
5289
5290 /* Initialization not yet finished. Don't do anything yet. */
5291 if (!test_bit(AF_INIT_DONE, &ha->flags))
5292 return;
5293
5294 if (test_bit(AF_EEH_BUSY, &ha->flags)) {
5295 DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
5296 ha->host_no, __func__, ha->flags));
5297 return;
5298 }
5299
5300 /* post events to application */
5301 qla4xxx_do_work(ha);
5302
5303 if (is_qla80XX(ha)) {
5304 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
5305 if (is_qla8032(ha) || is_qla8042(ha)) {
5306 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
5307 __func__);
5308 /* disable pause frame for ISP83xx */
5309 qla4_83xx_disable_pause(ha);
5310 }
5311
5312 ha->isp_ops->idc_lock(ha);
5313 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
5314 QLA8XXX_DEV_FAILED);
5315 ha->isp_ops->idc_unlock(ha);
5316 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
5317 qla4_8xxx_device_state_handler(ha);
5318 }
5319
5320 if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
5321 if (is_qla8042(ha)) {
5322 if (ha->idc_info.info2 &
5323 ENABLE_INTERNAL_LOOPBACK) {
5324 ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
5325 __func__);
5326 status = qla4_84xx_config_acb(ha,
5327 ACB_CONFIG_DISABLE);
5328 if (status != QLA_SUCCESS) {
5329 ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
5330 __func__);
5331 }
5332 }
5333 }
5334 qla4_83xx_post_idc_ack(ha);
5335 clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
5336 }
5337
5338 if (is_qla8042(ha) &&
5339 test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
5340 ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
5341 __func__);
5342 if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
5343 QLA_SUCCESS) {
5344 ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
5345 __func__);
5346 }
5347 clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
5348 }
5349
5350 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
5351 qla4_8xxx_need_qsnt_handler(ha);
5352 }
5353 }
5354
5355 if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
5356 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
5357 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
5358 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
5359 if ((is_qla8022(ha) && ql4xdontresethba) ||
5360 ((is_qla8032(ha) || is_qla8042(ha)) &&
5361 qla4_83xx_idc_dontreset(ha))) {
5362 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5363 ha->host_no, __func__));
5364 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
5365 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
5366 clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5367 goto dpc_post_reset_ha;
5368 }
5369 if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
5370 test_bit(DPC_RESET_HA, &ha->dpc_flags))
5371 qla4xxx_recover_adapter(ha);
5372
5373 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
5374 uint8_t wait_time = RESET_INTR_TOV;
5375
5376 while ((readw(&ha->reg->ctrl_status) &
5377 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
5378 if (--wait_time == 0)
5379 break;
5380 msleep(1000);
5381 }
5382 if (wait_time == 0)
5383 DEBUG2(printk("scsi%ld: %s: SR|FSR "
5384 "bit not cleared-- resetting\n",
5385 ha->host_no, __func__));
5386 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
5387 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
5388 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
5389 status = qla4xxx_recover_adapter(ha);
5390 }
5391 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
5392 if (status == QLA_SUCCESS)
5393 ha->isp_ops->enable_intrs(ha);
5394 }
5395 }
5396
5397 dpc_post_reset_ha:
5398 /* ---- process AEN? --- */
5399 if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
5400 qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
5401
5402 /* ---- Get DHCP IP Address? --- */
5403 if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
5404 qla4xxx_get_dhcp_ip_address(ha);
5405
5406 /* ---- relogin device? --- */
5407 if (adapter_up(ha) &&
5408 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
5409 iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
5410 }
5411
5412 /* ---- link change? --- */
5413 if (!test_bit(AF_LOOPBACK, &ha->flags) &&
5414 test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
5415 if (!test_bit(AF_LINK_UP, &ha->flags)) {
5416 /* ---- link down? --- */
5417 qla4xxx_mark_all_devices_missing(ha);
5418 } else {
5419 /* ---- link up? --- *
5420 * F/W will auto login to all devices ONLY ONCE after
5421 * link up during driver initialization and runtime
5422 * fatal error recovery. Therefore, the driver must
5423 * manually relogin to devices when recovering from
5424 * connection failures, logouts, expired KATO, etc. */
5425 if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
5426 qla4xxx_build_ddb_list(ha, ha->is_reset);
5427 iscsi_host_for_each_session(ha->host,
5428 qla4xxx_login_flash_ddb);
5429 } else
5430 qla4xxx_relogin_all_devices(ha);
5431 }
5432 }
5433 if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) {
5434 if (qla4xxx_sysfs_ddb_export(ha))
5435 ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n",
5436 __func__);
5437 }
5438 }
5439
5440 /**
5441 * qla4xxx_free_adapter - release the adapter
5442 * @ha: pointer to adapter structure
5443 **/
5444 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
5445 {
5446 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
5447
5448 /* Turn-off interrupts on the card. */
5449 ha->isp_ops->disable_intrs(ha);
5450
5451 if (is_qla40XX(ha)) {
5452 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
5453 &ha->reg->ctrl_status);
5454 readl(&ha->reg->ctrl_status);
5455 } else if (is_qla8022(ha)) {
5456 writel(0, &ha->qla4_82xx_reg->host_int);
5457 readl(&ha->qla4_82xx_reg->host_int);
5458 } else if (is_qla8032(ha) || is_qla8042(ha)) {
5459 writel(0, &ha->qla4_83xx_reg->risc_intr);
5460 readl(&ha->qla4_83xx_reg->risc_intr);
5461 }
5462
5463 /* Remove timer thread, if present */
5464 if (ha->timer_active)
5465 qla4xxx_stop_timer(ha);
5466
5467 /* Kill the kernel thread for this host */
5468 if (ha->dpc_thread)
5469 destroy_workqueue(ha->dpc_thread);
5470
5471 /* Kill the kernel thread for this host */
5472 if (ha->task_wq)
5473 destroy_workqueue(ha->task_wq);
5474
5475 /* Put firmware in known state */
5476 ha->isp_ops->reset_firmware(ha);
5477
5478 if (is_qla80XX(ha)) {
5479 ha->isp_ops->idc_lock(ha);
5480 qla4_8xxx_clear_drv_active(ha);
5481 ha->isp_ops->idc_unlock(ha);
5482 }
5483
5484 /* Detach interrupts */
5485 qla4xxx_free_irqs(ha);
5486
5487 /* free extra memory */
5488 qla4xxx_mem_free(ha);
5489 }
5490
5491 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
5492 {
5493 int status = 0;
5494 unsigned long mem_base, mem_len, db_base, db_len;
5495 struct pci_dev *pdev = ha->pdev;
5496
5497 status = pci_request_regions(pdev, DRIVER_NAME);
5498 if (status) {
5499 printk(KERN_WARNING
5500 "scsi(%ld) Failed to reserve PIO regions (%s) "
5501 "status=%d\n", ha->host_no, pci_name(pdev), status);
5502 goto iospace_error_exit;
5503 }
5504
5505 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
5506 __func__, pdev->revision));
5507 ha->revision_id = pdev->revision;
5508
5509 /* remap phys address */
5510 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
5511 mem_len = pci_resource_len(pdev, 0);
5512 DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
5513 __func__, mem_base, mem_len));
5514
5515 /* mapping of pcibase pointer */
5516 ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
5517 if (!ha->nx_pcibase) {
5518 printk(KERN_ERR
5519 "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
5520 pci_release_regions(ha->pdev);
5521 goto iospace_error_exit;
5522 }
5523
5524 /* Mapping of IO base pointer, door bell read and write pointer */
5525
5526 /* mapping of IO base pointer */
5527 if (is_qla8022(ha)) {
5528 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *)
5529 ((uint8_t *)ha->nx_pcibase + 0xbc000 +
5530 (ha->pdev->devfn << 11));
5531 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
5532 QLA82XX_CAM_RAM_DB2);
5533 } else if (is_qla8032(ha) || is_qla8042(ha)) {
5534 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
5535 ((uint8_t *)ha->nx_pcibase);
5536 }
5537
5538 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
5539 db_len = pci_resource_len(pdev, 4);
5540
5541 return 0;
5542 iospace_error_exit:
5543 return -ENOMEM;
5544 }
5545
5546 /***
5547 * qla4xxx_iospace_config - maps registers
5548 * @ha: pointer to adapter structure
5549 *
5550 * This routines maps HBA's registers from the pci address space
5551 * into the kernel virtual address space for memory mapped i/o.
5552 **/
5553 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
5554 {
5555 unsigned long pio, pio_len, pio_flags;
5556 unsigned long mmio, mmio_len, mmio_flags;
5557
5558 pio = pci_resource_start(ha->pdev, 0);
5559 pio_len = pci_resource_len(ha->pdev, 0);
5560 pio_flags = pci_resource_flags(ha->pdev, 0);
5561 if (pio_flags & IORESOURCE_IO) {
5562 if (pio_len < MIN_IOBASE_LEN) {
5563 ql4_printk(KERN_WARNING, ha,
5564 "Invalid PCI I/O region size\n");
5565 pio = 0;
5566 }
5567 } else {
5568 ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
5569 pio = 0;
5570 }
5571
5572 /* Use MMIO operations for all accesses. */
5573 mmio = pci_resource_start(ha->pdev, 1);
5574 mmio_len = pci_resource_len(ha->pdev, 1);
5575 mmio_flags = pci_resource_flags(ha->pdev, 1);
5576
5577 if (!(mmio_flags & IORESOURCE_MEM)) {
5578 ql4_printk(KERN_ERR, ha,
5579 "region #0 not an MMIO resource, aborting\n");
5580
5581 goto iospace_error_exit;
5582 }
5583
5584 if (mmio_len < MIN_IOBASE_LEN) {
5585 ql4_printk(KERN_ERR, ha,
5586 "Invalid PCI mem region size, aborting\n");
5587 goto iospace_error_exit;
5588 }
5589
5590 if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
5591 ql4_printk(KERN_WARNING, ha,
5592 "Failed to reserve PIO/MMIO regions\n");
5593
5594 goto iospace_error_exit;
5595 }
5596
5597 ha->pio_address = pio;
5598 ha->pio_length = pio_len;
5599 ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
5600 if (!ha->reg) {
5601 ql4_printk(KERN_ERR, ha,
5602 "cannot remap MMIO, aborting\n");
5603
5604 goto iospace_error_exit;
5605 }
5606
5607 return 0;
5608
5609 iospace_error_exit:
5610 return -ENOMEM;
5611 }
5612
5613 static struct isp_operations qla4xxx_isp_ops = {
5614 .iospace_config = qla4xxx_iospace_config,
5615 .pci_config = qla4xxx_pci_config,
5616 .disable_intrs = qla4xxx_disable_intrs,
5617 .enable_intrs = qla4xxx_enable_intrs,
5618 .start_firmware = qla4xxx_start_firmware,
5619 .intr_handler = qla4xxx_intr_handler,
5620 .interrupt_service_routine = qla4xxx_interrupt_service_routine,
5621 .reset_chip = qla4xxx_soft_reset,
5622 .reset_firmware = qla4xxx_hw_reset,
5623 .queue_iocb = qla4xxx_queue_iocb,
5624 .complete_iocb = qla4xxx_complete_iocb,
5625 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
5626 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
5627 .get_sys_info = qla4xxx_get_sys_info,
5628 .queue_mailbox_command = qla4xxx_queue_mbox_cmd,
5629 .process_mailbox_interrupt = qla4xxx_process_mbox_intr,
5630 };
5631
5632 static struct isp_operations qla4_82xx_isp_ops = {
5633 .iospace_config = qla4_8xxx_iospace_config,
5634 .pci_config = qla4_8xxx_pci_config,
5635 .disable_intrs = qla4_82xx_disable_intrs,
5636 .enable_intrs = qla4_82xx_enable_intrs,
5637 .start_firmware = qla4_8xxx_load_risc,
5638 .restart_firmware = qla4_82xx_try_start_fw,
5639 .intr_handler = qla4_82xx_intr_handler,
5640 .interrupt_service_routine = qla4_82xx_interrupt_service_routine,
5641 .need_reset = qla4_8xxx_need_reset,
5642 .reset_chip = qla4_82xx_isp_reset,
5643 .reset_firmware = qla4_8xxx_stop_firmware,
5644 .queue_iocb = qla4_82xx_queue_iocb,
5645 .complete_iocb = qla4_82xx_complete_iocb,
5646 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out,
5647 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in,
5648 .get_sys_info = qla4_8xxx_get_sys_info,
5649 .rd_reg_direct = qla4_82xx_rd_32,
5650 .wr_reg_direct = qla4_82xx_wr_32,
5651 .rd_reg_indirect = qla4_82xx_md_rd_32,
5652 .wr_reg_indirect = qla4_82xx_md_wr_32,
5653 .idc_lock = qla4_82xx_idc_lock,
5654 .idc_unlock = qla4_82xx_idc_unlock,
5655 .rom_lock_recovery = qla4_82xx_rom_lock_recovery,
5656 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd,
5657 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
5658 };
5659
5660 static struct isp_operations qla4_83xx_isp_ops = {
5661 .iospace_config = qla4_8xxx_iospace_config,
5662 .pci_config = qla4_8xxx_pci_config,
5663 .disable_intrs = qla4_83xx_disable_intrs,
5664 .enable_intrs = qla4_83xx_enable_intrs,
5665 .start_firmware = qla4_8xxx_load_risc,
5666 .restart_firmware = qla4_83xx_start_firmware,
5667 .intr_handler = qla4_83xx_intr_handler,
5668 .interrupt_service_routine = qla4_83xx_interrupt_service_routine,
5669 .need_reset = qla4_8xxx_need_reset,
5670 .reset_chip = qla4_83xx_isp_reset,
5671 .reset_firmware = qla4_8xxx_stop_firmware,
5672 .queue_iocb = qla4_83xx_queue_iocb,
5673 .complete_iocb = qla4_83xx_complete_iocb,
5674 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
5675 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
5676 .get_sys_info = qla4_8xxx_get_sys_info,
5677 .rd_reg_direct = qla4_83xx_rd_reg,
5678 .wr_reg_direct = qla4_83xx_wr_reg,
5679 .rd_reg_indirect = qla4_83xx_rd_reg_indirect,
5680 .wr_reg_indirect = qla4_83xx_wr_reg_indirect,
5681 .idc_lock = qla4_83xx_drv_lock,
5682 .idc_unlock = qla4_83xx_drv_unlock,
5683 .rom_lock_recovery = qla4_83xx_rom_lock_recovery,
5684 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd,
5685 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
5686 };
5687
5688 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
5689 {
5690 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
5691 }
5692
5693 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
5694 {
5695 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
5696 }
5697
5698 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
5699 {
5700 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
5701 }
5702
5703 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
5704 {
5705 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
5706 }
5707
5708 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
5709 {
5710 struct scsi_qla_host *ha = data;
5711 char *str = buf;
5712 int rc;
5713
5714 switch (type) {
5715 case ISCSI_BOOT_ETH_FLAGS:
5716 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
5717 break;
5718 case ISCSI_BOOT_ETH_INDEX:
5719 rc = sprintf(str, "0\n");
5720 break;
5721 case ISCSI_BOOT_ETH_MAC:
5722 rc = sysfs_format_mac(str, ha->my_mac,
5723 MAC_ADDR_LEN);
5724 break;
5725 default:
5726 rc = -ENOSYS;
5727 break;
5728 }
5729 return rc;
5730 }
5731
5732 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
5733 {
5734 int rc;
5735
5736 switch (type) {
5737 case ISCSI_BOOT_ETH_FLAGS:
5738 case ISCSI_BOOT_ETH_MAC:
5739 case ISCSI_BOOT_ETH_INDEX:
5740 rc = S_IRUGO;
5741 break;
5742 default:
5743 rc = 0;
5744 break;
5745 }
5746 return rc;
5747 }
5748
5749 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
5750 {
5751 struct scsi_qla_host *ha = data;
5752 char *str = buf;
5753 int rc;
5754
5755 switch (type) {
5756 case ISCSI_BOOT_INI_INITIATOR_NAME:
5757 rc = sprintf(str, "%s\n", ha->name_string);
5758 break;
5759 default:
5760 rc = -ENOSYS;
5761 break;
5762 }
5763 return rc;
5764 }
5765
5766 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
5767 {
5768 int rc;
5769
5770 switch (type) {
5771 case ISCSI_BOOT_INI_INITIATOR_NAME:
5772 rc = S_IRUGO;
5773 break;
5774 default:
5775 rc = 0;
5776 break;
5777 }
5778 return rc;
5779 }
5780
5781 static ssize_t
5782 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
5783 char *buf)
5784 {
5785 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
5786 char *str = buf;
5787 int rc;
5788
5789 switch (type) {
5790 case ISCSI_BOOT_TGT_NAME:
5791 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
5792 break;
5793 case ISCSI_BOOT_TGT_IP_ADDR:
5794 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
5795 rc = sprintf(buf, "%pI4\n",
5796 &boot_conn->dest_ipaddr.ip_address);
5797 else
5798 rc = sprintf(str, "%pI6\n",
5799 &boot_conn->dest_ipaddr.ip_address);
5800 break;
5801 case ISCSI_BOOT_TGT_PORT:
5802 rc = sprintf(str, "%d\n", boot_conn->dest_port);
5803 break;
5804 case ISCSI_BOOT_TGT_CHAP_NAME:
5805 rc = sprintf(str, "%.*s\n",
5806 boot_conn->chap.target_chap_name_length,
5807 (char *)&boot_conn->chap.target_chap_name);
5808 break;
5809 case ISCSI_BOOT_TGT_CHAP_SECRET:
5810 rc = sprintf(str, "%.*s\n",
5811 boot_conn->chap.target_secret_length,
5812 (char *)&boot_conn->chap.target_secret);
5813 break;
5814 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5815 rc = sprintf(str, "%.*s\n",
5816 boot_conn->chap.intr_chap_name_length,
5817 (char *)&boot_conn->chap.intr_chap_name);
5818 break;
5819 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5820 rc = sprintf(str, "%.*s\n",
5821 boot_conn->chap.intr_secret_length,
5822 (char *)&boot_conn->chap.intr_secret);
5823 break;
5824 case ISCSI_BOOT_TGT_FLAGS:
5825 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
5826 break;
5827 case ISCSI_BOOT_TGT_NIC_ASSOC:
5828 rc = sprintf(str, "0\n");
5829 break;
5830 default:
5831 rc = -ENOSYS;
5832 break;
5833 }
5834 return rc;
5835 }
5836
5837 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
5838 {
5839 struct scsi_qla_host *ha = data;
5840 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
5841
5842 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
5843 }
5844
5845 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
5846 {
5847 struct scsi_qla_host *ha = data;
5848 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
5849
5850 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
5851 }
5852
5853 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
5854 {
5855 int rc;
5856
5857 switch (type) {
5858 case ISCSI_BOOT_TGT_NAME:
5859 case ISCSI_BOOT_TGT_IP_ADDR:
5860 case ISCSI_BOOT_TGT_PORT:
5861 case ISCSI_BOOT_TGT_CHAP_NAME:
5862 case ISCSI_BOOT_TGT_CHAP_SECRET:
5863 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5864 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5865 case ISCSI_BOOT_TGT_NIC_ASSOC:
5866 case ISCSI_BOOT_TGT_FLAGS:
5867 rc = S_IRUGO;
5868 break;
5869 default:
5870 rc = 0;
5871 break;
5872 }
5873 return rc;
5874 }
5875
5876 static void qla4xxx_boot_release(void *data)
5877 {
5878 struct scsi_qla_host *ha = data;
5879
5880 scsi_host_put(ha->host);
5881 }
5882
5883 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
5884 {
5885 dma_addr_t buf_dma;
5886 uint32_t addr, pri_addr, sec_addr;
5887 uint32_t offset;
5888 uint16_t func_num;
5889 uint8_t val;
5890 uint8_t *buf = NULL;
5891 size_t size = 13 * sizeof(uint8_t);
5892 int ret = QLA_SUCCESS;
5893
5894 func_num = PCI_FUNC(ha->pdev->devfn);
5895
5896 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
5897 __func__, ha->pdev->device, func_num);
5898
5899 if (is_qla40XX(ha)) {
5900 if (func_num == 1) {
5901 addr = NVRAM_PORT0_BOOT_MODE;
5902 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
5903 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
5904 } else if (func_num == 3) {
5905 addr = NVRAM_PORT1_BOOT_MODE;
5906 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
5907 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
5908 } else {
5909 ret = QLA_ERROR;
5910 goto exit_boot_info;
5911 }
5912
5913 /* Check Boot Mode */
5914 val = rd_nvram_byte(ha, addr);
5915 if (!(val & 0x07)) {
5916 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
5917 "options : 0x%x\n", __func__, val));
5918 ret = QLA_ERROR;
5919 goto exit_boot_info;
5920 }
5921
5922 /* get primary valid target index */
5923 val = rd_nvram_byte(ha, pri_addr);
5924 if (val & BIT_7)
5925 ddb_index[0] = (val & 0x7f);
5926
5927 /* get secondary valid target index */
5928 val = rd_nvram_byte(ha, sec_addr);
5929 if (val & BIT_7)
5930 ddb_index[1] = (val & 0x7f);
5931
5932 } else if (is_qla80XX(ha)) {
5933 buf = dma_alloc_coherent(&ha->pdev->dev, size,
5934 &buf_dma, GFP_KERNEL);
5935 if (!buf) {
5936 DEBUG2(ql4_printk(KERN_ERR, ha,
5937 "%s: Unable to allocate dma buffer\n",
5938 __func__));
5939 ret = QLA_ERROR;
5940 goto exit_boot_info;
5941 }
5942
5943 if (ha->port_num == 0)
5944 offset = BOOT_PARAM_OFFSET_PORT0;
5945 else if (ha->port_num == 1)
5946 offset = BOOT_PARAM_OFFSET_PORT1;
5947 else {
5948 ret = QLA_ERROR;
5949 goto exit_boot_info_free;
5950 }
5951 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
5952 offset;
5953 if (qla4xxx_get_flash(ha, buf_dma, addr,
5954 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
5955 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
5956 " failed\n", ha->host_no, __func__));
5957 ret = QLA_ERROR;
5958 goto exit_boot_info_free;
5959 }
5960 /* Check Boot Mode */
5961 if (!(buf[1] & 0x07)) {
5962 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
5963 " : 0x%x\n", buf[1]));
5964 ret = QLA_ERROR;
5965 goto exit_boot_info_free;
5966 }
5967
5968 /* get primary valid target index */
5969 if (buf[2] & BIT_7)
5970 ddb_index[0] = buf[2] & 0x7f;
5971
5972 /* get secondary valid target index */
5973 if (buf[11] & BIT_7)
5974 ddb_index[1] = buf[11] & 0x7f;
5975 } else {
5976 ret = QLA_ERROR;
5977 goto exit_boot_info;
5978 }
5979
5980 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
5981 " target ID %d\n", __func__, ddb_index[0],
5982 ddb_index[1]));
5983
5984 exit_boot_info_free:
5985 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
5986 exit_boot_info:
5987 ha->pri_ddb_idx = ddb_index[0];
5988 ha->sec_ddb_idx = ddb_index[1];
5989 return ret;
5990 }
5991
5992 /**
5993 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
5994 * @ha: pointer to adapter structure
5995 * @username: CHAP username to be returned
5996 * @password: CHAP password to be returned
5997 *
5998 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
5999 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
6000 * So from the CHAP cache find the first BIDI CHAP entry and set it
6001 * to the boot record in sysfs.
6002 **/
6003 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
6004 char *password)
6005 {
6006 int i, ret = -EINVAL;
6007 int max_chap_entries = 0;
6008 struct ql4_chap_table *chap_table;
6009
6010 if (is_qla80XX(ha))
6011 max_chap_entries = (ha->hw.flt_chap_size / 2) /
6012 sizeof(struct ql4_chap_table);
6013 else
6014 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
6015
6016 if (!ha->chap_list) {
6017 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
6018 return ret;
6019 }
6020
6021 mutex_lock(&ha->chap_sem);
6022 for (i = 0; i < max_chap_entries; i++) {
6023 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
6024 if (chap_table->cookie !=
6025 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
6026 continue;
6027 }
6028
6029 if (chap_table->flags & BIT_7) /* local */
6030 continue;
6031
6032 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
6033 continue;
6034
6035 strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
6036 strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
6037 ret = 0;
6038 break;
6039 }
6040 mutex_unlock(&ha->chap_sem);
6041
6042 return ret;
6043 }
6044
6045
6046 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
6047 struct ql4_boot_session_info *boot_sess,
6048 uint16_t ddb_index)
6049 {
6050 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
6051 struct dev_db_entry *fw_ddb_entry;
6052 dma_addr_t fw_ddb_entry_dma;
6053 uint16_t idx;
6054 uint16_t options;
6055 int ret = QLA_SUCCESS;
6056
6057 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6058 &fw_ddb_entry_dma, GFP_KERNEL);
6059 if (!fw_ddb_entry) {
6060 DEBUG2(ql4_printk(KERN_ERR, ha,
6061 "%s: Unable to allocate dma buffer.\n",
6062 __func__));
6063 ret = QLA_ERROR;
6064 return ret;
6065 }
6066
6067 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
6068 fw_ddb_entry_dma, ddb_index)) {
6069 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
6070 "index [%d]\n", __func__, ddb_index));
6071 ret = QLA_ERROR;
6072 goto exit_boot_target;
6073 }
6074
6075 /* Update target name and IP from DDB */
6076 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
6077 min(sizeof(boot_sess->target_name),
6078 sizeof(fw_ddb_entry->iscsi_name)));
6079
6080 options = le16_to_cpu(fw_ddb_entry->options);
6081 if (options & DDB_OPT_IPV6_DEVICE) {
6082 memcpy(&boot_conn->dest_ipaddr.ip_address,
6083 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
6084 } else {
6085 boot_conn->dest_ipaddr.ip_type = 0x1;
6086 memcpy(&boot_conn->dest_ipaddr.ip_address,
6087 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
6088 }
6089
6090 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
6091
6092 /* update chap information */
6093 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
6094
6095 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
6096
6097 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
6098
6099 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
6100 target_chap_name,
6101 (char *)&boot_conn->chap.target_secret,
6102 idx);
6103 if (ret) {
6104 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
6105 ret = QLA_ERROR;
6106 goto exit_boot_target;
6107 }
6108
6109 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
6110 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
6111 }
6112
6113 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
6114
6115 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
6116
6117 ret = qla4xxx_get_bidi_chap(ha,
6118 (char *)&boot_conn->chap.intr_chap_name,
6119 (char *)&boot_conn->chap.intr_secret);
6120
6121 if (ret) {
6122 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
6123 ret = QLA_ERROR;
6124 goto exit_boot_target;
6125 }
6126
6127 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
6128 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
6129 }
6130
6131 exit_boot_target:
6132 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6133 fw_ddb_entry, fw_ddb_entry_dma);
6134 return ret;
6135 }
6136
6137 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
6138 {
6139 uint16_t ddb_index[2];
6140 int ret = QLA_ERROR;
6141 int rval;
6142
6143 memset(ddb_index, 0, sizeof(ddb_index));
6144 ddb_index[0] = 0xffff;
6145 ddb_index[1] = 0xffff;
6146 ret = get_fw_boot_info(ha, ddb_index);
6147 if (ret != QLA_SUCCESS) {
6148 DEBUG2(ql4_printk(KERN_INFO, ha,
6149 "%s: No boot target configured.\n", __func__));
6150 return ret;
6151 }
6152
6153 if (ql4xdisablesysfsboot)
6154 return QLA_SUCCESS;
6155
6156 if (ddb_index[0] == 0xffff)
6157 goto sec_target;
6158
6159 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
6160 ddb_index[0]);
6161 if (rval != QLA_SUCCESS) {
6162 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
6163 "configured\n", __func__));
6164 } else
6165 ret = QLA_SUCCESS;
6166
6167 sec_target:
6168 if (ddb_index[1] == 0xffff)
6169 goto exit_get_boot_info;
6170
6171 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
6172 ddb_index[1]);
6173 if (rval != QLA_SUCCESS) {
6174 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
6175 " configured\n", __func__));
6176 } else
6177 ret = QLA_SUCCESS;
6178
6179 exit_get_boot_info:
6180 return ret;
6181 }
6182
6183 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
6184 {
6185 struct iscsi_boot_kobj *boot_kobj;
6186
6187 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
6188 return QLA_ERROR;
6189
6190 if (ql4xdisablesysfsboot) {
6191 ql4_printk(KERN_INFO, ha,
6192 "%s: syfsboot disabled - driver will trigger login "
6193 "and publish session for discovery .\n", __func__);
6194 return QLA_SUCCESS;
6195 }
6196
6197
6198 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
6199 if (!ha->boot_kset)
6200 goto kset_free;
6201
6202 if (!scsi_host_get(ha->host))
6203 goto kset_free;
6204 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
6205 qla4xxx_show_boot_tgt_pri_info,
6206 qla4xxx_tgt_get_attr_visibility,
6207 qla4xxx_boot_release);
6208 if (!boot_kobj)
6209 goto put_host;
6210
6211 if (!scsi_host_get(ha->host))
6212 goto kset_free;
6213 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
6214 qla4xxx_show_boot_tgt_sec_info,
6215 qla4xxx_tgt_get_attr_visibility,
6216 qla4xxx_boot_release);
6217 if (!boot_kobj)
6218 goto put_host;
6219
6220 if (!scsi_host_get(ha->host))
6221 goto kset_free;
6222 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
6223 qla4xxx_show_boot_ini_info,
6224 qla4xxx_ini_get_attr_visibility,
6225 qla4xxx_boot_release);
6226 if (!boot_kobj)
6227 goto put_host;
6228
6229 if (!scsi_host_get(ha->host))
6230 goto kset_free;
6231 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
6232 qla4xxx_show_boot_eth_info,
6233 qla4xxx_eth_get_attr_visibility,
6234 qla4xxx_boot_release);
6235 if (!boot_kobj)
6236 goto put_host;
6237
6238 return QLA_SUCCESS;
6239
6240 put_host:
6241 scsi_host_put(ha->host);
6242 kset_free:
6243 iscsi_boot_destroy_kset(ha->boot_kset);
6244 return -ENOMEM;
6245 }
6246
6247
6248 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
6249 struct ql4_tuple_ddb *tddb)
6250 {
6251 struct scsi_qla_host *ha;
6252 struct iscsi_cls_session *cls_sess;
6253 struct iscsi_cls_conn *cls_conn;
6254 struct iscsi_session *sess;
6255 struct iscsi_conn *conn;
6256
6257 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
6258 ha = ddb_entry->ha;
6259 cls_sess = ddb_entry->sess;
6260 sess = cls_sess->dd_data;
6261 cls_conn = ddb_entry->conn;
6262 conn = cls_conn->dd_data;
6263
6264 tddb->tpgt = sess->tpgt;
6265 tddb->port = conn->persistent_port;
6266 strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
6267 strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
6268 }
6269
6270 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
6271 struct ql4_tuple_ddb *tddb,
6272 uint8_t *flash_isid)
6273 {
6274 uint16_t options = 0;
6275
6276 tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
6277 memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
6278 min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
6279
6280 options = le16_to_cpu(fw_ddb_entry->options);
6281 if (options & DDB_OPT_IPV6_DEVICE)
6282 sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
6283 else
6284 sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
6285
6286 tddb->port = le16_to_cpu(fw_ddb_entry->port);
6287
6288 if (flash_isid == NULL)
6289 memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
6290 sizeof(tddb->isid));
6291 else
6292 memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
6293 }
6294
6295 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
6296 struct ql4_tuple_ddb *old_tddb,
6297 struct ql4_tuple_ddb *new_tddb,
6298 uint8_t is_isid_compare)
6299 {
6300 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
6301 return QLA_ERROR;
6302
6303 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
6304 return QLA_ERROR;
6305
6306 if (old_tddb->port != new_tddb->port)
6307 return QLA_ERROR;
6308
6309 /* For multi sessions, driver generates the ISID, so do not compare
6310 * ISID in reset path since it would be a comparison between the
6311 * driver generated ISID and firmware generated ISID. This could
6312 * lead to adding duplicated DDBs in the list as driver generated
6313 * ISID would not match firmware generated ISID.
6314 */
6315 if (is_isid_compare) {
6316 DEBUG2(ql4_printk(KERN_INFO, ha,
6317 "%s: old ISID [%pmR] New ISID [%pmR]\n",
6318 __func__, old_tddb->isid, new_tddb->isid));
6319
6320 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
6321 sizeof(old_tddb->isid)))
6322 return QLA_ERROR;
6323 }
6324
6325 DEBUG2(ql4_printk(KERN_INFO, ha,
6326 "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
6327 old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
6328 old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
6329 new_tddb->ip_addr, new_tddb->iscsi_name));
6330
6331 return QLA_SUCCESS;
6332 }
6333
6334 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
6335 struct dev_db_entry *fw_ddb_entry,
6336 uint32_t *index)
6337 {
6338 struct ddb_entry *ddb_entry;
6339 struct ql4_tuple_ddb *fw_tddb = NULL;
6340 struct ql4_tuple_ddb *tmp_tddb = NULL;
6341 int idx;
6342 int ret = QLA_ERROR;
6343
6344 fw_tddb = vzalloc(sizeof(*fw_tddb));
6345 if (!fw_tddb) {
6346 DEBUG2(ql4_printk(KERN_WARNING, ha,
6347 "Memory Allocation failed.\n"));
6348 ret = QLA_SUCCESS;
6349 goto exit_check;
6350 }
6351
6352 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6353 if (!tmp_tddb) {
6354 DEBUG2(ql4_printk(KERN_WARNING, ha,
6355 "Memory Allocation failed.\n"));
6356 ret = QLA_SUCCESS;
6357 goto exit_check;
6358 }
6359
6360 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
6361
6362 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
6363 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6364 if (ddb_entry == NULL)
6365 continue;
6366
6367 qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
6368 if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
6369 ret = QLA_SUCCESS; /* found */
6370 if (index != NULL)
6371 *index = idx;
6372 goto exit_check;
6373 }
6374 }
6375
6376 exit_check:
6377 if (fw_tddb)
6378 vfree(fw_tddb);
6379 if (tmp_tddb)
6380 vfree(tmp_tddb);
6381 return ret;
6382 }
6383
6384 /**
6385 * qla4xxx_check_existing_isid - check if target with same isid exist
6386 * in target list
6387 * @list_nt: list of target
6388 * @isid: isid to check
6389 *
6390 * This routine return QLA_SUCCESS if target with same isid exist
6391 **/
6392 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
6393 {
6394 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
6395 struct dev_db_entry *fw_ddb_entry;
6396
6397 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6398 fw_ddb_entry = &nt_ddb_idx->fw_ddb;
6399
6400 if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
6401 sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
6402 return QLA_SUCCESS;
6403 }
6404 }
6405 return QLA_ERROR;
6406 }
6407
6408 /**
6409 * qla4xxx_update_isid - compare ddbs and updated isid
6410 * @ha: Pointer to host adapter structure.
6411 * @list_nt: list of nt target
6412 * @fw_ddb_entry: firmware ddb entry
6413 *
6414 * This routine update isid if ddbs have same iqn, same isid and
6415 * different IP addr.
6416 * Return QLA_SUCCESS if isid is updated.
6417 **/
6418 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
6419 struct list_head *list_nt,
6420 struct dev_db_entry *fw_ddb_entry)
6421 {
6422 uint8_t base_value, i;
6423
6424 base_value = fw_ddb_entry->isid[1] & 0x1f;
6425 for (i = 0; i < 8; i++) {
6426 fw_ddb_entry->isid[1] = (base_value | (i << 5));
6427 if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
6428 break;
6429 }
6430
6431 if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
6432 return QLA_ERROR;
6433
6434 return QLA_SUCCESS;
6435 }
6436
6437 /**
6438 * qla4xxx_should_update_isid - check if isid need to update
6439 * @ha: Pointer to host adapter structure.
6440 * @old_tddb: ddb tuple
6441 * @new_tddb: ddb tuple
6442 *
6443 * Return QLA_SUCCESS if different IP, different PORT, same iqn,
6444 * same isid
6445 **/
6446 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
6447 struct ql4_tuple_ddb *old_tddb,
6448 struct ql4_tuple_ddb *new_tddb)
6449 {
6450 if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
6451 /* Same ip */
6452 if (old_tddb->port == new_tddb->port)
6453 return QLA_ERROR;
6454 }
6455
6456 if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
6457 /* different iqn */
6458 return QLA_ERROR;
6459
6460 if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
6461 sizeof(old_tddb->isid)))
6462 /* different isid */
6463 return QLA_ERROR;
6464
6465 return QLA_SUCCESS;
6466 }
6467
6468 /**
6469 * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
6470 * @ha: Pointer to host adapter structure.
6471 * @list_nt: list of nt target.
6472 * @fw_ddb_entry: firmware ddb entry.
6473 *
6474 * This routine check if fw_ddb_entry already exists in list_nt to avoid
6475 * duplicate ddb in list_nt.
6476 * Return QLA_SUCCESS if duplicate ddb exit in list_nl.
6477 * Note: This function also update isid of DDB if required.
6478 **/
6479
6480 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
6481 struct list_head *list_nt,
6482 struct dev_db_entry *fw_ddb_entry)
6483 {
6484 struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
6485 struct ql4_tuple_ddb *fw_tddb = NULL;
6486 struct ql4_tuple_ddb *tmp_tddb = NULL;
6487 int rval, ret = QLA_ERROR;
6488
6489 fw_tddb = vzalloc(sizeof(*fw_tddb));
6490 if (!fw_tddb) {
6491 DEBUG2(ql4_printk(KERN_WARNING, ha,
6492 "Memory Allocation failed.\n"));
6493 ret = QLA_SUCCESS;
6494 goto exit_check;
6495 }
6496
6497 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6498 if (!tmp_tddb) {
6499 DEBUG2(ql4_printk(KERN_WARNING, ha,
6500 "Memory Allocation failed.\n"));
6501 ret = QLA_SUCCESS;
6502 goto exit_check;
6503 }
6504
6505 qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
6506
6507 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6508 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
6509 nt_ddb_idx->flash_isid);
6510 ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
6511 /* found duplicate ddb */
6512 if (ret == QLA_SUCCESS)
6513 goto exit_check;
6514 }
6515
6516 list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
6517 qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
6518
6519 ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
6520 if (ret == QLA_SUCCESS) {
6521 rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
6522 if (rval == QLA_SUCCESS)
6523 ret = QLA_ERROR;
6524 else
6525 ret = QLA_SUCCESS;
6526
6527 goto exit_check;
6528 }
6529 }
6530
6531 exit_check:
6532 if (fw_tddb)
6533 vfree(fw_tddb);
6534 if (tmp_tddb)
6535 vfree(tmp_tddb);
6536 return ret;
6537 }
6538
6539 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
6540 {
6541 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6542
6543 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6544 list_del_init(&ddb_idx->list);
6545 vfree(ddb_idx);
6546 }
6547 }
6548
6549 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
6550 struct dev_db_entry *fw_ddb_entry)
6551 {
6552 struct iscsi_endpoint *ep;
6553 struct sockaddr_in *addr;
6554 struct sockaddr_in6 *addr6;
6555 struct sockaddr *t_addr;
6556 struct sockaddr_storage *dst_addr;
6557 char *ip;
6558
6559 /* TODO: need to destroy on unload iscsi_endpoint*/
6560 dst_addr = vmalloc(sizeof(*dst_addr));
6561 if (!dst_addr)
6562 return NULL;
6563
6564 if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
6565 t_addr = (struct sockaddr *)dst_addr;
6566 t_addr->sa_family = AF_INET6;
6567 addr6 = (struct sockaddr_in6 *)dst_addr;
6568 ip = (char *)&addr6->sin6_addr;
6569 memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
6570 addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
6571
6572 } else {
6573 t_addr = (struct sockaddr *)dst_addr;
6574 t_addr->sa_family = AF_INET;
6575 addr = (struct sockaddr_in *)dst_addr;
6576 ip = (char *)&addr->sin_addr;
6577 memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
6578 addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
6579 }
6580
6581 ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
6582 vfree(dst_addr);
6583 return ep;
6584 }
6585
6586 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
6587 {
6588 if (ql4xdisablesysfsboot)
6589 return QLA_SUCCESS;
6590 if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
6591 return QLA_ERROR;
6592 return QLA_SUCCESS;
6593 }
6594
6595 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
6596 struct ddb_entry *ddb_entry,
6597 uint16_t idx)
6598 {
6599 uint16_t def_timeout;
6600
6601 ddb_entry->ddb_type = FLASH_DDB;
6602 ddb_entry->fw_ddb_index = INVALID_ENTRY;
6603 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
6604 ddb_entry->ha = ha;
6605 ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
6606 ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
6607 ddb_entry->chap_tbl_idx = INVALID_ENTRY;
6608
6609 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
6610 atomic_set(&ddb_entry->relogin_timer, 0);
6611 atomic_set(&ddb_entry->relogin_retry_count, 0);
6612 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
6613 ddb_entry->default_relogin_timeout =
6614 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
6615 def_timeout : LOGIN_TOV;
6616 ddb_entry->default_time2wait =
6617 le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
6618
6619 if (ql4xdisablesysfsboot &&
6620 (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
6621 set_bit(DF_BOOT_TGT, &ddb_entry->flags);
6622 }
6623
6624 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
6625 {
6626 uint32_t idx = 0;
6627 uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
6628 uint32_t sts[MBOX_REG_COUNT];
6629 uint32_t ip_state;
6630 unsigned long wtime;
6631 int ret;
6632
6633 wtime = jiffies + (HZ * IP_CONFIG_TOV);
6634 do {
6635 for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
6636 if (ip_idx[idx] == -1)
6637 continue;
6638
6639 ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
6640
6641 if (ret == QLA_ERROR) {
6642 ip_idx[idx] = -1;
6643 continue;
6644 }
6645
6646 ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
6647
6648 DEBUG2(ql4_printk(KERN_INFO, ha,
6649 "Waiting for IP state for idx = %d, state = 0x%x\n",
6650 ip_idx[idx], ip_state));
6651 if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
6652 ip_state == IP_ADDRSTATE_INVALID ||
6653 ip_state == IP_ADDRSTATE_PREFERRED ||
6654 ip_state == IP_ADDRSTATE_DEPRICATED ||
6655 ip_state == IP_ADDRSTATE_DISABLING)
6656 ip_idx[idx] = -1;
6657 }
6658
6659 /* Break if all IP states checked */
6660 if ((ip_idx[0] == -1) &&
6661 (ip_idx[1] == -1) &&
6662 (ip_idx[2] == -1) &&
6663 (ip_idx[3] == -1))
6664 break;
6665 schedule_timeout_uninterruptible(HZ);
6666 } while (time_after(wtime, jiffies));
6667 }
6668
6669 static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
6670 struct dev_db_entry *flash_ddb_entry)
6671 {
6672 uint16_t options = 0;
6673 size_t ip_len = IP_ADDR_LEN;
6674
6675 options = le16_to_cpu(fw_ddb_entry->options);
6676 if (options & DDB_OPT_IPV6_DEVICE)
6677 ip_len = IPv6_ADDR_LEN;
6678
6679 if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
6680 return QLA_ERROR;
6681
6682 if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
6683 sizeof(fw_ddb_entry->isid)))
6684 return QLA_ERROR;
6685
6686 if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
6687 sizeof(fw_ddb_entry->port)))
6688 return QLA_ERROR;
6689
6690 return QLA_SUCCESS;
6691 }
6692
6693 static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
6694 struct dev_db_entry *fw_ddb_entry,
6695 uint32_t fw_idx, uint32_t *flash_index)
6696 {
6697 struct dev_db_entry *flash_ddb_entry;
6698 dma_addr_t flash_ddb_entry_dma;
6699 uint32_t idx = 0;
6700 int max_ddbs;
6701 int ret = QLA_ERROR, status;
6702
6703 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6704 MAX_DEV_DB_ENTRIES;
6705
6706 flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6707 &flash_ddb_entry_dma);
6708 if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
6709 ql4_printk(KERN_ERR, ha, "Out of memory\n");
6710 goto exit_find_st_idx;
6711 }
6712
6713 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
6714 flash_ddb_entry_dma, fw_idx);
6715 if (status == QLA_SUCCESS) {
6716 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
6717 if (status == QLA_SUCCESS) {
6718 *flash_index = fw_idx;
6719 ret = QLA_SUCCESS;
6720 goto exit_find_st_idx;
6721 }
6722 }
6723
6724 for (idx = 0; idx < max_ddbs; idx++) {
6725 status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
6726 flash_ddb_entry_dma, idx);
6727 if (status == QLA_ERROR)
6728 continue;
6729
6730 status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
6731 if (status == QLA_SUCCESS) {
6732 *flash_index = idx;
6733 ret = QLA_SUCCESS;
6734 goto exit_find_st_idx;
6735 }
6736 }
6737
6738 if (idx == max_ddbs)
6739 ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
6740 fw_idx);
6741
6742 exit_find_st_idx:
6743 if (flash_ddb_entry)
6744 dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
6745 flash_ddb_entry_dma);
6746
6747 return ret;
6748 }
6749
6750 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
6751 struct list_head *list_st)
6752 {
6753 struct qla_ddb_index *st_ddb_idx;
6754 int max_ddbs;
6755 int fw_idx_size;
6756 struct dev_db_entry *fw_ddb_entry;
6757 dma_addr_t fw_ddb_dma;
6758 int ret;
6759 uint32_t idx = 0, next_idx = 0;
6760 uint32_t state = 0, conn_err = 0;
6761 uint32_t flash_index = -1;
6762 uint16_t conn_id = 0;
6763
6764 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6765 &fw_ddb_dma);
6766 if (fw_ddb_entry == NULL) {
6767 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
6768 goto exit_st_list;
6769 }
6770
6771 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6772 MAX_DEV_DB_ENTRIES;
6773 fw_idx_size = sizeof(struct qla_ddb_index);
6774
6775 for (idx = 0; idx < max_ddbs; idx = next_idx) {
6776 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
6777 NULL, &next_idx, &state,
6778 &conn_err, NULL, &conn_id);
6779 if (ret == QLA_ERROR)
6780 break;
6781
6782 /* Ignore DDB if invalid state (unassigned) */
6783 if (state == DDB_DS_UNASSIGNED)
6784 goto continue_next_st;
6785
6786 /* Check if ST, add to the list_st */
6787 if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
6788 goto continue_next_st;
6789
6790 st_ddb_idx = vzalloc(fw_idx_size);
6791 if (!st_ddb_idx)
6792 break;
6793
6794 ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
6795 &flash_index);
6796 if (ret == QLA_ERROR) {
6797 ql4_printk(KERN_ERR, ha,
6798 "No flash entry for ST at idx [%d]\n", idx);
6799 st_ddb_idx->flash_ddb_idx = idx;
6800 } else {
6801 ql4_printk(KERN_INFO, ha,
6802 "ST at idx [%d] is stored at flash [%d]\n",
6803 idx, flash_index);
6804 st_ddb_idx->flash_ddb_idx = flash_index;
6805 }
6806
6807 st_ddb_idx->fw_ddb_idx = idx;
6808
6809 list_add_tail(&st_ddb_idx->list, list_st);
6810 continue_next_st:
6811 if (next_idx == 0)
6812 break;
6813 }
6814
6815 exit_st_list:
6816 if (fw_ddb_entry)
6817 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
6818 }
6819
6820 /**
6821 * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
6822 * @ha: pointer to adapter structure
6823 * @list_ddb: List from which failed ddb to be removed
6824 *
6825 * Iterate over the list of DDBs and find and remove DDBs that are either in
6826 * no connection active state or failed state
6827 **/
6828 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
6829 struct list_head *list_ddb)
6830 {
6831 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6832 uint32_t next_idx = 0;
6833 uint32_t state = 0, conn_err = 0;
6834 int ret;
6835
6836 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6837 ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
6838 NULL, 0, NULL, &next_idx, &state,
6839 &conn_err, NULL, NULL);
6840 if (ret == QLA_ERROR)
6841 continue;
6842
6843 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
6844 state == DDB_DS_SESSION_FAILED) {
6845 list_del_init(&ddb_idx->list);
6846 vfree(ddb_idx);
6847 }
6848 }
6849 }
6850
6851 static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
6852 struct ddb_entry *ddb_entry,
6853 struct dev_db_entry *fw_ddb_entry)
6854 {
6855 struct iscsi_cls_session *cls_sess;
6856 struct iscsi_session *sess;
6857 uint32_t max_ddbs = 0;
6858 uint16_t ddb_link = -1;
6859
6860 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6861 MAX_DEV_DB_ENTRIES;
6862
6863 cls_sess = ddb_entry->sess;
6864 sess = cls_sess->dd_data;
6865
6866 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
6867 if (ddb_link < max_ddbs)
6868 sess->discovery_parent_idx = ddb_link;
6869 else
6870 sess->discovery_parent_idx = DDB_NO_LINK;
6871 }
6872
6873 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
6874 struct dev_db_entry *fw_ddb_entry,
6875 int is_reset, uint16_t idx)
6876 {
6877 struct iscsi_cls_session *cls_sess;
6878 struct iscsi_session *sess;
6879 struct iscsi_cls_conn *cls_conn;
6880 struct iscsi_endpoint *ep;
6881 uint16_t cmds_max = 32;
6882 uint16_t conn_id = 0;
6883 uint32_t initial_cmdsn = 0;
6884 int ret = QLA_SUCCESS;
6885
6886 struct ddb_entry *ddb_entry = NULL;
6887
6888 /* Create session object, with INVALID_ENTRY,
6889 * the targer_id would get set when we issue the login
6890 */
6891 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
6892 cmds_max, sizeof(struct ddb_entry),
6893 sizeof(struct ql4_task_data),
6894 initial_cmdsn, INVALID_ENTRY);
6895 if (!cls_sess) {
6896 ret = QLA_ERROR;
6897 goto exit_setup;
6898 }
6899
6900 /*
6901 * so calling module_put function to decrement the
6902 * reference count.
6903 **/
6904 module_put(qla4xxx_iscsi_transport.owner);
6905 sess = cls_sess->dd_data;
6906 ddb_entry = sess->dd_data;
6907 ddb_entry->sess = cls_sess;
6908
6909 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
6910 memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
6911 sizeof(struct dev_db_entry));
6912
6913 qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
6914
6915 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
6916
6917 if (!cls_conn) {
6918 ret = QLA_ERROR;
6919 goto exit_setup;
6920 }
6921
6922 ddb_entry->conn = cls_conn;
6923
6924 /* Setup ep, for displaying attributes in sysfs */
6925 ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
6926 if (ep) {
6927 ep->conn = cls_conn;
6928 cls_conn->ep = ep;
6929 } else {
6930 DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
6931 ret = QLA_ERROR;
6932 goto exit_setup;
6933 }
6934
6935 /* Update sess/conn params */
6936 qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
6937 qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
6938
6939 if (is_reset == RESET_ADAPTER) {
6940 iscsi_block_session(cls_sess);
6941 /* Use the relogin path to discover new devices
6942 * by short-circuting the logic of setting
6943 * timer to relogin - instead set the flags
6944 * to initiate login right away.
6945 */
6946 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
6947 set_bit(DF_RELOGIN, &ddb_entry->flags);
6948 }
6949
6950 exit_setup:
6951 return ret;
6952 }
6953
6954 static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
6955 struct list_head *list_ddb,
6956 struct dev_db_entry *fw_ddb_entry)
6957 {
6958 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
6959 uint16_t ddb_link;
6960
6961 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
6962
6963 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
6964 if (ddb_idx->fw_ddb_idx == ddb_link) {
6965 DEBUG2(ql4_printk(KERN_INFO, ha,
6966 "Updating NT parent idx from [%d] to [%d]\n",
6967 ddb_link, ddb_idx->flash_ddb_idx));
6968 fw_ddb_entry->ddb_link =
6969 cpu_to_le16(ddb_idx->flash_ddb_idx);
6970 return;
6971 }
6972 }
6973 }
6974
6975 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
6976 struct list_head *list_nt,
6977 struct list_head *list_st,
6978 int is_reset)
6979 {
6980 struct dev_db_entry *fw_ddb_entry;
6981 struct ddb_entry *ddb_entry = NULL;
6982 dma_addr_t fw_ddb_dma;
6983 int max_ddbs;
6984 int fw_idx_size;
6985 int ret;
6986 uint32_t idx = 0, next_idx = 0;
6987 uint32_t state = 0, conn_err = 0;
6988 uint32_t ddb_idx = -1;
6989 uint16_t conn_id = 0;
6990 uint16_t ddb_link = -1;
6991 struct qla_ddb_index *nt_ddb_idx;
6992
6993 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6994 &fw_ddb_dma);
6995 if (fw_ddb_entry == NULL) {
6996 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
6997 goto exit_nt_list;
6998 }
6999 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
7000 MAX_DEV_DB_ENTRIES;
7001 fw_idx_size = sizeof(struct qla_ddb_index);
7002
7003 for (idx = 0; idx < max_ddbs; idx = next_idx) {
7004 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
7005 NULL, &next_idx, &state,
7006 &conn_err, NULL, &conn_id);
7007 if (ret == QLA_ERROR)
7008 break;
7009
7010 if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
7011 goto continue_next_nt;
7012
7013 /* Check if NT, then add to list it */
7014 if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
7015 goto continue_next_nt;
7016
7017 ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
7018 if (ddb_link < max_ddbs)
7019 qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
7020
7021 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
7022 state == DDB_DS_SESSION_FAILED) &&
7023 (is_reset == INIT_ADAPTER))
7024 goto continue_next_nt;
7025
7026 DEBUG2(ql4_printk(KERN_INFO, ha,
7027 "Adding DDB to session = 0x%x\n", idx));
7028
7029 if (is_reset == INIT_ADAPTER) {
7030 nt_ddb_idx = vmalloc(fw_idx_size);
7031 if (!nt_ddb_idx)
7032 break;
7033
7034 nt_ddb_idx->fw_ddb_idx = idx;
7035
7036 /* Copy original isid as it may get updated in function
7037 * qla4xxx_update_isid(). We need original isid in
7038 * function qla4xxx_compare_tuple_ddb to find duplicate
7039 * target */
7040 memcpy(&nt_ddb_idx->flash_isid[0],
7041 &fw_ddb_entry->isid[0],
7042 sizeof(nt_ddb_idx->flash_isid));
7043
7044 ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
7045 fw_ddb_entry);
7046 if (ret == QLA_SUCCESS) {
7047 /* free nt_ddb_idx and do not add to list_nt */
7048 vfree(nt_ddb_idx);
7049 goto continue_next_nt;
7050 }
7051
7052 /* Copy updated isid */
7053 memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
7054 sizeof(struct dev_db_entry));
7055
7056 list_add_tail(&nt_ddb_idx->list, list_nt);
7057 } else if (is_reset == RESET_ADAPTER) {
7058 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
7059 &ddb_idx);
7060 if (ret == QLA_SUCCESS) {
7061 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
7062 ddb_idx);
7063 if (ddb_entry != NULL)
7064 qla4xxx_update_sess_disc_idx(ha,
7065 ddb_entry,
7066 fw_ddb_entry);
7067 goto continue_next_nt;
7068 }
7069 }
7070
7071 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
7072 if (ret == QLA_ERROR)
7073 goto exit_nt_list;
7074
7075 continue_next_nt:
7076 if (next_idx == 0)
7077 break;
7078 }
7079
7080 exit_nt_list:
7081 if (fw_ddb_entry)
7082 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
7083 }
7084
7085 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
7086 struct list_head *list_nt,
7087 uint16_t target_id)
7088 {
7089 struct dev_db_entry *fw_ddb_entry;
7090 dma_addr_t fw_ddb_dma;
7091 int max_ddbs;
7092 int fw_idx_size;
7093 int ret;
7094 uint32_t idx = 0, next_idx = 0;
7095 uint32_t state = 0, conn_err = 0;
7096 uint16_t conn_id = 0;
7097 struct qla_ddb_index *nt_ddb_idx;
7098
7099 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
7100 &fw_ddb_dma);
7101 if (fw_ddb_entry == NULL) {
7102 DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
7103 goto exit_new_nt_list;
7104 }
7105 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
7106 MAX_DEV_DB_ENTRIES;
7107 fw_idx_size = sizeof(struct qla_ddb_index);
7108
7109 for (idx = 0; idx < max_ddbs; idx = next_idx) {
7110 ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
7111 NULL, &next_idx, &state,
7112 &conn_err, NULL, &conn_id);
7113 if (ret == QLA_ERROR)
7114 break;
7115
7116 /* Check if NT, then add it to list */
7117 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
7118 goto continue_next_new_nt;
7119
7120 if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
7121 goto continue_next_new_nt;
7122
7123 DEBUG2(ql4_printk(KERN_INFO, ha,
7124 "Adding DDB to session = 0x%x\n", idx));
7125
7126 nt_ddb_idx = vmalloc(fw_idx_size);
7127 if (!nt_ddb_idx)
7128 break;
7129
7130 nt_ddb_idx->fw_ddb_idx = idx;
7131
7132 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
7133 if (ret == QLA_SUCCESS) {
7134 /* free nt_ddb_idx and do not add to list_nt */
7135 vfree(nt_ddb_idx);
7136 goto continue_next_new_nt;
7137 }
7138
7139 if (target_id < max_ddbs)
7140 fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
7141
7142 list_add_tail(&nt_ddb_idx->list, list_nt);
7143
7144 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
7145 idx);
7146 if (ret == QLA_ERROR)
7147 goto exit_new_nt_list;
7148
7149 continue_next_new_nt:
7150 if (next_idx == 0)
7151 break;
7152 }
7153
7154 exit_new_nt_list:
7155 if (fw_ddb_entry)
7156 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
7157 }
7158
7159 /**
7160 * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
7161 * @dev: dev associated with the sysfs entry
7162 * @data: pointer to flashnode session object
7163 *
7164 * Returns:
7165 * 1: if flashnode entry is non-persistent
7166 * 0: if flashnode entry is persistent
7167 **/
7168 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
7169 {
7170 struct iscsi_bus_flash_session *fnode_sess;
7171
7172 if (!iscsi_flashnode_bus_match(dev, NULL))
7173 return 0;
7174
7175 fnode_sess = iscsi_dev_to_flash_session(dev);
7176
7177 return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
7178 }
7179
7180 /**
7181 * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
7182 * @ha: pointer to host
7183 * @fw_ddb_entry: flash ddb data
7184 * @idx: target index
7185 * @user: if set then this call is made from userland else from kernel
7186 *
7187 * Returns:
7188 * On sucess: QLA_SUCCESS
7189 * On failure: QLA_ERROR
7190 *
7191 * This create separate sysfs entries for session and connection attributes of
7192 * the given fw ddb entry.
7193 * If this is invoked as a result of a userspace call then the entry is marked
7194 * as nonpersistent using flash_state field.
7195 **/
7196 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
7197 struct dev_db_entry *fw_ddb_entry,
7198 uint16_t *idx, int user)
7199 {
7200 struct iscsi_bus_flash_session *fnode_sess = NULL;
7201 struct iscsi_bus_flash_conn *fnode_conn = NULL;
7202 int rc = QLA_ERROR;
7203
7204 fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
7205 &qla4xxx_iscsi_transport, 0);
7206 if (!fnode_sess) {
7207 ql4_printk(KERN_ERR, ha,
7208 "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
7209 __func__, *idx, ha->host_no);
7210 goto exit_tgt_create;
7211 }
7212
7213 fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
7214 &qla4xxx_iscsi_transport, 0);
7215 if (!fnode_conn) {
7216 ql4_printk(KERN_ERR, ha,
7217 "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
7218 __func__, *idx, ha->host_no);
7219 goto free_sess;
7220 }
7221
7222 if (user) {
7223 fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
7224 } else {
7225 fnode_sess->flash_state = DEV_DB_PERSISTENT;
7226
7227 if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
7228 fnode_sess->is_boot_target = 1;
7229 else
7230 fnode_sess->is_boot_target = 0;
7231 }
7232
7233 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
7234 fw_ddb_entry);
7235 if (rc)
7236 goto free_sess;
7237
7238 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
7239 __func__, fnode_sess->dev.kobj.name);
7240
7241 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
7242 __func__, fnode_conn->dev.kobj.name);
7243
7244 return QLA_SUCCESS;
7245
7246 free_sess:
7247 iscsi_destroy_flashnode_sess(fnode_sess);
7248
7249 exit_tgt_create:
7250 return QLA_ERROR;
7251 }
7252
7253 /**
7254 * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
7255 * @shost: pointer to host
7256 * @buf: type of ddb entry (ipv4/ipv6)
7257 * @len: length of buf
7258 *
7259 * This creates new ddb entry in the flash by finding first free index and
7260 * storing default ddb there. And then create sysfs entry for the new ddb entry.
7261 **/
7262 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
7263 int len)
7264 {
7265 struct scsi_qla_host *ha = to_qla_host(shost);
7266 struct dev_db_entry *fw_ddb_entry = NULL;
7267 dma_addr_t fw_ddb_entry_dma;
7268 struct device *dev;
7269 uint16_t idx = 0;
7270 uint16_t max_ddbs = 0;
7271 uint32_t options = 0;
7272 uint32_t rval = QLA_ERROR;
7273
7274 if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
7275 strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
7276 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
7277 __func__));
7278 goto exit_ddb_add;
7279 }
7280
7281 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
7282 MAX_DEV_DB_ENTRIES;
7283
7284 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7285 &fw_ddb_entry_dma, GFP_KERNEL);
7286 if (!fw_ddb_entry) {
7287 DEBUG2(ql4_printk(KERN_ERR, ha,
7288 "%s: Unable to allocate dma buffer\n",
7289 __func__));
7290 goto exit_ddb_add;
7291 }
7292
7293 dev = iscsi_find_flashnode_sess(ha->host, NULL,
7294 qla4xxx_sysfs_ddb_is_non_persistent);
7295 if (dev) {
7296 ql4_printk(KERN_ERR, ha,
7297 "%s: A non-persistent entry %s found\n",
7298 __func__, dev->kobj.name);
7299 put_device(dev);
7300 goto exit_ddb_add;
7301 }
7302
7303 /* Index 0 and 1 are reserved for boot target entries */
7304 for (idx = 2; idx < max_ddbs; idx++) {
7305 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
7306 fw_ddb_entry_dma, idx))
7307 break;
7308 }
7309
7310 if (idx == max_ddbs)
7311 goto exit_ddb_add;
7312
7313 if (!strncasecmp("ipv6", buf, 4))
7314 options |= IPV6_DEFAULT_DDB_ENTRY;
7315
7316 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7317 if (rval == QLA_ERROR)
7318 goto exit_ddb_add;
7319
7320 rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
7321
7322 exit_ddb_add:
7323 if (fw_ddb_entry)
7324 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7325 fw_ddb_entry, fw_ddb_entry_dma);
7326 if (rval == QLA_SUCCESS)
7327 return idx;
7328 else
7329 return -EIO;
7330 }
7331
7332 /**
7333 * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
7334 * @fnode_sess: pointer to session attrs of flash ddb entry
7335 * @fnode_conn: pointer to connection attrs of flash ddb entry
7336 *
7337 * This writes the contents of target ddb buffer to Flash with a valid cookie
7338 * value in order to make the ddb entry persistent.
7339 **/
7340 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
7341 struct iscsi_bus_flash_conn *fnode_conn)
7342 {
7343 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7344 struct scsi_qla_host *ha = to_qla_host(shost);
7345 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
7346 struct dev_db_entry *fw_ddb_entry = NULL;
7347 dma_addr_t fw_ddb_entry_dma;
7348 uint32_t options = 0;
7349 int rval = 0;
7350
7351 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7352 &fw_ddb_entry_dma, GFP_KERNEL);
7353 if (!fw_ddb_entry) {
7354 DEBUG2(ql4_printk(KERN_ERR, ha,
7355 "%s: Unable to allocate dma buffer\n",
7356 __func__));
7357 rval = -ENOMEM;
7358 goto exit_ddb_apply;
7359 }
7360
7361 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7362 options |= IPV6_DEFAULT_DDB_ENTRY;
7363
7364 rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7365 if (rval == QLA_ERROR)
7366 goto exit_ddb_apply;
7367
7368 dev_db_start_offset += (fnode_sess->target_id *
7369 sizeof(*fw_ddb_entry));
7370
7371 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
7372 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
7373
7374 rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
7375 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
7376
7377 if (rval == QLA_SUCCESS) {
7378 fnode_sess->flash_state = DEV_DB_PERSISTENT;
7379 ql4_printk(KERN_INFO, ha,
7380 "%s: flash node %u of host %lu written to flash\n",
7381 __func__, fnode_sess->target_id, ha->host_no);
7382 } else {
7383 rval = -EIO;
7384 ql4_printk(KERN_ERR, ha,
7385 "%s: Error while writing flash node %u of host %lu to flash\n",
7386 __func__, fnode_sess->target_id, ha->host_no);
7387 }
7388
7389 exit_ddb_apply:
7390 if (fw_ddb_entry)
7391 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7392 fw_ddb_entry, fw_ddb_entry_dma);
7393 return rval;
7394 }
7395
7396 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
7397 struct dev_db_entry *fw_ddb_entry,
7398 uint16_t idx)
7399 {
7400 struct dev_db_entry *ddb_entry = NULL;
7401 dma_addr_t ddb_entry_dma;
7402 unsigned long wtime;
7403 uint32_t mbx_sts = 0;
7404 uint32_t state = 0, conn_err = 0;
7405 uint16_t tmo = 0;
7406 int ret = 0;
7407
7408 ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
7409 &ddb_entry_dma, GFP_KERNEL);
7410 if (!ddb_entry) {
7411 DEBUG2(ql4_printk(KERN_ERR, ha,
7412 "%s: Unable to allocate dma buffer\n",
7413 __func__));
7414 return QLA_ERROR;
7415 }
7416
7417 memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
7418
7419 ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
7420 if (ret != QLA_SUCCESS) {
7421 DEBUG2(ql4_printk(KERN_ERR, ha,
7422 "%s: Unable to set ddb entry for index %d\n",
7423 __func__, idx));
7424 goto exit_ddb_conn_open;
7425 }
7426
7427 qla4xxx_conn_open(ha, idx);
7428
7429 /* To ensure that sendtargets is done, wait for at least 12 secs */
7430 tmo = ((ha->def_timeout > LOGIN_TOV) &&
7431 (ha->def_timeout < LOGIN_TOV * 10) ?
7432 ha->def_timeout : LOGIN_TOV);
7433
7434 DEBUG2(ql4_printk(KERN_INFO, ha,
7435 "Default time to wait for login to ddb %d\n", tmo));
7436
7437 wtime = jiffies + (HZ * tmo);
7438 do {
7439 ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
7440 NULL, &state, &conn_err, NULL,
7441 NULL);
7442 if (ret == QLA_ERROR)
7443 continue;
7444
7445 if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
7446 state == DDB_DS_SESSION_FAILED)
7447 break;
7448
7449 schedule_timeout_uninterruptible(HZ / 10);
7450 } while (time_after(wtime, jiffies));
7451
7452 exit_ddb_conn_open:
7453 if (ddb_entry)
7454 dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
7455 ddb_entry, ddb_entry_dma);
7456 return ret;
7457 }
7458
7459 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
7460 struct dev_db_entry *fw_ddb_entry,
7461 uint16_t target_id)
7462 {
7463 struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
7464 struct list_head list_nt;
7465 uint16_t ddb_index;
7466 int ret = 0;
7467
7468 if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
7469 ql4_printk(KERN_WARNING, ha,
7470 "%s: A discovery already in progress!\n", __func__);
7471 return QLA_ERROR;
7472 }
7473
7474 INIT_LIST_HEAD(&list_nt);
7475
7476 set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
7477
7478 ret = qla4xxx_get_ddb_index(ha, &ddb_index);
7479 if (ret == QLA_ERROR)
7480 goto exit_login_st_clr_bit;
7481
7482 ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
7483 if (ret == QLA_ERROR)
7484 goto exit_login_st;
7485
7486 qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
7487
7488 list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
7489 list_del_init(&ddb_idx->list);
7490 qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
7491 vfree(ddb_idx);
7492 }
7493
7494 exit_login_st:
7495 if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
7496 ql4_printk(KERN_ERR, ha,
7497 "Unable to clear DDB index = 0x%x\n", ddb_index);
7498 }
7499
7500 clear_bit(ddb_index, ha->ddb_idx_map);
7501
7502 exit_login_st_clr_bit:
7503 clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
7504 return ret;
7505 }
7506
7507 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
7508 struct dev_db_entry *fw_ddb_entry,
7509 uint16_t idx)
7510 {
7511 int ret = QLA_ERROR;
7512
7513 ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
7514 if (ret != QLA_SUCCESS)
7515 ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
7516 idx);
7517 else
7518 ret = -EPERM;
7519
7520 return ret;
7521 }
7522
7523 /**
7524 * qla4xxx_sysfs_ddb_login - Login to the specified target
7525 * @fnode_sess: pointer to session attrs of flash ddb entry
7526 * @fnode_conn: pointer to connection attrs of flash ddb entry
7527 *
7528 * This logs in to the specified target
7529 **/
7530 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
7531 struct iscsi_bus_flash_conn *fnode_conn)
7532 {
7533 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7534 struct scsi_qla_host *ha = to_qla_host(shost);
7535 struct dev_db_entry *fw_ddb_entry = NULL;
7536 dma_addr_t fw_ddb_entry_dma;
7537 uint32_t options = 0;
7538 int ret = 0;
7539
7540 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
7541 ql4_printk(KERN_ERR, ha,
7542 "%s: Target info is not persistent\n", __func__);
7543 ret = -EIO;
7544 goto exit_ddb_login;
7545 }
7546
7547 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7548 &fw_ddb_entry_dma, GFP_KERNEL);
7549 if (!fw_ddb_entry) {
7550 DEBUG2(ql4_printk(KERN_ERR, ha,
7551 "%s: Unable to allocate dma buffer\n",
7552 __func__));
7553 ret = -ENOMEM;
7554 goto exit_ddb_login;
7555 }
7556
7557 if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7558 options |= IPV6_DEFAULT_DDB_ENTRY;
7559
7560 ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
7561 if (ret == QLA_ERROR)
7562 goto exit_ddb_login;
7563
7564 qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
7565 fw_ddb_entry->cookie = DDB_VALID_COOKIE;
7566
7567 if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
7568 ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
7569 fnode_sess->target_id);
7570 else
7571 ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
7572 fnode_sess->target_id);
7573
7574 if (ret > 0)
7575 ret = -EIO;
7576
7577 exit_ddb_login:
7578 if (fw_ddb_entry)
7579 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7580 fw_ddb_entry, fw_ddb_entry_dma);
7581 return ret;
7582 }
7583
7584 /**
7585 * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
7586 * @cls_sess: pointer to session to be logged out
7587 *
7588 * This performs session log out from the specified target
7589 **/
7590 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
7591 {
7592 struct iscsi_session *sess;
7593 struct ddb_entry *ddb_entry = NULL;
7594 struct scsi_qla_host *ha;
7595 struct dev_db_entry *fw_ddb_entry = NULL;
7596 dma_addr_t fw_ddb_entry_dma;
7597 unsigned long flags;
7598 unsigned long wtime;
7599 uint32_t ddb_state;
7600 int options;
7601 int ret = 0;
7602
7603 sess = cls_sess->dd_data;
7604 ddb_entry = sess->dd_data;
7605 ha = ddb_entry->ha;
7606
7607 if (ddb_entry->ddb_type != FLASH_DDB) {
7608 ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
7609 __func__);
7610 ret = -ENXIO;
7611 goto exit_ddb_logout;
7612 }
7613
7614 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
7615 ql4_printk(KERN_ERR, ha,
7616 "%s: Logout from boot target entry is not permitted.\n",
7617 __func__);
7618 ret = -EPERM;
7619 goto exit_ddb_logout;
7620 }
7621
7622 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7623 &fw_ddb_entry_dma, GFP_KERNEL);
7624 if (!fw_ddb_entry) {
7625 ql4_printk(KERN_ERR, ha,
7626 "%s: Unable to allocate dma buffer\n", __func__);
7627 ret = -ENOMEM;
7628 goto exit_ddb_logout;
7629 }
7630
7631 if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
7632 goto ddb_logout_init;
7633
7634 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
7635 fw_ddb_entry, fw_ddb_entry_dma,
7636 NULL, NULL, &ddb_state, NULL,
7637 NULL, NULL);
7638 if (ret == QLA_ERROR)
7639 goto ddb_logout_init;
7640
7641 if (ddb_state == DDB_DS_SESSION_ACTIVE)
7642 goto ddb_logout_init;
7643
7644 /* wait until next relogin is triggered using DF_RELOGIN and
7645 * clear DF_RELOGIN to avoid invocation of further relogin
7646 */
7647 wtime = jiffies + (HZ * RELOGIN_TOV);
7648 do {
7649 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
7650 goto ddb_logout_init;
7651
7652 schedule_timeout_uninterruptible(HZ);
7653 } while ((time_after(wtime, jiffies)));
7654
7655 ddb_logout_init:
7656 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
7657 atomic_set(&ddb_entry->relogin_timer, 0);
7658
7659 options = LOGOUT_OPTION_CLOSE_SESSION;
7660 qla4xxx_session_logout_ddb(ha, ddb_entry, options);
7661
7662 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
7663 wtime = jiffies + (HZ * LOGOUT_TOV);
7664 do {
7665 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
7666 fw_ddb_entry, fw_ddb_entry_dma,
7667 NULL, NULL, &ddb_state, NULL,
7668 NULL, NULL);
7669 if (ret == QLA_ERROR)
7670 goto ddb_logout_clr_sess;
7671
7672 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
7673 (ddb_state == DDB_DS_SESSION_FAILED))
7674 goto ddb_logout_clr_sess;
7675
7676 schedule_timeout_uninterruptible(HZ);
7677 } while ((time_after(wtime, jiffies)));
7678
7679 ddb_logout_clr_sess:
7680 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
7681 /*
7682 * we have decremented the reference count of the driver
7683 * when we setup the session to have the driver unload
7684 * to be seamless without actually destroying the
7685 * session
7686 **/
7687 try_module_get(qla4xxx_iscsi_transport.owner);
7688 iscsi_destroy_endpoint(ddb_entry->conn->ep);
7689
7690 spin_lock_irqsave(&ha->hardware_lock, flags);
7691 qla4xxx_free_ddb(ha, ddb_entry);
7692 clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
7693 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7694
7695 iscsi_session_teardown(ddb_entry->sess);
7696
7697 clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
7698 ret = QLA_SUCCESS;
7699
7700 exit_ddb_logout:
7701 if (fw_ddb_entry)
7702 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
7703 fw_ddb_entry, fw_ddb_entry_dma);
7704 return ret;
7705 }
7706
7707 /**
7708 * qla4xxx_sysfs_ddb_logout - Logout from the specified target
7709 * @fnode_sess: pointer to session attrs of flash ddb entry
7710 * @fnode_conn: pointer to connection attrs of flash ddb entry
7711 *
7712 * This performs log out from the specified target
7713 **/
7714 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
7715 struct iscsi_bus_flash_conn *fnode_conn)
7716 {
7717 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7718 struct scsi_qla_host *ha = to_qla_host(shost);
7719 struct ql4_tuple_ddb *flash_tddb = NULL;
7720 struct ql4_tuple_ddb *tmp_tddb = NULL;
7721 struct dev_db_entry *fw_ddb_entry = NULL;
7722 struct ddb_entry *ddb_entry = NULL;
7723 dma_addr_t fw_ddb_dma;
7724 uint32_t next_idx = 0;
7725 uint32_t state = 0, conn_err = 0;
7726 uint16_t conn_id = 0;
7727 int idx, index;
7728 int status, ret = 0;
7729
7730 fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
7731 &fw_ddb_dma);
7732 if (fw_ddb_entry == NULL) {
7733 ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
7734 ret = -ENOMEM;
7735 goto exit_ddb_logout;
7736 }
7737
7738 flash_tddb = vzalloc(sizeof(*flash_tddb));
7739 if (!flash_tddb) {
7740 ql4_printk(KERN_WARNING, ha,
7741 "%s:Memory Allocation failed.\n", __func__);
7742 ret = -ENOMEM;
7743 goto exit_ddb_logout;
7744 }
7745
7746 tmp_tddb = vzalloc(sizeof(*tmp_tddb));
7747 if (!tmp_tddb) {
7748 ql4_printk(KERN_WARNING, ha,
7749 "%s:Memory Allocation failed.\n", __func__);
7750 ret = -ENOMEM;
7751 goto exit_ddb_logout;
7752 }
7753
7754 if (!fnode_sess->targetname) {
7755 ql4_printk(KERN_ERR, ha,
7756 "%s:Cannot logout from SendTarget entry\n",
7757 __func__);
7758 ret = -EPERM;
7759 goto exit_ddb_logout;
7760 }
7761
7762 if (fnode_sess->is_boot_target) {
7763 ql4_printk(KERN_ERR, ha,
7764 "%s: Logout from boot target entry is not permitted.\n",
7765 __func__);
7766 ret = -EPERM;
7767 goto exit_ddb_logout;
7768 }
7769
7770 strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname,
7771 ISCSI_NAME_SIZE);
7772
7773 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7774 sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
7775 else
7776 sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
7777
7778 flash_tddb->tpgt = fnode_sess->tpgt;
7779 flash_tddb->port = fnode_conn->port;
7780
7781 COPY_ISID(flash_tddb->isid, fnode_sess->isid);
7782
7783 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
7784 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7785 if (ddb_entry == NULL)
7786 continue;
7787
7788 if (ddb_entry->ddb_type != FLASH_DDB)
7789 continue;
7790
7791 index = ddb_entry->sess->target_id;
7792 status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
7793 fw_ddb_dma, NULL, &next_idx,
7794 &state, &conn_err, NULL,
7795 &conn_id);
7796 if (status == QLA_ERROR) {
7797 ret = -ENOMEM;
7798 break;
7799 }
7800
7801 qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
7802
7803 status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
7804 true);
7805 if (status == QLA_SUCCESS) {
7806 ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
7807 break;
7808 }
7809 }
7810
7811 if (idx == MAX_DDB_ENTRIES)
7812 ret = -ESRCH;
7813
7814 exit_ddb_logout:
7815 if (flash_tddb)
7816 vfree(flash_tddb);
7817 if (tmp_tddb)
7818 vfree(tmp_tddb);
7819 if (fw_ddb_entry)
7820 dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
7821
7822 return ret;
7823 }
7824
7825 static int
7826 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
7827 int param, char *buf)
7828 {
7829 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
7830 struct scsi_qla_host *ha = to_qla_host(shost);
7831 struct iscsi_bus_flash_conn *fnode_conn;
7832 struct ql4_chap_table chap_tbl;
7833 struct device *dev;
7834 int parent_type;
7835 int rc = 0;
7836
7837 dev = iscsi_find_flashnode_conn(fnode_sess);
7838 if (!dev)
7839 return -EIO;
7840
7841 fnode_conn = iscsi_dev_to_flash_conn(dev);
7842
7843 switch (param) {
7844 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
7845 rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
7846 break;
7847 case ISCSI_FLASHNODE_PORTAL_TYPE:
7848 rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
7849 break;
7850 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
7851 rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
7852 break;
7853 case ISCSI_FLASHNODE_DISCOVERY_SESS:
7854 rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
7855 break;
7856 case ISCSI_FLASHNODE_ENTRY_EN:
7857 rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
7858 break;
7859 case ISCSI_FLASHNODE_HDR_DGST_EN:
7860 rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
7861 break;
7862 case ISCSI_FLASHNODE_DATA_DGST_EN:
7863 rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
7864 break;
7865 case ISCSI_FLASHNODE_IMM_DATA_EN:
7866 rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
7867 break;
7868 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
7869 rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
7870 break;
7871 case ISCSI_FLASHNODE_DATASEQ_INORDER:
7872 rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
7873 break;
7874 case ISCSI_FLASHNODE_PDU_INORDER:
7875 rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
7876 break;
7877 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
7878 rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
7879 break;
7880 case ISCSI_FLASHNODE_SNACK_REQ_EN:
7881 rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
7882 break;
7883 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
7884 rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
7885 break;
7886 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
7887 rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
7888 break;
7889 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
7890 rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
7891 break;
7892 case ISCSI_FLASHNODE_ERL:
7893 rc = sprintf(buf, "%u\n", fnode_sess->erl);
7894 break;
7895 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
7896 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
7897 break;
7898 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
7899 rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
7900 break;
7901 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
7902 rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
7903 break;
7904 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
7905 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
7906 break;
7907 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
7908 rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
7909 break;
7910 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
7911 rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
7912 break;
7913 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
7914 rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
7915 break;
7916 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
7917 rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
7918 break;
7919 case ISCSI_FLASHNODE_FIRST_BURST:
7920 rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
7921 break;
7922 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
7923 rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
7924 break;
7925 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
7926 rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
7927 break;
7928 case ISCSI_FLASHNODE_MAX_R2T:
7929 rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
7930 break;
7931 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
7932 rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
7933 break;
7934 case ISCSI_FLASHNODE_ISID:
7935 rc = sprintf(buf, "%pm\n", fnode_sess->isid);
7936 break;
7937 case ISCSI_FLASHNODE_TSID:
7938 rc = sprintf(buf, "%u\n", fnode_sess->tsid);
7939 break;
7940 case ISCSI_FLASHNODE_PORT:
7941 rc = sprintf(buf, "%d\n", fnode_conn->port);
7942 break;
7943 case ISCSI_FLASHNODE_MAX_BURST:
7944 rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
7945 break;
7946 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
7947 rc = sprintf(buf, "%u\n",
7948 fnode_sess->default_taskmgmt_timeout);
7949 break;
7950 case ISCSI_FLASHNODE_IPADDR:
7951 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7952 rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
7953 else
7954 rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
7955 break;
7956 case ISCSI_FLASHNODE_ALIAS:
7957 if (fnode_sess->targetalias)
7958 rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
7959 else
7960 rc = sprintf(buf, "\n");
7961 break;
7962 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
7963 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7964 rc = sprintf(buf, "%pI6\n",
7965 fnode_conn->redirect_ipaddr);
7966 else
7967 rc = sprintf(buf, "%pI4\n",
7968 fnode_conn->redirect_ipaddr);
7969 break;
7970 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
7971 rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
7972 break;
7973 case ISCSI_FLASHNODE_LOCAL_PORT:
7974 rc = sprintf(buf, "%u\n", fnode_conn->local_port);
7975 break;
7976 case ISCSI_FLASHNODE_IPV4_TOS:
7977 rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
7978 break;
7979 case ISCSI_FLASHNODE_IPV6_TC:
7980 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7981 rc = sprintf(buf, "%u\n",
7982 fnode_conn->ipv6_traffic_class);
7983 else
7984 rc = sprintf(buf, "\n");
7985 break;
7986 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
7987 rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
7988 break;
7989 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
7990 if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
7991 rc = sprintf(buf, "%pI6\n",
7992 fnode_conn->link_local_ipv6_addr);
7993 else
7994 rc = sprintf(buf, "\n");
7995 break;
7996 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
7997 rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
7998 break;
7999 case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
8000 if (fnode_sess->discovery_parent_type == DDB_ISNS)
8001 parent_type = ISCSI_DISC_PARENT_ISNS;
8002 else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
8003 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
8004 else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
8005 parent_type = ISCSI_DISC_PARENT_SENDTGT;
8006 else
8007 parent_type = ISCSI_DISC_PARENT_UNKNOWN;
8008
8009 rc = sprintf(buf, "%s\n",
8010 iscsi_get_discovery_parent_name(parent_type));
8011 break;
8012 case ISCSI_FLASHNODE_NAME:
8013 if (fnode_sess->targetname)
8014 rc = sprintf(buf, "%s\n", fnode_sess->targetname);
8015 else
8016 rc = sprintf(buf, "\n");
8017 break;
8018 case ISCSI_FLASHNODE_TPGT:
8019 rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
8020 break;
8021 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
8022 rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
8023 break;
8024 case ISCSI_FLASHNODE_TCP_RECV_WSF:
8025 rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
8026 break;
8027 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
8028 rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
8029 break;
8030 case ISCSI_FLASHNODE_USERNAME:
8031 if (fnode_sess->chap_auth_en) {
8032 qla4xxx_get_uni_chap_at_index(ha,
8033 chap_tbl.name,
8034 chap_tbl.secret,
8035 fnode_sess->chap_out_idx);
8036 rc = sprintf(buf, "%s\n", chap_tbl.name);
8037 } else {
8038 rc = sprintf(buf, "\n");
8039 }
8040 break;
8041 case ISCSI_FLASHNODE_PASSWORD:
8042 if (fnode_sess->chap_auth_en) {
8043 qla4xxx_get_uni_chap_at_index(ha,
8044 chap_tbl.name,
8045 chap_tbl.secret,
8046 fnode_sess->chap_out_idx);
8047 rc = sprintf(buf, "%s\n", chap_tbl.secret);
8048 } else {
8049 rc = sprintf(buf, "\n");
8050 }
8051 break;
8052 case ISCSI_FLASHNODE_STATSN:
8053 rc = sprintf(buf, "%u\n", fnode_conn->statsn);
8054 break;
8055 case ISCSI_FLASHNODE_EXP_STATSN:
8056 rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
8057 break;
8058 case ISCSI_FLASHNODE_IS_BOOT_TGT:
8059 rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
8060 break;
8061 default:
8062 rc = -ENOSYS;
8063 break;
8064 }
8065
8066 put_device(dev);
8067 return rc;
8068 }
8069
8070 /**
8071 * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
8072 * @fnode_sess: pointer to session attrs of flash ddb entry
8073 * @fnode_conn: pointer to connection attrs of flash ddb entry
8074 * @data: Parameters and their values to update
8075 * @len: len of data
8076 *
8077 * This sets the parameter of flash ddb entry and writes them to flash
8078 **/
8079 static int
8080 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
8081 struct iscsi_bus_flash_conn *fnode_conn,
8082 void *data, int len)
8083 {
8084 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
8085 struct scsi_qla_host *ha = to_qla_host(shost);
8086 struct iscsi_flashnode_param_info *fnode_param;
8087 struct ql4_chap_table chap_tbl;
8088 struct nlattr *attr;
8089 uint16_t chap_out_idx = INVALID_ENTRY;
8090 int rc = QLA_ERROR;
8091 uint32_t rem = len;
8092
8093 memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
8094 nla_for_each_attr(attr, data, len, rem) {
8095 fnode_param = nla_data(attr);
8096
8097 switch (fnode_param->param) {
8098 case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
8099 fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
8100 break;
8101 case ISCSI_FLASHNODE_PORTAL_TYPE:
8102 memcpy(fnode_sess->portal_type, fnode_param->value,
8103 strlen(fnode_sess->portal_type));
8104 break;
8105 case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
8106 fnode_sess->auto_snd_tgt_disable =
8107 fnode_param->value[0];
8108 break;
8109 case ISCSI_FLASHNODE_DISCOVERY_SESS:
8110 fnode_sess->discovery_sess = fnode_param->value[0];
8111 break;
8112 case ISCSI_FLASHNODE_ENTRY_EN:
8113 fnode_sess->entry_state = fnode_param->value[0];
8114 break;
8115 case ISCSI_FLASHNODE_HDR_DGST_EN:
8116 fnode_conn->hdrdgst_en = fnode_param->value[0];
8117 break;
8118 case ISCSI_FLASHNODE_DATA_DGST_EN:
8119 fnode_conn->datadgst_en = fnode_param->value[0];
8120 break;
8121 case ISCSI_FLASHNODE_IMM_DATA_EN:
8122 fnode_sess->imm_data_en = fnode_param->value[0];
8123 break;
8124 case ISCSI_FLASHNODE_INITIAL_R2T_EN:
8125 fnode_sess->initial_r2t_en = fnode_param->value[0];
8126 break;
8127 case ISCSI_FLASHNODE_DATASEQ_INORDER:
8128 fnode_sess->dataseq_inorder_en = fnode_param->value[0];
8129 break;
8130 case ISCSI_FLASHNODE_PDU_INORDER:
8131 fnode_sess->pdu_inorder_en = fnode_param->value[0];
8132 break;
8133 case ISCSI_FLASHNODE_CHAP_AUTH_EN:
8134 fnode_sess->chap_auth_en = fnode_param->value[0];
8135 /* Invalidate chap index if chap auth is disabled */
8136 if (!fnode_sess->chap_auth_en)
8137 fnode_sess->chap_out_idx = INVALID_ENTRY;
8138
8139 break;
8140 case ISCSI_FLASHNODE_SNACK_REQ_EN:
8141 fnode_conn->snack_req_en = fnode_param->value[0];
8142 break;
8143 case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
8144 fnode_sess->discovery_logout_en = fnode_param->value[0];
8145 break;
8146 case ISCSI_FLASHNODE_BIDI_CHAP_EN:
8147 fnode_sess->bidi_chap_en = fnode_param->value[0];
8148 break;
8149 case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
8150 fnode_sess->discovery_auth_optional =
8151 fnode_param->value[0];
8152 break;
8153 case ISCSI_FLASHNODE_ERL:
8154 fnode_sess->erl = fnode_param->value[0];
8155 break;
8156 case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
8157 fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
8158 break;
8159 case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
8160 fnode_conn->tcp_nagle_disable = fnode_param->value[0];
8161 break;
8162 case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
8163 fnode_conn->tcp_wsf_disable = fnode_param->value[0];
8164 break;
8165 case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
8166 fnode_conn->tcp_timer_scale = fnode_param->value[0];
8167 break;
8168 case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
8169 fnode_conn->tcp_timestamp_en = fnode_param->value[0];
8170 break;
8171 case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
8172 fnode_conn->fragment_disable = fnode_param->value[0];
8173 break;
8174 case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
8175 fnode_conn->max_recv_dlength =
8176 *(unsigned *)fnode_param->value;
8177 break;
8178 case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
8179 fnode_conn->max_xmit_dlength =
8180 *(unsigned *)fnode_param->value;
8181 break;
8182 case ISCSI_FLASHNODE_FIRST_BURST:
8183 fnode_sess->first_burst =
8184 *(unsigned *)fnode_param->value;
8185 break;
8186 case ISCSI_FLASHNODE_DEF_TIME2WAIT:
8187 fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
8188 break;
8189 case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
8190 fnode_sess->time2retain =
8191 *(uint16_t *)fnode_param->value;
8192 break;
8193 case ISCSI_FLASHNODE_MAX_R2T:
8194 fnode_sess->max_r2t =
8195 *(uint16_t *)fnode_param->value;
8196 break;
8197 case ISCSI_FLASHNODE_KEEPALIVE_TMO:
8198 fnode_conn->keepalive_timeout =
8199 *(uint16_t *)fnode_param->value;
8200 break;
8201 case ISCSI_FLASHNODE_ISID:
8202 memcpy(fnode_sess->isid, fnode_param->value,
8203 sizeof(fnode_sess->isid));
8204 break;
8205 case ISCSI_FLASHNODE_TSID:
8206 fnode_sess->tsid = *(uint16_t *)fnode_param->value;
8207 break;
8208 case ISCSI_FLASHNODE_PORT:
8209 fnode_conn->port = *(uint16_t *)fnode_param->value;
8210 break;
8211 case ISCSI_FLASHNODE_MAX_BURST:
8212 fnode_sess->max_burst = *(unsigned *)fnode_param->value;
8213 break;
8214 case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
8215 fnode_sess->default_taskmgmt_timeout =
8216 *(uint16_t *)fnode_param->value;
8217 break;
8218 case ISCSI_FLASHNODE_IPADDR:
8219 memcpy(fnode_conn->ipaddress, fnode_param->value,
8220 IPv6_ADDR_LEN);
8221 break;
8222 case ISCSI_FLASHNODE_ALIAS:
8223 rc = iscsi_switch_str_param(&fnode_sess->targetalias,
8224 (char *)fnode_param->value);
8225 break;
8226 case ISCSI_FLASHNODE_REDIRECT_IPADDR:
8227 memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
8228 IPv6_ADDR_LEN);
8229 break;
8230 case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
8231 fnode_conn->max_segment_size =
8232 *(unsigned *)fnode_param->value;
8233 break;
8234 case ISCSI_FLASHNODE_LOCAL_PORT:
8235 fnode_conn->local_port =
8236 *(uint16_t *)fnode_param->value;
8237 break;
8238 case ISCSI_FLASHNODE_IPV4_TOS:
8239 fnode_conn->ipv4_tos = fnode_param->value[0];
8240 break;
8241 case ISCSI_FLASHNODE_IPV6_TC:
8242 fnode_conn->ipv6_traffic_class = fnode_param->value[0];
8243 break;
8244 case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
8245 fnode_conn->ipv6_flow_label = fnode_param->value[0];
8246 break;
8247 case ISCSI_FLASHNODE_NAME:
8248 rc = iscsi_switch_str_param(&fnode_sess->targetname,
8249 (char *)fnode_param->value);
8250 break;
8251 case ISCSI_FLASHNODE_TPGT:
8252 fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
8253 break;
8254 case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
8255 memcpy(fnode_conn->link_local_ipv6_addr,
8256 fnode_param->value, IPv6_ADDR_LEN);
8257 break;
8258 case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
8259 fnode_sess->discovery_parent_idx =
8260 *(uint16_t *)fnode_param->value;
8261 break;
8262 case ISCSI_FLASHNODE_TCP_XMIT_WSF:
8263 fnode_conn->tcp_xmit_wsf =
8264 *(uint8_t *)fnode_param->value;
8265 break;
8266 case ISCSI_FLASHNODE_TCP_RECV_WSF:
8267 fnode_conn->tcp_recv_wsf =
8268 *(uint8_t *)fnode_param->value;
8269 break;
8270 case ISCSI_FLASHNODE_STATSN:
8271 fnode_conn->statsn = *(uint32_t *)fnode_param->value;
8272 break;
8273 case ISCSI_FLASHNODE_EXP_STATSN:
8274 fnode_conn->exp_statsn =
8275 *(uint32_t *)fnode_param->value;
8276 break;
8277 case ISCSI_FLASHNODE_CHAP_OUT_IDX:
8278 chap_out_idx = *(uint16_t *)fnode_param->value;
8279 if (!qla4xxx_get_uni_chap_at_index(ha,
8280 chap_tbl.name,
8281 chap_tbl.secret,
8282 chap_out_idx)) {
8283 fnode_sess->chap_out_idx = chap_out_idx;
8284 /* Enable chap auth if chap index is valid */
8285 fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
8286 }
8287 break;
8288 default:
8289 ql4_printk(KERN_ERR, ha,
8290 "%s: No such sysfs attribute\n", __func__);
8291 rc = -ENOSYS;
8292 goto exit_set_param;
8293 }
8294 }
8295
8296 rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
8297
8298 exit_set_param:
8299 return rc;
8300 }
8301
8302 /**
8303 * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
8304 * @fnode_sess: pointer to session attrs of flash ddb entry
8305 *
8306 * This invalidates the flash ddb entry at the given index
8307 **/
8308 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
8309 {
8310 struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
8311 struct scsi_qla_host *ha = to_qla_host(shost);
8312 uint32_t dev_db_start_offset;
8313 uint32_t dev_db_end_offset;
8314 struct dev_db_entry *fw_ddb_entry = NULL;
8315 dma_addr_t fw_ddb_entry_dma;
8316 uint16_t *ddb_cookie = NULL;
8317 size_t ddb_size = 0;
8318 void *pddb = NULL;
8319 int target_id;
8320 int rc = 0;
8321
8322 if (fnode_sess->is_boot_target) {
8323 rc = -EPERM;
8324 DEBUG2(ql4_printk(KERN_ERR, ha,
8325 "%s: Deletion of boot target entry is not permitted.\n",
8326 __func__));
8327 goto exit_ddb_del;
8328 }
8329
8330 if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
8331 goto sysfs_ddb_del;
8332
8333 if (is_qla40XX(ha)) {
8334 dev_db_start_offset = FLASH_OFFSET_DB_INFO;
8335 dev_db_end_offset = FLASH_OFFSET_DB_END;
8336 dev_db_start_offset += (fnode_sess->target_id *
8337 sizeof(*fw_ddb_entry));
8338 ddb_size = sizeof(*fw_ddb_entry);
8339 } else {
8340 dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
8341 (ha->hw.flt_region_ddb << 2);
8342 /* flt_ddb_size is DDB table size for both ports
8343 * so divide it by 2 to calculate the offset for second port
8344 */
8345 if (ha->port_num == 1)
8346 dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
8347
8348 dev_db_end_offset = dev_db_start_offset +
8349 (ha->hw.flt_ddb_size / 2);
8350
8351 dev_db_start_offset += (fnode_sess->target_id *
8352 sizeof(*fw_ddb_entry));
8353 dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
8354
8355 ddb_size = sizeof(*ddb_cookie);
8356 }
8357
8358 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
8359 __func__, dev_db_start_offset, dev_db_end_offset));
8360
8361 if (dev_db_start_offset > dev_db_end_offset) {
8362 rc = -EIO;
8363 DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
8364 __func__, fnode_sess->target_id));
8365 goto exit_ddb_del;
8366 }
8367
8368 pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
8369 &fw_ddb_entry_dma, GFP_KERNEL);
8370 if (!pddb) {
8371 rc = -ENOMEM;
8372 DEBUG2(ql4_printk(KERN_ERR, ha,
8373 "%s: Unable to allocate dma buffer\n",
8374 __func__));
8375 goto exit_ddb_del;
8376 }
8377
8378 if (is_qla40XX(ha)) {
8379 fw_ddb_entry = pddb;
8380 memset(fw_ddb_entry, 0, ddb_size);
8381 ddb_cookie = &fw_ddb_entry->cookie;
8382 } else {
8383 ddb_cookie = pddb;
8384 }
8385
8386 /* invalidate the cookie */
8387 *ddb_cookie = 0xFFEE;
8388 qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
8389 ddb_size, FLASH_OPT_RMW_COMMIT);
8390
8391 sysfs_ddb_del:
8392 target_id = fnode_sess->target_id;
8393 iscsi_destroy_flashnode_sess(fnode_sess);
8394 ql4_printk(KERN_INFO, ha,
8395 "%s: session and conn entries for flashnode %u of host %lu deleted\n",
8396 __func__, target_id, ha->host_no);
8397 exit_ddb_del:
8398 if (pddb)
8399 dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
8400 fw_ddb_entry_dma);
8401 return rc;
8402 }
8403
8404 /**
8405 * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
8406 * @ha: pointer to adapter structure
8407 *
8408 * Export the firmware DDB for all send targets and normal targets to sysfs.
8409 **/
8410 int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
8411 {
8412 struct dev_db_entry *fw_ddb_entry = NULL;
8413 dma_addr_t fw_ddb_entry_dma;
8414 uint16_t max_ddbs;
8415 uint16_t idx = 0;
8416 int ret = QLA_SUCCESS;
8417
8418 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
8419 sizeof(*fw_ddb_entry),
8420 &fw_ddb_entry_dma, GFP_KERNEL);
8421 if (!fw_ddb_entry) {
8422 DEBUG2(ql4_printk(KERN_ERR, ha,
8423 "%s: Unable to allocate dma buffer\n",
8424 __func__));
8425 return -ENOMEM;
8426 }
8427
8428 max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
8429 MAX_DEV_DB_ENTRIES;
8430
8431 for (idx = 0; idx < max_ddbs; idx++) {
8432 if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
8433 idx))
8434 continue;
8435
8436 ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
8437 if (ret) {
8438 ret = -EIO;
8439 break;
8440 }
8441 }
8442
8443 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
8444 fw_ddb_entry_dma);
8445
8446 return ret;
8447 }
8448
8449 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
8450 {
8451 iscsi_destroy_all_flashnode(ha->host);
8452 }
8453
8454 /**
8455 * qla4xxx_build_ddb_list - Build ddb list and setup sessions
8456 * @ha: pointer to adapter structure
8457 * @is_reset: Is this init path or reset path
8458 *
8459 * Create a list of sendtargets (st) from firmware DDBs, issue send targets
8460 * using connection open, then create the list of normal targets (nt)
8461 * from firmware DDBs. Based on the list of nt setup session and connection
8462 * objects.
8463 **/
8464 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
8465 {
8466 uint16_t tmo = 0;
8467 struct list_head list_st, list_nt;
8468 struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
8469 unsigned long wtime;
8470
8471 if (!test_bit(AF_LINK_UP, &ha->flags)) {
8472 set_bit(AF_BUILD_DDB_LIST, &ha->flags);
8473 ha->is_reset = is_reset;
8474 return;
8475 }
8476
8477 INIT_LIST_HEAD(&list_st);
8478 INIT_LIST_HEAD(&list_nt);
8479
8480 qla4xxx_build_st_list(ha, &list_st);
8481
8482 /* Before issuing conn open mbox, ensure all IPs states are configured
8483 * Note, conn open fails if IPs are not configured
8484 */
8485 qla4xxx_wait_for_ip_configuration(ha);
8486
8487 /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
8488 list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
8489 qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
8490 }
8491
8492 /* Wait to ensure all sendtargets are done for min 12 sec wait */
8493 tmo = ((ha->def_timeout > LOGIN_TOV) &&
8494 (ha->def_timeout < LOGIN_TOV * 10) ?
8495 ha->def_timeout : LOGIN_TOV);
8496
8497 DEBUG2(ql4_printk(KERN_INFO, ha,
8498 "Default time to wait for build ddb %d\n", tmo));
8499
8500 wtime = jiffies + (HZ * tmo);
8501 do {
8502 if (list_empty(&list_st))
8503 break;
8504
8505 qla4xxx_remove_failed_ddb(ha, &list_st);
8506 schedule_timeout_uninterruptible(HZ / 10);
8507 } while (time_after(wtime, jiffies));
8508
8509
8510 qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
8511
8512 qla4xxx_free_ddb_list(&list_st);
8513 qla4xxx_free_ddb_list(&list_nt);
8514
8515 qla4xxx_free_ddb_index(ha);
8516 }
8517
8518 /**
8519 * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
8520 * response.
8521 * @ha: pointer to adapter structure
8522 *
8523 * When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be
8524 * set in DDB and we will wait for login response of boot targets during
8525 * probe.
8526 **/
8527 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
8528 {
8529 struct ddb_entry *ddb_entry;
8530 struct dev_db_entry *fw_ddb_entry = NULL;
8531 dma_addr_t fw_ddb_entry_dma;
8532 unsigned long wtime;
8533 uint32_t ddb_state;
8534 int max_ddbs, idx, ret;
8535
8536 max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
8537 MAX_DEV_DB_ENTRIES;
8538
8539 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8540 &fw_ddb_entry_dma, GFP_KERNEL);
8541 if (!fw_ddb_entry) {
8542 ql4_printk(KERN_ERR, ha,
8543 "%s: Unable to allocate dma buffer\n", __func__);
8544 goto exit_login_resp;
8545 }
8546
8547 wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
8548
8549 for (idx = 0; idx < max_ddbs; idx++) {
8550 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
8551 if (ddb_entry == NULL)
8552 continue;
8553
8554 if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
8555 DEBUG2(ql4_printk(KERN_INFO, ha,
8556 "%s: DDB index [%d]\n", __func__,
8557 ddb_entry->fw_ddb_index));
8558 do {
8559 ret = qla4xxx_get_fwddb_entry(ha,
8560 ddb_entry->fw_ddb_index,
8561 fw_ddb_entry, fw_ddb_entry_dma,
8562 NULL, NULL, &ddb_state, NULL,
8563 NULL, NULL);
8564 if (ret == QLA_ERROR)
8565 goto exit_login_resp;
8566
8567 if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
8568 (ddb_state == DDB_DS_SESSION_FAILED))
8569 break;
8570
8571 schedule_timeout_uninterruptible(HZ);
8572
8573 } while ((time_after(wtime, jiffies)));
8574
8575 if (!time_after(wtime, jiffies)) {
8576 DEBUG2(ql4_printk(KERN_INFO, ha,
8577 "%s: Login response wait timer expired\n",
8578 __func__));
8579 goto exit_login_resp;
8580 }
8581 }
8582 }
8583
8584 exit_login_resp:
8585 if (fw_ddb_entry)
8586 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8587 fw_ddb_entry, fw_ddb_entry_dma);
8588 }
8589
8590 /**
8591 * qla4xxx_probe_adapter - callback function to probe HBA
8592 * @pdev: pointer to pci_dev structure
8593 * @pci_device_id: pointer to pci_device entry
8594 *
8595 * This routine will probe for Qlogic 4xxx iSCSI host adapters.
8596 * It returns zero if successful. It also initializes all data necessary for
8597 * the driver.
8598 **/
8599 static int qla4xxx_probe_adapter(struct pci_dev *pdev,
8600 const struct pci_device_id *ent)
8601 {
8602 int ret = -ENODEV, status;
8603 struct Scsi_Host *host;
8604 struct scsi_qla_host *ha;
8605 uint8_t init_retry_count = 0;
8606 char buf[34];
8607 struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
8608 uint32_t dev_state;
8609
8610 if (pci_enable_device(pdev))
8611 return -1;
8612
8613 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
8614 if (host == NULL) {
8615 printk(KERN_WARNING
8616 "qla4xxx: Couldn't allocate host from scsi layer!\n");
8617 goto probe_disable_device;
8618 }
8619
8620 /* Clear our data area */
8621 ha = to_qla_host(host);
8622 memset(ha, 0, sizeof(*ha));
8623
8624 /* Save the information from PCI BIOS. */
8625 ha->pdev = pdev;
8626 ha->host = host;
8627 ha->host_no = host->host_no;
8628 ha->func_num = PCI_FUNC(ha->pdev->devfn);
8629
8630 pci_enable_pcie_error_reporting(pdev);
8631
8632 /* Setup Runtime configurable options */
8633 if (is_qla8022(ha)) {
8634 ha->isp_ops = &qla4_82xx_isp_ops;
8635 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
8636 ha->qdr_sn_window = -1;
8637 ha->ddr_mn_window = -1;
8638 ha->curr_window = 255;
8639 nx_legacy_intr = &legacy_intr[ha->func_num];
8640 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
8641 ha->nx_legacy_intr.tgt_status_reg =
8642 nx_legacy_intr->tgt_status_reg;
8643 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
8644 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
8645 } else if (is_qla8032(ha) || is_qla8042(ha)) {
8646 ha->isp_ops = &qla4_83xx_isp_ops;
8647 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
8648 } else {
8649 ha->isp_ops = &qla4xxx_isp_ops;
8650 }
8651
8652 if (is_qla80XX(ha)) {
8653 rwlock_init(&ha->hw_lock);
8654 ha->pf_bit = ha->func_num << 16;
8655 /* Set EEH reset type to fundamental if required by hba */
8656 pdev->needs_freset = 1;
8657 }
8658
8659 /* Configure PCI I/O space. */
8660 ret = ha->isp_ops->iospace_config(ha);
8661 if (ret)
8662 goto probe_failed_ioconfig;
8663
8664 ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
8665 pdev->device, pdev->irq, ha->reg);
8666
8667 qla4xxx_config_dma_addressing(ha);
8668
8669 /* Initialize lists and spinlocks. */
8670 INIT_LIST_HEAD(&ha->free_srb_q);
8671
8672 mutex_init(&ha->mbox_sem);
8673 mutex_init(&ha->chap_sem);
8674 init_completion(&ha->mbx_intr_comp);
8675 init_completion(&ha->disable_acb_comp);
8676 init_completion(&ha->idc_comp);
8677 init_completion(&ha->link_up_comp);
8678
8679 spin_lock_init(&ha->hardware_lock);
8680 spin_lock_init(&ha->work_lock);
8681
8682 /* Initialize work list */
8683 INIT_LIST_HEAD(&ha->work_list);
8684
8685 /* Allocate dma buffers */
8686 if (qla4xxx_mem_alloc(ha)) {
8687 ql4_printk(KERN_WARNING, ha,
8688 "[ERROR] Failed to allocate memory for adapter\n");
8689
8690 ret = -ENOMEM;
8691 goto probe_failed;
8692 }
8693
8694 host->cmd_per_lun = 3;
8695 host->max_channel = 0;
8696 host->max_lun = MAX_LUNS - 1;
8697 host->max_id = MAX_TARGETS;
8698 host->max_cmd_len = IOCB_MAX_CDB_LEN;
8699 host->can_queue = MAX_SRBS ;
8700 host->transportt = qla4xxx_scsi_transport;
8701
8702 pci_set_drvdata(pdev, ha);
8703
8704 ret = scsi_add_host(host, &pdev->dev);
8705 if (ret)
8706 goto probe_failed;
8707
8708 if (is_qla80XX(ha))
8709 qla4_8xxx_get_flash_info(ha);
8710
8711 if (is_qla8032(ha) || is_qla8042(ha)) {
8712 qla4_83xx_read_reset_template(ha);
8713 /*
8714 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0.
8715 * If DONRESET_BIT0 is set, drivers should not set dev_state
8716 * to NEED_RESET. But if NEED_RESET is set, drivers should
8717 * should honor the reset.
8718 */
8719 if (ql4xdontresethba == 1)
8720 qla4_83xx_set_idc_dontreset(ha);
8721 }
8722
8723 /*
8724 * Initialize the Host adapter request/response queues and
8725 * firmware
8726 * NOTE: interrupts enabled upon successful completion
8727 */
8728 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
8729
8730 /* Dont retry adapter initialization if IRQ allocation failed */
8731 if (is_qla80XX(ha) && (status == QLA_ERROR))
8732 goto skip_retry_init;
8733
8734 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
8735 init_retry_count++ < MAX_INIT_RETRIES) {
8736
8737 if (is_qla80XX(ha)) {
8738 ha->isp_ops->idc_lock(ha);
8739 dev_state = qla4_8xxx_rd_direct(ha,
8740 QLA8XXX_CRB_DEV_STATE);
8741 ha->isp_ops->idc_unlock(ha);
8742 if (dev_state == QLA8XXX_DEV_FAILED) {
8743 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
8744 "initialize adapter. H/W is in failed state\n",
8745 __func__);
8746 break;
8747 }
8748 }
8749 DEBUG2(printk("scsi: %s: retrying adapter initialization "
8750 "(%d)\n", __func__, init_retry_count));
8751
8752 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
8753 continue;
8754
8755 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
8756 if (is_qla80XX(ha) && (status == QLA_ERROR)) {
8757 if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR)
8758 goto skip_retry_init;
8759 }
8760 }
8761
8762 skip_retry_init:
8763 if (!test_bit(AF_ONLINE, &ha->flags)) {
8764 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
8765
8766 if ((is_qla8022(ha) && ql4xdontresethba) ||
8767 ((is_qla8032(ha) || is_qla8042(ha)) &&
8768 qla4_83xx_idc_dontreset(ha))) {
8769 /* Put the device in failed state. */
8770 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
8771 ha->isp_ops->idc_lock(ha);
8772 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
8773 QLA8XXX_DEV_FAILED);
8774 ha->isp_ops->idc_unlock(ha);
8775 }
8776 ret = -ENODEV;
8777 goto remove_host;
8778 }
8779
8780 /* Startup the kernel thread for this host adapter. */
8781 DEBUG2(printk("scsi: %s: Starting kernel thread for "
8782 "qla4xxx_dpc\n", __func__));
8783 sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
8784 ha->dpc_thread = create_singlethread_workqueue(buf);
8785 if (!ha->dpc_thread) {
8786 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
8787 ret = -ENODEV;
8788 goto remove_host;
8789 }
8790 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
8791
8792 ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1,
8793 ha->host_no);
8794 if (!ha->task_wq) {
8795 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
8796 ret = -ENODEV;
8797 goto remove_host;
8798 }
8799
8800 /*
8801 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
8802 * (which is called indirectly by qla4xxx_initialize_adapter),
8803 * so that irqs will be registered after crbinit but before
8804 * mbx_intr_enable.
8805 */
8806 if (is_qla40XX(ha)) {
8807 ret = qla4xxx_request_irqs(ha);
8808 if (ret) {
8809 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
8810 "interrupt %d already in use.\n", pdev->irq);
8811 goto remove_host;
8812 }
8813 }
8814
8815 pci_save_state(ha->pdev);
8816 ha->isp_ops->enable_intrs(ha);
8817
8818 /* Start timer thread. */
8819 qla4xxx_start_timer(ha, 1);
8820
8821 set_bit(AF_INIT_DONE, &ha->flags);
8822
8823 qla4_8xxx_alloc_sysfs_attr(ha);
8824
8825 printk(KERN_INFO
8826 " QLogic iSCSI HBA Driver version: %s\n"
8827 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
8828 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
8829 ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
8830 ha->fw_info.fw_patch, ha->fw_info.fw_build);
8831
8832 /* Set the driver version */
8833 if (is_qla80XX(ha))
8834 qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
8835
8836 if (qla4xxx_setup_boot_info(ha))
8837 ql4_printk(KERN_ERR, ha,
8838 "%s: No iSCSI boot target configured\n", __func__);
8839
8840 set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags);
8841 /* Perform the build ddb list and login to each */
8842 qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
8843 iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
8844 qla4xxx_wait_login_resp_boot_tgt(ha);
8845
8846 qla4xxx_create_chap_list(ha);
8847
8848 qla4xxx_create_ifaces(ha);
8849 return 0;
8850
8851 remove_host:
8852 scsi_remove_host(ha->host);
8853
8854 probe_failed:
8855 qla4xxx_free_adapter(ha);
8856
8857 probe_failed_ioconfig:
8858 pci_disable_pcie_error_reporting(pdev);
8859 scsi_host_put(ha->host);
8860
8861 probe_disable_device:
8862 pci_disable_device(pdev);
8863
8864 return ret;
8865 }
8866
8867 /**
8868 * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
8869 * @ha: pointer to adapter structure
8870 *
8871 * Mark the other ISP-4xxx port to indicate that the driver is being removed,
8872 * so that the other port will not re-initialize while in the process of
8873 * removing the ha due to driver unload or hba hotplug.
8874 **/
8875 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
8876 {
8877 struct scsi_qla_host *other_ha = NULL;
8878 struct pci_dev *other_pdev = NULL;
8879 int fn = ISP4XXX_PCI_FN_2;
8880
8881 /*iscsi function numbers for ISP4xxx is 1 and 3*/
8882 if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
8883 fn = ISP4XXX_PCI_FN_1;
8884
8885 other_pdev =
8886 pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
8887 ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
8888 fn));
8889
8890 /* Get other_ha if other_pdev is valid and state is enable*/
8891 if (other_pdev) {
8892 if (atomic_read(&other_pdev->enable_cnt)) {
8893 other_ha = pci_get_drvdata(other_pdev);
8894 if (other_ha) {
8895 set_bit(AF_HA_REMOVAL, &other_ha->flags);
8896 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
8897 "Prevent %s reinit\n", __func__,
8898 dev_name(&other_ha->pdev->dev)));
8899 }
8900 }
8901 pci_dev_put(other_pdev);
8902 }
8903 }
8904
8905 static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha,
8906 struct ddb_entry *ddb_entry)
8907 {
8908 struct dev_db_entry *fw_ddb_entry = NULL;
8909 dma_addr_t fw_ddb_entry_dma;
8910 unsigned long wtime;
8911 uint32_t ddb_state;
8912 int options;
8913 int status;
8914
8915 options = LOGOUT_OPTION_CLOSE_SESSION;
8916 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
8917 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
8918 goto clear_ddb;
8919 }
8920
8921 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8922 &fw_ddb_entry_dma, GFP_KERNEL);
8923 if (!fw_ddb_entry) {
8924 ql4_printk(KERN_ERR, ha,
8925 "%s: Unable to allocate dma buffer\n", __func__);
8926 goto clear_ddb;
8927 }
8928
8929 wtime = jiffies + (HZ * LOGOUT_TOV);
8930 do {
8931 status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
8932 fw_ddb_entry, fw_ddb_entry_dma,
8933 NULL, NULL, &ddb_state, NULL,
8934 NULL, NULL);
8935 if (status == QLA_ERROR)
8936 goto free_ddb;
8937
8938 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
8939 (ddb_state == DDB_DS_SESSION_FAILED))
8940 goto free_ddb;
8941
8942 schedule_timeout_uninterruptible(HZ);
8943 } while ((time_after(wtime, jiffies)));
8944
8945 free_ddb:
8946 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
8947 fw_ddb_entry, fw_ddb_entry_dma);
8948 clear_ddb:
8949 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
8950 }
8951
8952 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
8953 {
8954 struct ddb_entry *ddb_entry;
8955 int idx;
8956
8957 for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
8958
8959 ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
8960 if ((ddb_entry != NULL) &&
8961 (ddb_entry->ddb_type == FLASH_DDB)) {
8962
8963 qla4xxx_destroy_ddb(ha, ddb_entry);
8964 /*
8965 * we have decremented the reference count of the driver
8966 * when we setup the session to have the driver unload
8967 * to be seamless without actually destroying the
8968 * session
8969 **/
8970 try_module_get(qla4xxx_iscsi_transport.owner);
8971 iscsi_destroy_endpoint(ddb_entry->conn->ep);
8972 qla4xxx_free_ddb(ha, ddb_entry);
8973 iscsi_session_teardown(ddb_entry->sess);
8974 }
8975 }
8976 }
8977 /**
8978 * qla4xxx_remove_adapter - callback function to remove adapter.
8979 * @pci_dev: PCI device pointer
8980 **/
8981 static void qla4xxx_remove_adapter(struct pci_dev *pdev)
8982 {
8983 struct scsi_qla_host *ha;
8984
8985 /*
8986 * If the PCI device is disabled then it means probe_adapter had
8987 * failed and resources already cleaned up on probe_adapter exit.
8988 */
8989 if (!pci_is_enabled(pdev))
8990 return;
8991
8992 ha = pci_get_drvdata(pdev);
8993
8994 if (is_qla40XX(ha))
8995 qla4xxx_prevent_other_port_reinit(ha);
8996
8997 /* destroy iface from sysfs */
8998 qla4xxx_destroy_ifaces(ha);
8999
9000 if ((!ql4xdisablesysfsboot) && ha->boot_kset)
9001 iscsi_boot_destroy_kset(ha->boot_kset);
9002
9003 qla4xxx_destroy_fw_ddb_session(ha);
9004 qla4_8xxx_free_sysfs_attr(ha);
9005
9006 qla4xxx_sysfs_ddb_remove(ha);
9007 scsi_remove_host(ha->host);
9008
9009 qla4xxx_free_adapter(ha);
9010
9011 scsi_host_put(ha->host);
9012
9013 pci_disable_pcie_error_reporting(pdev);
9014 pci_disable_device(pdev);
9015 }
9016
9017 /**
9018 * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
9019 * @ha: HA context
9020 */
9021 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
9022 {
9023 /* Update our PCI device dma_mask for full 64 bit mask */
9024 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
9025 dev_dbg(&ha->pdev->dev,
9026 "Failed to set 64 bit PCI consistent mask; "
9027 "using 32 bit.\n");
9028 dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
9029 }
9030 }
9031
9032 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
9033 {
9034 struct iscsi_cls_session *cls_sess;
9035 struct iscsi_session *sess;
9036 struct ddb_entry *ddb;
9037 int queue_depth = QL4_DEF_QDEPTH;
9038
9039 cls_sess = starget_to_session(sdev->sdev_target);
9040 sess = cls_sess->dd_data;
9041 ddb = sess->dd_data;
9042
9043 sdev->hostdata = ddb;
9044
9045 if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
9046 queue_depth = ql4xmaxqdepth;
9047
9048 scsi_change_queue_depth(sdev, queue_depth);
9049 return 0;
9050 }
9051
9052 /**
9053 * qla4xxx_del_from_active_array - returns an active srb
9054 * @ha: Pointer to host adapter structure.
9055 * @index: index into the active_array
9056 *
9057 * This routine removes and returns the srb at the specified index
9058 **/
9059 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
9060 uint32_t index)
9061 {
9062 struct srb *srb = NULL;
9063 struct scsi_cmnd *cmd = NULL;
9064
9065 cmd = scsi_host_find_tag(ha->host, index);
9066 if (!cmd)
9067 return srb;
9068
9069 srb = (struct srb *)CMD_SP(cmd);
9070 if (!srb)
9071 return srb;
9072
9073 /* update counters */
9074 if (srb->flags & SRB_DMA_VALID) {
9075 ha->iocb_cnt -= srb->iocb_cnt;
9076 if (srb->cmd)
9077 srb->cmd->host_scribble =
9078 (unsigned char *)(unsigned long) MAX_SRBS;
9079 }
9080 return srb;
9081 }
9082
9083 /**
9084 * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
9085 * @ha: Pointer to host adapter structure.
9086 * @cmd: Scsi Command to wait on.
9087 *
9088 * This routine waits for the command to be returned by the Firmware
9089 * for some max time.
9090 **/
9091 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
9092 struct scsi_cmnd *cmd)
9093 {
9094 int done = 0;
9095 struct srb *rp;
9096 uint32_t max_wait_time = EH_WAIT_CMD_TOV;
9097 int ret = SUCCESS;
9098
9099 /* Dont wait on command if PCI error is being handled
9100 * by PCI AER driver
9101 */
9102 if (unlikely(pci_channel_offline(ha->pdev)) ||
9103 (test_bit(AF_EEH_BUSY, &ha->flags))) {
9104 ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
9105 ha->host_no, __func__);
9106 return ret;
9107 }
9108
9109 do {
9110 /* Checking to see if its returned to OS */
9111 rp = (struct srb *) CMD_SP(cmd);
9112 if (rp == NULL) {
9113 done++;
9114 break;
9115 }
9116
9117 msleep(2000);
9118 } while (max_wait_time--);
9119
9120 return done;
9121 }
9122
9123 /**
9124 * qla4xxx_wait_for_hba_online - waits for HBA to come online
9125 * @ha: Pointer to host adapter structure
9126 **/
9127 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
9128 {
9129 unsigned long wait_online;
9130
9131 wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
9132 while (time_before(jiffies, wait_online)) {
9133
9134 if (adapter_up(ha))
9135 return QLA_SUCCESS;
9136
9137 msleep(2000);
9138 }
9139
9140 return QLA_ERROR;
9141 }
9142
9143 /**
9144 * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
9145 * @ha: pointer to HBA
9146 * @t: target id
9147 * @l: lun id
9148 *
9149 * This function waits for all outstanding commands to a lun to complete. It
9150 * returns 0 if all pending commands are returned and 1 otherwise.
9151 **/
9152 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
9153 struct scsi_target *stgt,
9154 struct scsi_device *sdev)
9155 {
9156 int cnt;
9157 int status = 0;
9158 struct scsi_cmnd *cmd;
9159
9160 /*
9161 * Waiting for all commands for the designated target or dev
9162 * in the active array
9163 */
9164 for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
9165 cmd = scsi_host_find_tag(ha->host, cnt);
9166 if (cmd && stgt == scsi_target(cmd->device) &&
9167 (!sdev || sdev == cmd->device)) {
9168 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
9169 status++;
9170 break;
9171 }
9172 }
9173 }
9174 return status;
9175 }
9176
9177 /**
9178 * qla4xxx_eh_abort - callback for abort task.
9179 * @cmd: Pointer to Linux's SCSI command structure
9180 *
9181 * This routine is called by the Linux OS to abort the specified
9182 * command.
9183 **/
9184 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
9185 {
9186 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9187 unsigned int id = cmd->device->id;
9188 uint64_t lun = cmd->device->lun;
9189 unsigned long flags;
9190 struct srb *srb = NULL;
9191 int ret = SUCCESS;
9192 int wait = 0;
9193 int rval;
9194
9195 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
9196 ha->host_no, id, lun, cmd, cmd->cmnd[0]);
9197
9198 rval = qla4xxx_isp_check_reg(ha);
9199 if (rval != QLA_SUCCESS) {
9200 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9201 return FAILED;
9202 }
9203
9204 spin_lock_irqsave(&ha->hardware_lock, flags);
9205 srb = (struct srb *) CMD_SP(cmd);
9206 if (!srb) {
9207 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9208 ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
9209 ha->host_no, id, lun);
9210 return SUCCESS;
9211 }
9212 kref_get(&srb->srb_ref);
9213 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9214
9215 if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
9216 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n",
9217 ha->host_no, id, lun));
9218 ret = FAILED;
9219 } else {
9220 DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n",
9221 ha->host_no, id, lun));
9222 wait = 1;
9223 }
9224
9225 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
9226
9227 /* Wait for command to complete */
9228 if (wait) {
9229 if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
9230 DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n",
9231 ha->host_no, id, lun));
9232 ret = FAILED;
9233 }
9234 }
9235
9236 ql4_printk(KERN_INFO, ha,
9237 "scsi%ld:%d:%llu: Abort command - %s\n",
9238 ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
9239
9240 return ret;
9241 }
9242
9243 /**
9244 * qla4xxx_eh_device_reset - callback for target reset.
9245 * @cmd: Pointer to Linux's SCSI command structure
9246 *
9247 * This routine is called by the Linux OS to reset all luns on the
9248 * specified target.
9249 **/
9250 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
9251 {
9252 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9253 struct ddb_entry *ddb_entry = cmd->device->hostdata;
9254 int ret = FAILED, stat;
9255 int rval;
9256
9257 if (!ddb_entry)
9258 return ret;
9259
9260 ret = iscsi_block_scsi_eh(cmd);
9261 if (ret)
9262 return ret;
9263 ret = FAILED;
9264
9265 ql4_printk(KERN_INFO, ha,
9266 "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no,
9267 cmd->device->channel, cmd->device->id, cmd->device->lun);
9268
9269 DEBUG2(printk(KERN_INFO
9270 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
9271 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
9272 cmd, jiffies, cmd->request->timeout / HZ,
9273 ha->dpc_flags, cmd->result, cmd->allowed));
9274
9275 rval = qla4xxx_isp_check_reg(ha);
9276 if (rval != QLA_SUCCESS) {
9277 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9278 return FAILED;
9279 }
9280
9281 /* FIXME: wait for hba to go online */
9282 stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
9283 if (stat != QLA_SUCCESS) {
9284 ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
9285 goto eh_dev_reset_done;
9286 }
9287
9288 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
9289 cmd->device)) {
9290 ql4_printk(KERN_INFO, ha,
9291 "DEVICE RESET FAILED - waiting for "
9292 "commands.\n");
9293 goto eh_dev_reset_done;
9294 }
9295
9296 /* Send marker. */
9297 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
9298 MM_LUN_RESET) != QLA_SUCCESS)
9299 goto eh_dev_reset_done;
9300
9301 ql4_printk(KERN_INFO, ha,
9302 "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n",
9303 ha->host_no, cmd->device->channel, cmd->device->id,
9304 cmd->device->lun);
9305
9306 ret = SUCCESS;
9307
9308 eh_dev_reset_done:
9309
9310 return ret;
9311 }
9312
9313 /**
9314 * qla4xxx_eh_target_reset - callback for target reset.
9315 * @cmd: Pointer to Linux's SCSI command structure
9316 *
9317 * This routine is called by the Linux OS to reset the target.
9318 **/
9319 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
9320 {
9321 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
9322 struct ddb_entry *ddb_entry = cmd->device->hostdata;
9323 int stat, ret;
9324 int rval;
9325
9326 if (!ddb_entry)
9327 return FAILED;
9328
9329 ret = iscsi_block_scsi_eh(cmd);
9330 if (ret)
9331 return ret;
9332
9333 starget_printk(KERN_INFO, scsi_target(cmd->device),
9334 "WARM TARGET RESET ISSUED.\n");
9335
9336 DEBUG2(printk(KERN_INFO
9337 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
9338 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
9339 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
9340 ha->dpc_flags, cmd->result, cmd->allowed));
9341
9342 rval = qla4xxx_isp_check_reg(ha);
9343 if (rval != QLA_SUCCESS) {
9344 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9345 return FAILED;
9346 }
9347
9348 stat = qla4xxx_reset_target(ha, ddb_entry);
9349 if (stat != QLA_SUCCESS) {
9350 starget_printk(KERN_INFO, scsi_target(cmd->device),
9351 "WARM TARGET RESET FAILED.\n");
9352 return FAILED;
9353 }
9354
9355 if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
9356 NULL)) {
9357 starget_printk(KERN_INFO, scsi_target(cmd->device),
9358 "WARM TARGET DEVICE RESET FAILED - "
9359 "waiting for commands.\n");
9360 return FAILED;
9361 }
9362
9363 /* Send marker. */
9364 if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
9365 MM_TGT_WARM_RESET) != QLA_SUCCESS) {
9366 starget_printk(KERN_INFO, scsi_target(cmd->device),
9367 "WARM TARGET DEVICE RESET FAILED - "
9368 "marker iocb failed.\n");
9369 return FAILED;
9370 }
9371
9372 starget_printk(KERN_INFO, scsi_target(cmd->device),
9373 "WARM TARGET RESET SUCCEEDED.\n");
9374 return SUCCESS;
9375 }
9376
9377 /**
9378 * qla4xxx_is_eh_active - check if error handler is running
9379 * @shost: Pointer to SCSI Host struct
9380 *
9381 * This routine finds that if reset host is called in EH
9382 * scenario or from some application like sg_reset
9383 **/
9384 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
9385 {
9386 if (shost->shost_state == SHOST_RECOVERY)
9387 return 1;
9388 return 0;
9389 }
9390
9391 /**
9392 * qla4xxx_eh_host_reset - kernel callback
9393 * @cmd: Pointer to Linux's SCSI command structure
9394 *
9395 * This routine is invoked by the Linux kernel to perform fatal error
9396 * recovery on the specified adapter.
9397 **/
9398 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
9399 {
9400 int return_status = FAILED;
9401 struct scsi_qla_host *ha;
9402 int rval;
9403
9404 ha = to_qla_host(cmd->device->host);
9405
9406 rval = qla4xxx_isp_check_reg(ha);
9407 if (rval != QLA_SUCCESS) {
9408 ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
9409 return FAILED;
9410 }
9411
9412 if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
9413 qla4_83xx_set_idc_dontreset(ha);
9414
9415 /*
9416 * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
9417 * protocol drivers, we should not set device_state to NEED_RESET
9418 */
9419 if (ql4xdontresethba ||
9420 ((is_qla8032(ha) || is_qla8042(ha)) &&
9421 qla4_83xx_idc_dontreset(ha))) {
9422 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
9423 ha->host_no, __func__));
9424
9425 /* Clear outstanding srb in queues */
9426 if (qla4xxx_is_eh_active(cmd->device->host))
9427 qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
9428
9429 return FAILED;
9430 }
9431
9432 ql4_printk(KERN_INFO, ha,
9433 "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no,
9434 cmd->device->channel, cmd->device->id, cmd->device->lun);
9435
9436 if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
9437 DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host. Adapter "
9438 "DEAD.\n", ha->host_no, cmd->device->channel,
9439 __func__));
9440
9441 return FAILED;
9442 }
9443
9444 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
9445 if (is_qla80XX(ha))
9446 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
9447 else
9448 set_bit(DPC_RESET_HA, &ha->dpc_flags);
9449 }
9450
9451 if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
9452 return_status = SUCCESS;
9453
9454 ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
9455 return_status == FAILED ? "FAILED" : "SUCCEEDED");
9456
9457 return return_status;
9458 }
9459
9460 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
9461 {
9462 uint32_t mbox_cmd[MBOX_REG_COUNT];
9463 uint32_t mbox_sts[MBOX_REG_COUNT];
9464 struct addr_ctrl_blk_def *acb = NULL;
9465 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
9466 int rval = QLA_SUCCESS;
9467 dma_addr_t acb_dma;
9468
9469 acb = dma_alloc_coherent(&ha->pdev->dev,
9470 sizeof(struct addr_ctrl_blk_def),
9471 &acb_dma, GFP_KERNEL);
9472 if (!acb) {
9473 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
9474 __func__);
9475 rval = -ENOMEM;
9476 goto exit_port_reset;
9477 }
9478
9479 memset(acb, 0, acb_len);
9480
9481 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
9482 if (rval != QLA_SUCCESS) {
9483 rval = -EIO;
9484 goto exit_free_acb;
9485 }
9486
9487 rval = qla4xxx_disable_acb(ha);
9488 if (rval != QLA_SUCCESS) {
9489 rval = -EIO;
9490 goto exit_free_acb;
9491 }
9492
9493 wait_for_completion_timeout(&ha->disable_acb_comp,
9494 DISABLE_ACB_TOV * HZ);
9495
9496 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
9497 if (rval != QLA_SUCCESS) {
9498 rval = -EIO;
9499 goto exit_free_acb;
9500 }
9501
9502 exit_free_acb:
9503 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
9504 acb, acb_dma);
9505 exit_port_reset:
9506 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
9507 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
9508 return rval;
9509 }
9510
9511 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
9512 {
9513 struct scsi_qla_host *ha = to_qla_host(shost);
9514 int rval = QLA_SUCCESS;
9515 uint32_t idc_ctrl;
9516
9517 if (ql4xdontresethba) {
9518 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
9519 __func__));
9520 rval = -EPERM;
9521 goto exit_host_reset;
9522 }
9523
9524 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
9525 goto recover_adapter;
9526
9527 switch (reset_type) {
9528 case SCSI_ADAPTER_RESET:
9529 set_bit(DPC_RESET_HA, &ha->dpc_flags);
9530 break;
9531 case SCSI_FIRMWARE_RESET:
9532 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
9533 if (is_qla80XX(ha))
9534 /* set firmware context reset */
9535 set_bit(DPC_RESET_HA_FW_CONTEXT,
9536 &ha->dpc_flags);
9537 else {
9538 rval = qla4xxx_context_reset(ha);
9539 goto exit_host_reset;
9540 }
9541 }
9542 break;
9543 }
9544
9545 recover_adapter:
9546 /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
9547 * reset is issued by application */
9548 if ((is_qla8032(ha) || is_qla8042(ha)) &&
9549 test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
9550 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
9551 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
9552 (idc_ctrl | GRACEFUL_RESET_BIT1));
9553 }
9554
9555 rval = qla4xxx_recover_adapter(ha);
9556 if (rval != QLA_SUCCESS) {
9557 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
9558 __func__));
9559 rval = -EIO;
9560 }
9561
9562 exit_host_reset:
9563 return rval;
9564 }
9565
9566 /* PCI AER driver recovers from all correctable errors w/o
9567 * driver intervention. For uncorrectable errors PCI AER
9568 * driver calls the following device driver's callbacks
9569 *
9570 * - Fatal Errors - link_reset
9571 * - Non-Fatal Errors - driver's error_detected() which
9572 * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
9573 *
9574 * PCI AER driver calls
9575 * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled()
9576 * returns RECOVERED or NEED_RESET if fw_hung
9577 * NEED_RESET - driver's slot_reset()
9578 * DISCONNECT - device is dead & cannot recover
9579 * RECOVERED - driver's resume()
9580 */
9581 static pci_ers_result_t
9582 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
9583 {
9584 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9585
9586 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
9587 ha->host_no, __func__, state);
9588
9589 if (!is_aer_supported(ha))
9590 return PCI_ERS_RESULT_NONE;
9591
9592 switch (state) {
9593 case pci_channel_io_normal:
9594 clear_bit(AF_EEH_BUSY, &ha->flags);
9595 return PCI_ERS_RESULT_CAN_RECOVER;
9596 case pci_channel_io_frozen:
9597 set_bit(AF_EEH_BUSY, &ha->flags);
9598 qla4xxx_mailbox_premature_completion(ha);
9599 qla4xxx_free_irqs(ha);
9600 pci_disable_device(pdev);
9601 /* Return back all IOs */
9602 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
9603 return PCI_ERS_RESULT_NEED_RESET;
9604 case pci_channel_io_perm_failure:
9605 set_bit(AF_EEH_BUSY, &ha->flags);
9606 set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
9607 qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
9608 return PCI_ERS_RESULT_DISCONNECT;
9609 }
9610 return PCI_ERS_RESULT_NEED_RESET;
9611 }
9612
9613 /**
9614 * qla4xxx_pci_mmio_enabled() gets called if
9615 * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
9616 * and read/write to the device still works.
9617 **/
9618 static pci_ers_result_t
9619 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
9620 {
9621 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9622
9623 if (!is_aer_supported(ha))
9624 return PCI_ERS_RESULT_NONE;
9625
9626 return PCI_ERS_RESULT_RECOVERED;
9627 }
9628
9629 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
9630 {
9631 uint32_t rval = QLA_ERROR;
9632 int fn;
9633 struct pci_dev *other_pdev = NULL;
9634
9635 ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
9636
9637 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
9638
9639 if (test_bit(AF_ONLINE, &ha->flags)) {
9640 clear_bit(AF_ONLINE, &ha->flags);
9641 clear_bit(AF_LINK_UP, &ha->flags);
9642 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
9643 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
9644 }
9645
9646 fn = PCI_FUNC(ha->pdev->devfn);
9647 if (is_qla8022(ha)) {
9648 while (fn > 0) {
9649 fn--;
9650 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n",
9651 ha->host_no, __func__, fn);
9652 /* Get the pci device given the domain, bus,
9653 * slot/function number */
9654 other_pdev = pci_get_domain_bus_and_slot(
9655 pci_domain_nr(ha->pdev->bus),
9656 ha->pdev->bus->number,
9657 PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
9658 fn));
9659
9660 if (!other_pdev)
9661 continue;
9662
9663 if (atomic_read(&other_pdev->enable_cnt)) {
9664 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n",
9665 ha->host_no, __func__, fn);
9666 pci_dev_put(other_pdev);
9667 break;
9668 }
9669 pci_dev_put(other_pdev);
9670 }
9671 } else {
9672 /* this case is meant for ISP83xx/ISP84xx only */
9673 if (qla4_83xx_can_perform_reset(ha)) {
9674 /* reset fn as iSCSI is going to perform the reset */
9675 fn = 0;
9676 }
9677 }
9678
9679 /* The first function on the card, the reset owner will
9680 * start & initialize the firmware. The other functions
9681 * on the card will reset the firmware context
9682 */
9683 if (!fn) {
9684 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
9685 "0x%x is the owner\n", ha->host_no, __func__,
9686 ha->pdev->devfn);
9687
9688 ha->isp_ops->idc_lock(ha);
9689 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9690 QLA8XXX_DEV_COLD);
9691 ha->isp_ops->idc_unlock(ha);
9692
9693 rval = qla4_8xxx_update_idc_reg(ha);
9694 if (rval == QLA_ERROR) {
9695 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
9696 ha->host_no, __func__);
9697 ha->isp_ops->idc_lock(ha);
9698 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9699 QLA8XXX_DEV_FAILED);
9700 ha->isp_ops->idc_unlock(ha);
9701 goto exit_error_recovery;
9702 }
9703
9704 clear_bit(AF_FW_RECOVERY, &ha->flags);
9705 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
9706
9707 if (rval != QLA_SUCCESS) {
9708 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
9709 "FAILED\n", ha->host_no, __func__);
9710 qla4xxx_free_irqs(ha);
9711 ha->isp_ops->idc_lock(ha);
9712 qla4_8xxx_clear_drv_active(ha);
9713 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9714 QLA8XXX_DEV_FAILED);
9715 ha->isp_ops->idc_unlock(ha);
9716 } else {
9717 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
9718 "READY\n", ha->host_no, __func__);
9719 ha->isp_ops->idc_lock(ha);
9720 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
9721 QLA8XXX_DEV_READY);
9722 /* Clear driver state register */
9723 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
9724 qla4_8xxx_set_drv_active(ha);
9725 ha->isp_ops->idc_unlock(ha);
9726 ha->isp_ops->enable_intrs(ha);
9727 }
9728 } else {
9729 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
9730 "the reset owner\n", ha->host_no, __func__,
9731 ha->pdev->devfn);
9732 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
9733 QLA8XXX_DEV_READY)) {
9734 clear_bit(AF_FW_RECOVERY, &ha->flags);
9735 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
9736 if (rval == QLA_SUCCESS)
9737 ha->isp_ops->enable_intrs(ha);
9738 else
9739 qla4xxx_free_irqs(ha);
9740
9741 ha->isp_ops->idc_lock(ha);
9742 qla4_8xxx_set_drv_active(ha);
9743 ha->isp_ops->idc_unlock(ha);
9744 }
9745 }
9746 exit_error_recovery:
9747 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
9748 return rval;
9749 }
9750
9751 static pci_ers_result_t
9752 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
9753 {
9754 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
9755 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9756 int rc;
9757
9758 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
9759 ha->host_no, __func__);
9760
9761 if (!is_aer_supported(ha))
9762 return PCI_ERS_RESULT_NONE;
9763
9764 /* Restore the saved state of PCIe device -
9765 * BAR registers, PCI Config space, PCIX, MSI,
9766 * IOV states
9767 */
9768 pci_restore_state(pdev);
9769
9770 /* pci_restore_state() clears the saved_state flag of the device
9771 * save restored state which resets saved_state flag
9772 */
9773 pci_save_state(pdev);
9774
9775 /* Initialize device or resume if in suspended state */
9776 rc = pci_enable_device(pdev);
9777 if (rc) {
9778 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
9779 "device after reset\n", ha->host_no, __func__);
9780 goto exit_slot_reset;
9781 }
9782
9783 ha->isp_ops->disable_intrs(ha);
9784
9785 if (is_qla80XX(ha)) {
9786 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
9787 ret = PCI_ERS_RESULT_RECOVERED;
9788 goto exit_slot_reset;
9789 } else
9790 goto exit_slot_reset;
9791 }
9792
9793 exit_slot_reset:
9794 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n"
9795 "device after reset\n", ha->host_no, __func__, ret);
9796 return ret;
9797 }
9798
9799 static void
9800 qla4xxx_pci_resume(struct pci_dev *pdev)
9801 {
9802 struct scsi_qla_host *ha = pci_get_drvdata(pdev);
9803 int ret;
9804
9805 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
9806 ha->host_no, __func__);
9807
9808 ret = qla4xxx_wait_for_hba_online(ha);
9809 if (ret != QLA_SUCCESS) {
9810 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
9811 "resume I/O from slot/link_reset\n", ha->host_no,
9812 __func__);
9813 }
9814
9815 clear_bit(AF_EEH_BUSY, &ha->flags);
9816 }
9817
9818 static const struct pci_error_handlers qla4xxx_err_handler = {
9819 .error_detected = qla4xxx_pci_error_detected,
9820 .mmio_enabled = qla4xxx_pci_mmio_enabled,
9821 .slot_reset = qla4xxx_pci_slot_reset,
9822 .resume = qla4xxx_pci_resume,
9823 };
9824
9825 static struct pci_device_id qla4xxx_pci_tbl[] = {
9826 {
9827 .vendor = PCI_VENDOR_ID_QLOGIC,
9828 .device = PCI_DEVICE_ID_QLOGIC_ISP4010,
9829 .subvendor = PCI_ANY_ID,
9830 .subdevice = PCI_ANY_ID,
9831 },
9832 {
9833 .vendor = PCI_VENDOR_ID_QLOGIC,
9834 .device = PCI_DEVICE_ID_QLOGIC_ISP4022,
9835 .subvendor = PCI_ANY_ID,
9836 .subdevice = PCI_ANY_ID,
9837 },
9838 {
9839 .vendor = PCI_VENDOR_ID_QLOGIC,
9840 .device = PCI_DEVICE_ID_QLOGIC_ISP4032,
9841 .subvendor = PCI_ANY_ID,
9842 .subdevice = PCI_ANY_ID,
9843 },
9844 {
9845 .vendor = PCI_VENDOR_ID_QLOGIC,
9846 .device = PCI_DEVICE_ID_QLOGIC_ISP8022,
9847 .subvendor = PCI_ANY_ID,
9848 .subdevice = PCI_ANY_ID,
9849 },
9850 {
9851 .vendor = PCI_VENDOR_ID_QLOGIC,
9852 .device = PCI_DEVICE_ID_QLOGIC_ISP8324,
9853 .subvendor = PCI_ANY_ID,
9854 .subdevice = PCI_ANY_ID,
9855 },
9856 {
9857 .vendor = PCI_VENDOR_ID_QLOGIC,
9858 .device = PCI_DEVICE_ID_QLOGIC_ISP8042,
9859 .subvendor = PCI_ANY_ID,
9860 .subdevice = PCI_ANY_ID,
9861 },
9862 {0, 0},
9863 };
9864 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
9865
9866 static struct pci_driver qla4xxx_pci_driver = {
9867 .name = DRIVER_NAME,
9868 .id_table = qla4xxx_pci_tbl,
9869 .probe = qla4xxx_probe_adapter,
9870 .remove = qla4xxx_remove_adapter,
9871 .err_handler = &qla4xxx_err_handler,
9872 };
9873
9874 static int __init qla4xxx_module_init(void)
9875 {
9876 int ret;
9877
9878 if (ql4xqfulltracking)
9879 qla4xxx_driver_template.track_queue_depth = 1;
9880
9881 /* Allocate cache for SRBs. */
9882 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
9883 SLAB_HWCACHE_ALIGN, NULL);
9884 if (srb_cachep == NULL) {
9885 printk(KERN_ERR
9886 "%s: Unable to allocate SRB cache..."
9887 "Failing load!\n", DRIVER_NAME);
9888 ret = -ENOMEM;
9889 goto no_srp_cache;
9890 }
9891
9892 /* Derive version string. */
9893 strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
9894 if (ql4xextended_error_logging)
9895 strcat(qla4xxx_version_str, "-debug");
9896
9897 qla4xxx_scsi_transport =
9898 iscsi_register_transport(&qla4xxx_iscsi_transport);
9899 if (!qla4xxx_scsi_transport){
9900 ret = -ENODEV;
9901 goto release_srb_cache;
9902 }
9903
9904 ret = pci_register_driver(&qla4xxx_pci_driver);
9905 if (ret)
9906 goto unregister_transport;
9907
9908 printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
9909 return 0;
9910
9911 unregister_transport:
9912 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
9913 release_srb_cache:
9914 kmem_cache_destroy(srb_cachep);
9915 no_srp_cache:
9916 return ret;
9917 }
9918
9919 static void __exit qla4xxx_module_exit(void)
9920 {
9921 pci_unregister_driver(&qla4xxx_pci_driver);
9922 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
9923 kmem_cache_destroy(srb_cachep);
9924 }
9925
9926 module_init(qla4xxx_module_init);
9927 module_exit(qla4xxx_module_exit);
9928
9929 MODULE_AUTHOR("QLogic Corporation");
9930 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
9931 MODULE_LICENSE("GPL");
9932 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);