/**
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
        "Maximum Size (In Kilobytes) of physically contiguous "
        "memory that can be allocated. Range is 16 - 128");

#define beiscsi_disp_param(_name)\
ssize_t \
beiscsi_##_name##_disp(struct device *dev,\
                       struct device_attribute *attrib, char *buf) \
{ \
        struct Scsi_Host *shost = class_to_shost(dev);\
        struct beiscsi_hba *phba = iscsi_host_priv(shost); \
        uint32_t param_val = 0; \
        param_val = phba->attr_##_name;\
        return snprintf(buf, PAGE_SIZE, "%d\n",\
                        phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
        if (val >= _minval && val <= _maxval) {\
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
                            "BA_%d : beiscsi_"#_name" updated "\
                            "from 0x%x ==> 0x%x\n",\
                            phba->attr_##_name, val); \
                phba->attr_##_name = val;\
                return 0;\
        } \
        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
                    "BA_%d beiscsi_"#_name" attribute "\
                    "cannot be updated to 0x%x, "\
                    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
        return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
ssize_t \
beiscsi_##_name##_store(struct device *dev,\
                        struct device_attribute *attr, const char *buf,\
                        size_t count) \
{ \
        struct Scsi_Host *shost = class_to_shost(dev);\
        struct beiscsi_hba *phba = iscsi_host_priv(shost);\
        uint32_t param_val = 0;\
        if (!isdigit(buf[0]))\
                return -EINVAL;\
        if (sscanf(buf, "%i", &param_val) != 1)\
                return -EINVAL;\
        if (beiscsi_##_name##_change(phba, param_val) == 0) \
                return strlen(buf);\
        else \
                return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
        if (val >= _minval && val <= _maxval) {\
                phba->attr_##_name = val;\
                return 0;\
        } \
        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
                    "BA_%d beiscsi_"#_name" attribute " \
                    "cannot be updated to 0x%x, "\
                    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
        phba->attr_##_name = _defval;\
        return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
            beiscsi_##_name##_disp, beiscsi_##_name##_store)

/*
 * When a new log level is added, update the
 * MAX allowed value for log_enable
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
                0xFF, 0x00, "Enable logging Bit Mask\n"
                "\t\t\t\tInitialization Events : 0x01\n"
                "\t\t\t\tMailbox Events : 0x02\n"
                "\t\t\t\tMiscellaneous Events : 0x04\n"
                "\t\t\t\tError Handling : 0x08\n"
                "\t\t\t\tIO Path Events : 0x10\n"
                "\t\t\t\tConfiguration Path : 0x20\n");

DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
        &dev_attr_beiscsi_log_enable,
        &dev_attr_beiscsi_drvr_ver,
        NULL,
};

static char const *cqe_desc[] = {
        "RESERVED_DESC",
        "SOL_CMD_COMPLETE",
        "SOL_CMD_KILLED_DATA_DIGEST_ERR",
        "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
        "CXN_KILLED_BURST_LEN_MISMATCH",
        "CXN_KILLED_AHS_RCVD",
        "CXN_KILLED_HDR_DIGEST_ERR",
        "CXN_KILLED_UNKNOWN_HDR",
        "CXN_KILLED_STALE_ITT_TTT_RCVD",
        "CXN_KILLED_INVALID_ITT_TTT_RCVD",
        "CXN_KILLED_RST_RCVD",
        "CXN_KILLED_TIMED_OUT",
        "CXN_KILLED_RST_SENT",
        "CXN_KILLED_FIN_RCVD",
        "CXN_KILLED_BAD_UNSOL_PDU_RCVD",
        "CXN_KILLED_BAD_WRB_INDEX_ERROR",
        "CXN_KILLED_OVER_RUN_RESIDUAL",
        "CXN_KILLED_UNDER_RUN_RESIDUAL",
        "CMD_KILLED_INVALID_STATSN_RCVD",
        "CMD_KILLED_INVALID_R2T_RCVD",
        "CMD_CXN_KILLED_LUN_INVALID",
        "CMD_CXN_KILLED_ICD_INVALID",
        "CMD_CXN_KILLED_ITT_INVALID",
        "CMD_CXN_KILLED_SEQ_OUTOFORDER",
        "CMD_CXN_KILLED_INVALID_DATASN_RCVD",
        "CXN_INVALIDATE_NOTIFY",
        "CXN_INVALIDATE_INDEX_NOTIFY",
        "CMD_INVALIDATED_NOTIFY",
        "UNSOL_HDR_NOTIFY",
        "UNSOL_DATA_NOTIFY",
        "UNSOL_DATA_DIGEST_ERROR_NOTIFY",
        "DRIVERMSG_NOTIFY",
        "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
        "SOL_CMD_KILLED_DIF_ERR",
        "CXN_KILLED_SYN_RCVD",
        "CXN_KILLED_IMM_DATA_RCVD"
};

static int beiscsi_slave_configure(struct scsi_device *sdev)
{
        blk_queue_max_segment_size(sdev->request_queue, 65536);
        return 0;
}

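/**
 * beiscsi_eh_abort - iscsi host abort handler
 * @sc: SCSI command to be aborted
 *
 * Invalidates the ICD for the aborted command on the chip and then
 * hands the abort off to libiscsi via iscsi_eh_abort().
 */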
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
        struct iscsi_cls_session *cls_session;
        struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
        struct beiscsi_io_task *aborted_io_task;
        struct iscsi_conn *conn;
        struct beiscsi_conn *beiscsi_conn;
        struct beiscsi_hba *phba;
        struct iscsi_session *session;
        struct invalidate_command_table *inv_tbl;
        struct be_dma_mem nonemb_cmd;
        unsigned int cid, tag, num_invalidate;

        cls_session = starget_to_session(scsi_target(sc->device));
        session = cls_session->dd_data;

        spin_lock_bh(&session->lock);
        if (!aborted_task || !aborted_task->sc) {
                /* we raced */
                spin_unlock_bh(&session->lock);
                return SUCCESS;
        }

        aborted_io_task = aborted_task->dd_data;
        if (!aborted_io_task->scsi_cmnd) {
                /* raced or invalid command */
                spin_unlock_bh(&session->lock);
                return SUCCESS;
        }
        spin_unlock_bh(&session->lock);
        conn = aborted_task->conn;
        beiscsi_conn = conn->dd_data;
        phba = beiscsi_conn->phba;

        /* invalidate iocb */
        cid = beiscsi_conn->beiscsi_conn_cid;
        inv_tbl = phba->inv_tbl;
        memset(inv_tbl, 0x0, sizeof(*inv_tbl));
        inv_tbl->cid = cid;
        inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
        num_invalidate = 1;
        nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
                                sizeof(struct invalidate_commands_params_in),
                                &nonemb_cmd.dma);
        if (nonemb_cmd.va == NULL) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
                            "BM_%d : Failed to allocate memory for "
                            "mgmt_invalidate_icds\n");
                return FAILED;
        }
        nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

        tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
                                   cid, &nonemb_cmd);
        if (!tag) {
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
                            "BM_%d : mgmt_invalidate_icds could not be "
                            "submitted\n");
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);

                return FAILED;
        } else {
                wait_event_interruptible(phba->ctrl.mcc_wait[tag],
                                         phba->ctrl.mcc_numtag[tag]);
                free_mcc_tag(&phba->ctrl, tag);
        }
        pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
        return iscsi_eh_abort(sc);
}

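/**
 * beiscsi_eh_device_reset - iscsi host LUN reset handler
 * @sc: SCSI command that triggered the reset
 *
 * Builds an invalidation table covering every active task on the LUN,
 * invalidates the ICDs on the chip and then lets libiscsi issue the
 * LUN reset via iscsi_eh_device_reset().
 */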
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
        struct iscsi_task *abrt_task;
        struct beiscsi_io_task *abrt_io_task;
        struct iscsi_conn *conn;
        struct beiscsi_conn *beiscsi_conn;
        struct beiscsi_hba *phba;
        struct iscsi_session *session;
        struct iscsi_cls_session *cls_session;
        struct invalidate_command_table *inv_tbl;
        struct be_dma_mem nonemb_cmd;
        unsigned int cid, tag, i, num_invalidate;

        /* invalidate iocbs */
        cls_session = starget_to_session(scsi_target(sc->device));
        session = cls_session->dd_data;
        spin_lock_bh(&session->lock);
        if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
                spin_unlock_bh(&session->lock);
                return FAILED;
        }
        conn = session->leadconn;
        beiscsi_conn = conn->dd_data;
        phba = beiscsi_conn->phba;
        cid = beiscsi_conn->beiscsi_conn_cid;
        inv_tbl = phba->inv_tbl;
        memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
        num_invalidate = 0;
        for (i = 0; i < conn->session->cmds_max; i++) {
                abrt_task = conn->session->cmds[i];
                abrt_io_task = abrt_task->dd_data;
                if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
                        continue;

                if (sc->device->lun != abrt_task->sc->device->lun)
                        continue;

                inv_tbl->cid = cid;
                inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
                num_invalidate++;
                inv_tbl++;
        }
        spin_unlock_bh(&session->lock);
        inv_tbl = phba->inv_tbl;

        nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
                                sizeof(struct invalidate_commands_params_in),
                                &nonemb_cmd.dma);
        if (nonemb_cmd.va == NULL) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
                            "BM_%d : Failed to allocate memory for "
                            "mgmt_invalidate_icds\n");
                return FAILED;
        }
        nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
        memset(nonemb_cmd.va, 0, nonemb_cmd.size);
        tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
                                   cid, &nonemb_cmd);
        if (!tag) {
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
                            "BM_%d : mgmt_invalidate_icds could not be"
                            " submitted\n");
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
                return FAILED;
        } else {
                wait_event_interruptible(phba->ctrl.mcc_wait[tag],
                                         phba->ctrl.mcc_numtag[tag]);
                free_mcc_tag(&phba->ctrl, tag);
        }
        pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
        return iscsi_eh_device_reset(sc);
}

static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
        struct beiscsi_hba *phba = data;
        struct mgmt_session_info *boot_sess = &phba->boot_sess;
        struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
        char *str = buf;
        int rc;

        switch (type) {
        case ISCSI_BOOT_TGT_NAME:
                rc = sprintf(buf, "%.*s\n",
                             (int)strlen(boot_sess->target_name),
                             (char *)&boot_sess->target_name);
                break;
        case ISCSI_BOOT_TGT_IP_ADDR:
                if (boot_conn->dest_ipaddr.ip_type == 0x1)
                        rc = sprintf(buf, "%pI4\n",
                                     (char *)&boot_conn->dest_ipaddr.addr);
                else
                        rc = sprintf(str, "%pI6\n",
                                     (char *)&boot_conn->dest_ipaddr.addr);
                break;
        case ISCSI_BOOT_TGT_PORT:
                rc = sprintf(str, "%d\n", boot_conn->dest_port);
                break;

        case ISCSI_BOOT_TGT_CHAP_NAME:
                rc = sprintf(str, "%.*s\n",
                             boot_conn->negotiated_login_options.auth_data.chap.
                             target_chap_name_length,
                             (char *)&boot_conn->negotiated_login_options.
                             auth_data.chap.target_chap_name);
                break;
        case ISCSI_BOOT_TGT_CHAP_SECRET:
                rc = sprintf(str, "%.*s\n",
                             boot_conn->negotiated_login_options.auth_data.chap.
                             target_secret_length,
                             (char *)&boot_conn->negotiated_login_options.
                             auth_data.chap.target_secret);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
                rc = sprintf(str, "%.*s\n",
                             boot_conn->negotiated_login_options.auth_data.chap.
                             intr_chap_name_length,
                             (char *)&boot_conn->negotiated_login_options.
                             auth_data.chap.intr_chap_name);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
                rc = sprintf(str, "%.*s\n",
                             boot_conn->negotiated_login_options.auth_data.chap.
                             intr_secret_length,
                             (char *)&boot_conn->negotiated_login_options.
                             auth_data.chap.intr_secret);
                break;
        case ISCSI_BOOT_TGT_FLAGS:
                rc = sprintf(str, "2\n");
                break;
        case ISCSI_BOOT_TGT_NIC_ASSOC:
                rc = sprintf(str, "0\n");
                break;
        default:
                rc = -ENOSYS;
                break;
        }
        return rc;
}

static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
        struct beiscsi_hba *phba = data;
        char *str = buf;
        int rc;

        switch (type) {
        case ISCSI_BOOT_INI_INITIATOR_NAME:
                rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
                break;
        default:
                rc = -ENOSYS;
                break;
        }
        return rc;
}

static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
        struct beiscsi_hba *phba = data;
        char *str = buf;
        int rc;

        switch (type) {
        case ISCSI_BOOT_ETH_FLAGS:
                rc = sprintf(str, "2\n");
                break;
        case ISCSI_BOOT_ETH_INDEX:
                rc = sprintf(str, "0\n");
                break;
        case ISCSI_BOOT_ETH_MAC:
                rc = beiscsi_get_macaddr(str, phba);
                break;
        default:
                rc = -ENOSYS;
                break;
        }
        return rc;
}

static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
        umode_t rc;

        switch (type) {
        case ISCSI_BOOT_TGT_NAME:
        case ISCSI_BOOT_TGT_IP_ADDR:
        case ISCSI_BOOT_TGT_PORT:
        case ISCSI_BOOT_TGT_CHAP_NAME:
        case ISCSI_BOOT_TGT_CHAP_SECRET:
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
        case ISCSI_BOOT_TGT_NIC_ASSOC:
        case ISCSI_BOOT_TGT_FLAGS:
                rc = S_IRUGO;
                break;
        default:
                rc = 0;
                break;
        }
        return rc;
}

static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
        umode_t rc;

        switch (type) {
        case ISCSI_BOOT_INI_INITIATOR_NAME:
                rc = S_IRUGO;
                break;
        default:
                rc = 0;
                break;
        }
        return rc;
}

static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
        umode_t rc;

        switch (type) {
        case ISCSI_BOOT_ETH_FLAGS:
        case ISCSI_BOOT_ETH_MAC:
        case ISCSI_BOOT_ETH_INDEX:
                rc = S_IRUGO;
                break;
        default:
                rc = 0;
                break;
        }
        return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);

static struct scsi_host_template beiscsi_sht = {
        .module = THIS_MODULE,
        .name = "Emulex 10Gbe open-iscsi Initiator Driver",
        .proc_name = DRV_NAME,
        .queuecommand = iscsi_queuecommand,
        .change_queue_depth = iscsi_change_queue_depth,
        .slave_configure = beiscsi_slave_configure,
        .target_alloc = iscsi_target_alloc,
        .eh_abort_handler = beiscsi_eh_abort,
        .eh_device_reset_handler = beiscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_session_reset,
        .shost_attrs = beiscsi_attrs,
        .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
        .can_queue = BE2_IO_DEPTH,
        .this_id = -1,
        .max_sectors = BEISCSI_MAX_SECTORS,
        .cmd_per_lun = BEISCSI_CMD_PER_LUN,
        .use_clustering = ENABLE_CLUSTERING,
        .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

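/**
 * beiscsi_hba_alloc - allocate and register the iSCSI host
 * @pcidev: PCI device for this adapter
 *
 * Allocates the Scsi_Host with the driver private area, fills in the
 * host limits and registers it with the iSCSI transport.
 */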
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
        struct beiscsi_hba *phba;
        struct Scsi_Host *shost;

        shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
        if (!shost) {
                dev_err(&pcidev->dev,
                        "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
                return NULL;
        }
        shost->dma_boundary = pcidev->dma_mask;
        shost->max_id = BE2_MAX_SESSIONS;
        shost->max_channel = 0;
        shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
        shost->max_lun = BEISCSI_NUM_MAX_LUN;
        shost->transportt = beiscsi_scsi_transport;
        phba = iscsi_host_priv(shost);
        memset(phba, 0, sizeof(*phba));
        phba->shost = shost;
        phba->pcidev = pci_dev_get(pcidev);
        pci_set_drvdata(pcidev, phba);
        phba->interface_handle = 0xFFFFFFFF;

        if (iscsi_host_add(shost, &phba->pcidev->dev))
                goto free_devices;

        return phba;

free_devices:
        pci_dev_put(phba->pcidev);
        iscsi_host_free(phba->shost);
        return NULL;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
        if (phba->csr_va) {
                iounmap(phba->csr_va);
                phba->csr_va = NULL;
        }
        if (phba->db_va) {
                iounmap(phba->db_va);
                phba->db_va = NULL;
        }
        if (phba->pci_va) {
                iounmap(phba->pci_va);
                phba->pci_va = NULL;
        }
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
                                struct pci_dev *pcidev)
{
        u8 __iomem *addr;
        int pcicfg_reg;

        addr = ioremap_nocache(pci_resource_start(pcidev, 2),
                               pci_resource_len(pcidev, 2));
        if (addr == NULL)
                return -ENOMEM;
        phba->ctrl.csr = addr;
        phba->csr_va = addr;
        phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

        addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
        if (addr == NULL)
                goto pci_map_err;
        phba->ctrl.db = addr;
        phba->db_va = addr;
        phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

        if (phba->generation == BE_GEN2)
                pcicfg_reg = 1;
        else
                pcicfg_reg = 0;

        addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
                               pci_resource_len(pcidev, pcicfg_reg));

        if (addr == NULL)
                goto pci_map_err;
        phba->ctrl.pcicfg = addr;
        phba->pci_va = addr;
        phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
        return 0;

pci_map_err:
        beiscsi_unmap_pci_function(phba);
        return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
        int ret;

        ret = pci_enable_device(pcidev);
        if (ret) {
                dev_err(&pcidev->dev,
                        "beiscsi_enable_pci - enable device failed\n");
                return ret;
        }

        pci_set_master(pcidev);
        if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
                ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
                if (ret) {
                        dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
                        pci_disable_device(pcidev);
                        return ret;
                }
        }
        return 0;
}

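/**
 * be_ctrl_init - set up the adapter control structure
 * @phba: driver private structure
 * @pdev: PCI device
 *
 * Maps the PCI BARs, allocates the 16-byte-aligned mailbox used for
 * mailbox/MCC commands and initializes the related locks.
 */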
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
        int status = 0;

        ctrl->pdev = pdev;
        status = beiscsi_map_pci_bars(phba, pdev);
        if (status)
                return status;
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = pci_alloc_consistent(pdev,
                                                  mbox_mem_alloc->size,
                                                  &mbox_mem_alloc->dma);
        if (!mbox_mem_alloc->va) {
                beiscsi_unmap_pci_function(phba);
                return -ENOMEM;
        }

        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
        spin_lock_init(&ctrl->mbox_lock);
        spin_lock_init(&phba->ctrl.mcc_lock);
        spin_lock_init(&phba->ctrl.mcc_cq_lock);

        return status;
}

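/**
 * beiscsi_get_params - derive driver limits from the FW config
 * @phba: driver private structure
 *
 * Computes per-controller IO, connection, ICD and queue-entry counts
 * from the values reported by the firmware.
 */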
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
        phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
                                     - (phba->fw_config.iscsi_cid_count
                                     + BE2_TMFS
                                     + BE2_NOPOUT_REQ));
        phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
        phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
        phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
        phba->params.num_sge_per_io = BE2_SGE;
        phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
        phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
        phba->params.eq_timer = 64;
        phba->params.num_eq_entries =
            (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
               + BE2_TMFS) / 512) + 1) * 512;
        phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
                                      ? 1024 : phba->params.num_eq_entries;
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
                    "BM_%d : phba->params.num_eq_entries=%d\n",
                    phba->params.num_eq_entries);
        phba->params.num_cq_entries =
            (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
               + BE2_TMFS) / 512) + 1) * 512;
        phba->params.wrbs_per_cxn = 256;
}

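/**
 * hwi_ring_eq_db - ring an event queue doorbell
 * @phba: driver private structure
 * @id: EQ ring id
 * @clr_interrupt: clear the interrupt bit
 * @num_processed: number of EQEs consumed
 * @rearm: re-arm the EQ
 * @event: arm for events
 */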
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
                           unsigned int id, unsigned int clr_interrupt,
                           unsigned int num_processed,
                           unsigned char rearm, unsigned char event)
{
        u32 val = 0;
        val |= id & DB_EQ_RING_ID_MASK;
        if (rearm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clr_interrupt)
                val |= 1 << DB_EQ_CLR_SHIFT;
        if (event)
                val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - ISR for the MCC event queue
 * @irq: Not used
 * @dev_id: Pointer to the EQ object for this vector
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
        struct beiscsi_hba *phba;
        struct be_eq_entry *eqe = NULL;
        struct be_queue_info *eq;
        struct be_queue_info *mcc;
        unsigned int num_eq_processed;
        struct be_eq_obj *pbe_eq;
        unsigned long flags;

        pbe_eq = dev_id;
        eq = &pbe_eq->q;
        phba = pbe_eq->phba;
        mcc = &phba->ctrl.mcc_obj.cq;
        eqe = queue_tail_node(eq);

        num_eq_processed = 0;

        while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
               & EQE_VALID_MASK) {
                if (((eqe->dw[offsetof(struct amap_eq_entry,
                                       resource_id) / 32] &
                      EQE_RESID_MASK) >> 16) == mcc->id) {
                        spin_lock_irqsave(&phba->isr_lock, flags);
                        pbe_eq->todo_mcc_cq = true;
                        spin_unlock_irqrestore(&phba->isr_lock, flags);
                }
                AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                queue_tail_inc(eq);
                eqe = queue_tail_node(eq);
                num_eq_processed++;
        }
        if (pbe_eq->todo_mcc_cq)
                queue_work(phba->wq, &pbe_eq->work_cqs);
        if (num_eq_processed)
                hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

        return IRQ_HANDLED;
}

/**
 * be_isr_msix - per-EQ MSI-X ISR for IO completions
 * @irq: Not used
 * @dev_id: Pointer to the EQ object for this vector
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
        struct beiscsi_hba *phba;
        struct be_eq_entry *eqe = NULL;
        struct be_queue_info *eq;
        struct be_queue_info *cq;
        unsigned int num_eq_processed;
        struct be_eq_obj *pbe_eq;
        unsigned long flags;

        pbe_eq = dev_id;
        eq = &pbe_eq->q;
        cq = pbe_eq->cq;
        eqe = queue_tail_node(eq);

        phba = pbe_eq->phba;
        num_eq_processed = 0;
        if (blk_iopoll_enabled) {
                while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                       & EQE_VALID_MASK) {
                        if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
                                blk_iopoll_sched(&pbe_eq->iopoll);

                        AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                        queue_tail_inc(eq);
                        eqe = queue_tail_node(eq);
                        num_eq_processed++;
                }
        } else {
                while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                       & EQE_VALID_MASK) {
                        spin_lock_irqsave(&phba->isr_lock, flags);
                        pbe_eq->todo_cq = true;
                        spin_unlock_irqrestore(&phba->isr_lock, flags);
                        AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                        queue_tail_inc(eq);
                        eqe = queue_tail_node(eq);
                        num_eq_processed++;
                }

                if (pbe_eq->todo_cq)
                        queue_work(phba->wq, &pbe_eq->work_cqs);
        }

        if (num_eq_processed)
                hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

        return IRQ_HANDLED;
}

/**
 * be_isr - legacy INTx ISR, demultiplexes MCC and IO events
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
        struct beiscsi_hba *phba;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
        struct be_eq_entry *eqe = NULL;
        struct be_queue_info *eq;
        struct be_queue_info *cq;
        struct be_queue_info *mcc;
        unsigned long flags, index;
        unsigned int num_mcceq_processed, num_ioeq_processed;
        struct be_ctrl_info *ctrl;
        struct be_eq_obj *pbe_eq;
        int isr;

        phba = dev_id;
        ctrl = &phba->ctrl;
        isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
                       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
        if (!isr)
                return IRQ_NONE;

        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;
        pbe_eq = &phwi_context->be_eq[0];

        eq = &phwi_context->be_eq[0].q;
        mcc = &phba->ctrl.mcc_obj.cq;
        index = 0;
        eqe = queue_tail_node(eq);

        num_ioeq_processed = 0;
        num_mcceq_processed = 0;
        if (blk_iopoll_enabled) {
                while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                       & EQE_VALID_MASK) {
                        if (((eqe->dw[offsetof(struct amap_eq_entry,
                                               resource_id) / 32] &
                              EQE_RESID_MASK) >> 16) == mcc->id) {
                                spin_lock_irqsave(&phba->isr_lock, flags);
                                pbe_eq->todo_mcc_cq = true;
                                spin_unlock_irqrestore(&phba->isr_lock, flags);
                                num_mcceq_processed++;
                        } else {
                                if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
                                        blk_iopoll_sched(&pbe_eq->iopoll);
                                num_ioeq_processed++;
                        }
                        AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                        queue_tail_inc(eq);
                        eqe = queue_tail_node(eq);
                }
                if (num_ioeq_processed || num_mcceq_processed) {
                        if (pbe_eq->todo_mcc_cq)
                                queue_work(phba->wq, &pbe_eq->work_cqs);

                        if ((num_mcceq_processed) && (!num_ioeq_processed))
                                hwi_ring_eq_db(phba, eq->id, 0,
                                               (num_ioeq_processed +
                                                num_mcceq_processed), 1, 1);
                        else
                                hwi_ring_eq_db(phba, eq->id, 0,
                                               (num_ioeq_processed +
                                                num_mcceq_processed), 0, 1);

                        return IRQ_HANDLED;
                } else
                        return IRQ_NONE;
        } else {
                cq = &phwi_context->be_cq[0];
                while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
                       & EQE_VALID_MASK) {

                        if (((eqe->dw[offsetof(struct amap_eq_entry,
                                               resource_id) / 32] &
                              EQE_RESID_MASK) >> 16) != cq->id) {
                                spin_lock_irqsave(&phba->isr_lock, flags);
                                pbe_eq->todo_mcc_cq = true;
                                spin_unlock_irqrestore(&phba->isr_lock, flags);
                        } else {
                                spin_lock_irqsave(&phba->isr_lock, flags);
                                pbe_eq->todo_cq = true;
                                spin_unlock_irqrestore(&phba->isr_lock, flags);
                        }
                        AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
                        queue_tail_inc(eq);
                        eqe = queue_tail_node(eq);
                        num_ioeq_processed++;
                }
                if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
                        queue_work(phba->wq, &pbe_eq->work_cqs);

                if (num_ioeq_processed) {
                        hwi_ring_eq_db(phba, eq->id, 0,
                                       num_ioeq_processed, 1, 1);
                        return IRQ_HANDLED;
                } else
                        return IRQ_NONE;
        }
}

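/**
 * beiscsi_init_irqs - register interrupt handlers
 * @phba: driver private structure
 *
 * Registers one MSI-X vector per CPU for IO plus one for MCC, or a
 * single shared INTx handler when MSI-X is not enabled.
 */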
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
        struct pci_dev *pcidev = phba->pcidev;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
        int ret, msix_vec, i, j;

        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;

        if (phba->msix_enabled) {
                for (i = 0; i < phba->num_cpus; i++) {
                        phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
                                                    GFP_KERNEL);
                        if (!phba->msi_name[i]) {
                                ret = -ENOMEM;
                                goto free_msix_irqs;
                        }

                        sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
                                phba->shost->host_no, i);
                        msix_vec = phba->msix_entries[i].vector;
                        ret = request_irq(msix_vec, be_isr_msix, 0,
                                          phba->msi_name[i],
                                          &phwi_context->be_eq[i]);
                        if (ret) {
                                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                                            "BM_%d : beiscsi_init_irqs-Failed to "
                                            "register msix for i = %d\n",
                                            i);
                                kfree(phba->msi_name[i]);
                                goto free_msix_irqs;
                        }
                }
                phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
                if (!phba->msi_name[i]) {
                        ret = -ENOMEM;
                        goto free_msix_irqs;
                }
                sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
                        phba->shost->host_no);
                msix_vec = phba->msix_entries[i].vector;
                ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
                                  &phwi_context->be_eq[i]);
                if (ret) {
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                                    "BM_%d : beiscsi_init_irqs-"
                                    "Failed to register beiscsi_msix_mcc\n");
                        kfree(phba->msi_name[i]);
                        goto free_msix_irqs;
                }

        } else {
                ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
                                  "beiscsi", phba);
                if (ret) {
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                                    "BM_%d : beiscsi_init_irqs-"
                                    "Failed to register irq\n");
                        return ret;
                }
        }
        return 0;
free_msix_irqs:
        for (j = i - 1; j >= 0; j--) {
                kfree(phba->msi_name[j]);
                msix_vec = phba->msix_entries[j].vector;
                free_irq(msix_vec, &phwi_context->be_eq[j]);
        }
        return ret;
}

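/**
 * hwi_ring_cq_db - ring a completion queue doorbell
 * @phba: driver private structure
 * @id: CQ ring id
 * @num_processed: number of CQEs consumed
 * @rearm: re-arm the CQ
 * @event: arm for events
 */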
static void hwi_ring_cq_db(struct beiscsi_hba *phba,
                           unsigned int id, unsigned int num_processed,
                           unsigned char rearm, unsigned char event)
{
        u32 val = 0;
        val |= id & DB_CQ_RING_ID_MASK;
        if (rearm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

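/**
 * beiscsi_process_async_pdu - hand an unsolicited PDU to libiscsi
 * @beiscsi_conn: driver connection
 * @phba: driver private structure
 * @cid: connection id the PDU arrived on
 * @ppdu: PDU header
 * @pdu_len: header length
 * @pbuffer: PDU data payload
 * @buf_len: payload length
 *
 * Fixes up the ITT for login/text responses and completes the PDU
 * through __iscsi_complete_pdu() under the session lock.
 */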
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
                          struct beiscsi_hba *phba,
                          unsigned short cid,
                          struct pdu_base *ppdu,
                          unsigned long pdu_len,
                          void *pbuffer, unsigned long buf_len)
{
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct iscsi_session *session = conn->session;
        struct iscsi_task *task;
        struct beiscsi_io_task *io_task;
        struct iscsi_hdr *login_hdr;

        switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
                PDUBASE_OPCODE_MASK) {
        case ISCSI_OP_NOOP_IN:
                pbuffer = NULL;
                buf_len = 0;
                break;
        case ISCSI_OP_ASYNC_EVENT:
                break;
        case ISCSI_OP_REJECT:
                WARN_ON(!pbuffer);
                WARN_ON(!(buf_len == 48));
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
                            "BM_%d : In ISCSI_OP_REJECT\n");
                break;
        case ISCSI_OP_LOGIN_RSP:
        case ISCSI_OP_TEXT_RSP:
                task = conn->login_task;
                io_task = task->dd_data;
                login_hdr = (struct iscsi_hdr *)ppdu;
                login_hdr->itt = io_task->libiscsi_itt;
                break;
        default:
                beiscsi_log(phba, KERN_WARNING,
                            BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
                            "BM_%d : Unrecognized opcode 0x%x in async msg\n",
                            (ppdu->
                             dw[offsetof(struct amap_pdu_base, opcode) / 32]
                             & PDUBASE_OPCODE_MASK));
                return 1;
        }

        spin_lock_bh(&session->lock);
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
        spin_unlock_bh(&session->lock);
        return 0;
}

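/**
 * alloc_io_sgl_handle - pop a free IO SGL handle
 * @phba: driver private structure
 *
 * Returns the next free handle from the circular IO SGL pool, or
 * NULL if the pool is exhausted.
 */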
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
        struct sgl_handle *psgl_handle;

        if (phba->io_sgl_hndl_avbl) {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
                            "BM_%d : In alloc_io_sgl_handle,"
                            " io_sgl_alloc_index=%d\n",
                            phba->io_sgl_alloc_index);

                psgl_handle = phba->io_sgl_hndl_base[phba->
                                                     io_sgl_alloc_index];
                phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
                phba->io_sgl_hndl_avbl--;
                if (phba->io_sgl_alloc_index == (phba->params.
                                                 ios_per_ctrl - 1))
                        phba->io_sgl_alloc_index = 0;
                else
                        phba->io_sgl_alloc_index++;
        } else
                psgl_handle = NULL;
        return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
                    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
                    phba->io_sgl_free_index);

        if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
                /*
                 * this can happen if clean_task is called on a task that
                 * failed in xmit_task or alloc_pdu.
                 */
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
                            "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, "
                            "value there=%p\n", phba->io_sgl_free_index,
                            phba->io_sgl_hndl_base
                            [phba->io_sgl_free_index]);
                return;
        }
        phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
        phba->io_sgl_hndl_avbl++;
        if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
                phba->io_sgl_free_index = 0;
        else
                phba->io_sgl_free_index++;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
        struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;

        phwi_ctrlr = phba->phwi_ctrlr;
        pwrb_context = &phwi_ctrlr->wrb_context[cid];
        if (pwrb_context->wrb_handles_available >= 2) {
                pwrb_handle = pwrb_context->pwrb_handle_base[
                                            pwrb_context->alloc_index];
                pwrb_context->wrb_handles_available--;
                if (pwrb_context->alloc_index ==
                    (phba->params.wrbs_per_cxn - 1))
                        pwrb_context->alloc_index = 0;
                else
                        pwrb_context->alloc_index++;
                pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
                                            pwrb_context->alloc_index];
                pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
        } else
                pwrb_handle = NULL;
        return pwrb_handle;
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
                struct wrb_handle *pwrb_handle)
{
        pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
        pwrb_context->wrb_handles_available++;
        if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
                pwrb_context->free_index = 0;
        else
                pwrb_context->free_index++;

        beiscsi_log(phba, KERN_INFO,
                    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
                    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
                    "wrb_handles_available=%d\n",
                    pwrb_handle, pwrb_context->free_index,
                    pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
        struct sgl_handle *psgl_handle;

        if (phba->eh_sgl_hndl_avbl) {
                psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
                phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                            "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
                            phba->eh_sgl_alloc_index,
                            phba->eh_sgl_alloc_index);

                phba->eh_sgl_hndl_avbl--;
                if (phba->eh_sgl_alloc_index ==
                    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
                     1))
                        phba->eh_sgl_alloc_index = 0;
                else
                        phba->eh_sgl_alloc_index++;
        } else
                psgl_handle = NULL;
        return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{

        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                    "BM_%d : In free_mgmt_sgl_handle, "
                    "eh_sgl_free_index=%d\n",
                    phba->eh_sgl_free_index);

        if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
                /*
                 * this can happen if clean_task is called on a task that
                 * failed in xmit_task or alloc_pdu.
                 */
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
                            "BM_%d : Double Free in eh SGL, "
                            "eh_sgl_free_index=%d\n",
                            phba->eh_sgl_free_index);
                return;
        }
        phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
        phba->eh_sgl_hndl_avbl++;
        if (phba->eh_sgl_free_index ==
            (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
                phba->eh_sgl_free_index = 0;
        else
                phba->eh_sgl_free_index++;
}

static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
               struct iscsi_task *task, struct sol_cqe *psol)
{
        struct beiscsi_io_task *io_task = task->dd_data;
        struct be_status_bhs *sts_bhs =
            (struct be_status_bhs *)io_task->cmd_bhs;
        struct iscsi_conn *conn = beiscsi_conn->conn;
        unsigned char *sense;
        u32 resid = 0, exp_cmdsn, max_cmdsn;
        u8 rsp, status, flags;

        exp_cmdsn = (psol->
                     dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
                     & SOL_EXP_CMD_SN_MASK);
        max_cmdsn = ((psol->
                      dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
                      & SOL_EXP_CMD_SN_MASK) +
                     ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
                                / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
        rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
                & SOL_RESP_MASK) >> 16);
        status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
                   & SOL_STS_MASK) >> 8);
        flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
                  & SOL_FLAGS_MASK) >> 24) | 0x80;
        if (!task->sc) {
                if (io_task->scsi_cmnd)
                        scsi_dma_unmap(io_task->scsi_cmnd);

                return;
        }
        task->sc->result = (DID_OK << 16) | status;
        if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
                task->sc->result = DID_ERROR << 16;
                goto unmap;
        }

        /* bidi not initially supported */
        if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
                resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
                                  32] & SOL_RES_CNT_MASK);

                if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
                        task->sc->result = DID_ERROR << 16;

                if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
                        scsi_set_resid(task->sc, resid);
                        if (!status && (scsi_bufflen(task->sc) - resid <
                            task->sc->underflow))
                                task->sc->result = DID_ERROR << 16;
                }
        }

        if (status == SAM_STAT_CHECK_CONDITION) {
                u16 sense_len;
                unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

                sense = sts_bhs->sense_info + sizeof(unsigned short);
                sense_len = be16_to_cpu(*slen);
                memcpy(task->sc->sense_buffer, sense,
                       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
        }

        if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
                if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
                    & SOL_RES_CNT_MASK)
                        conn->rxdata_octets += (psol->
                            dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
                            & SOL_RES_CNT_MASK);
        }
unmap:
        scsi_dma_unmap(io_task->scsi_cmnd);
        iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
                   struct iscsi_task *task, struct sol_cqe *psol)
{
        struct iscsi_logout_rsp *hdr;
        struct beiscsi_io_task *io_task = task->dd_data;
        struct iscsi_conn *conn = beiscsi_conn->conn;

        hdr = (struct iscsi_logout_rsp *)task->hdr;
        hdr->opcode = ISCSI_OP_LOGOUT_RSP;
        hdr->t2wait = 5;
        hdr->t2retain = 0;
        hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
                       & SOL_FLAGS_MASK) >> 24) | 0x80;
        hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
                                  32] & SOL_RESP_MASK);
        hdr->exp_cmdsn = cpu_to_be32(psol->
                        dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
                        & SOL_EXP_CMD_SN_MASK);
        hdr->max_cmdsn = be32_to_cpu((psol->
                        dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
                        & SOL_EXP_CMD_SN_MASK) +
                        ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
                                   / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
        hdr->dlength[0] = 0;
        hdr->dlength[1] = 0;
        hdr->dlength[2] = 0;
        hdr->hlength = 0;
        hdr->itt = io_task->libiscsi_itt;
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
                struct iscsi_task *task, struct sol_cqe *psol)
{
        struct iscsi_tm_rsp *hdr;
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct beiscsi_io_task *io_task = task->dd_data;

        hdr = (struct iscsi_tm_rsp *)task->hdr;
        hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
        hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
                       & SOL_FLAGS_MASK) >> 24) | 0x80;
        hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
                                  32] & SOL_RESP_MASK);
        hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
                        i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
        hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
                        i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
                        ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
                                   / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
        hdr->itt = io_task->libiscsi_itt;
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
                       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
        struct hwi_wrb_context *pwrb_context;
        struct wrb_handle *pwrb_handle = NULL;
        struct hwi_controller *phwi_ctrlr;
        struct iscsi_task *task;
        struct beiscsi_io_task *io_task;
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct iscsi_session *session = conn->session;

        phwi_ctrlr = phba->phwi_ctrlr;
        pwrb_context = &phwi_ctrlr->wrb_context[((psol->
                        dw[offsetof(struct amap_sol_cqe, cid) / 32] &
                        SOL_CID_MASK) >> 6) -
                        phba->fw_config.iscsi_cid_start];
        pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
                        dw[offsetof(struct amap_sol_cqe, wrb_index) /
                        32] & SOL_WRB_INDEX_MASK) >> 16)];
        task = pwrb_handle->pio_handle;

        io_task = task->dd_data;
        spin_lock_bh(&phba->mgmt_sgl_lock);
        free_mgmt_sgl_handle(phba, io_task->psgl_handle);
        spin_unlock_bh(&phba->mgmt_sgl_lock);
        spin_lock_bh(&session->lock);
        free_wrb_handle(phba, pwrb_context, pwrb_handle);
        spin_unlock_bh(&session->lock);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
                       struct iscsi_task *task, struct sol_cqe *psol)
{
        struct iscsi_nopin *hdr;
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct beiscsi_io_task *io_task = task->dd_data;

        hdr = (struct iscsi_nopin *)task->hdr;
        hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
                       & SOL_FLAGS_MASK) >> 24) | 0x80;
        hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
                        i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
        hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
                        i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
                        ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
                                   / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
        hdr->opcode = ISCSI_OP_NOOP_IN;
        hdr->itt = io_task->libiscsi_itt;
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
                             struct beiscsi_hba *phba, struct sol_cqe *psol)
{
        struct hwi_wrb_context *pwrb_context;
        struct wrb_handle *pwrb_handle;
        struct iscsi_wrb *pwrb = NULL;
        struct hwi_controller *phwi_ctrlr;
        struct iscsi_task *task;
        unsigned int type;
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct iscsi_session *session = conn->session;

        phwi_ctrlr = phba->phwi_ctrlr;
        pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
                        (struct amap_sol_cqe, cid) / 32]
                        & SOL_CID_MASK) >> 6) -
                        phba->fw_config.iscsi_cid_start];
        pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
                        dw[offsetof(struct amap_sol_cqe, wrb_index) /
                        32] & SOL_WRB_INDEX_MASK) >> 16)];
        task = pwrb_handle->pio_handle;
        pwrb = pwrb_handle->pwrb;
        type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
                WRB_TYPE_MASK) >> 28;

        spin_lock_bh(&session->lock);
        switch (type) {
        case HWH_TYPE_IO:
        case HWH_TYPE_IO_RD:
                if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
                     ISCSI_OP_NOOP_OUT)
                        be_complete_nopin_resp(beiscsi_conn, task, psol);
                else
                        be_complete_io(beiscsi_conn, task, psol);
                break;

        case HWH_TYPE_LOGOUT:
                if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
                        be_complete_logout(beiscsi_conn, task, psol);
                else
                        be_complete_tmf(beiscsi_conn, task, psol);

                break;

        case HWH_TYPE_LOGIN:
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
                            "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
                            " hwi_complete_cmd- Solicited path\n");
                break;

        case HWH_TYPE_NOP:
                be_complete_nopin_resp(beiscsi_conn, task, psol);
                break;

        default:
                beiscsi_log(phba, KERN_WARNING,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
                            "BM_%d : In hwi_complete_cmd, unknown type = %d "
                            "wrb_index 0x%x CID 0x%x\n", type,
                            ((psol->dw[offsetof(struct amap_iscsi_wrb,
                                        type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
                            ((psol->dw[offsetof(struct amap_sol_cqe,
                                        cid) / 32] & SOL_CID_MASK) >> 6));
                break;
        }

        spin_unlock_bh(&session->lock);
}

static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
                                                 *pasync_ctx, unsigned int is_header,
                                                 unsigned int host_write_ptr)
{
        if (is_header)
                return &pasync_ctx->async_entry[host_write_ptr].
                    header_busy_list;
        else
                return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
}

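/**
 * hwi_get_async_handle - map a default PDU CQE to its buffer handle
 * @phba: driver private structure
 * @beiscsi_conn: driver connection
 * @pasync_ctx: async PDU context
 * @pdpdu_cqe: default PDU CQE from the chip
 * @pcq_index: returned CQ index from the CQE
 *
 * Recovers the buffer physical address from the CQE and looks up the
 * matching handle on the header or data busy list.
 */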
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
                     struct beiscsi_conn *beiscsi_conn,
                     struct hwi_async_pdu_context *pasync_ctx,
                     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
        struct be_bus_address phys_addr;
        struct list_head *pbusy_list;
        struct async_pdu_handle *pasync_handle = NULL;
        unsigned char is_header = 0;

        phys_addr.u.a32.address_lo =
            pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
            ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
              & PDUCQE_DPL_MASK) >> 16);
        phys_addr.u.a32.address_hi =
            pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

        phys_addr.u.a64.address =
            *((unsigned long long *)(&phys_addr.u.a64.address));

        switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
                & PDUCQE_CODE_MASK) {
        case UNSOL_HDR_NOTIFY:
                is_header = 1;

                pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
                        (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
                        index) / 32] & PDUCQE_INDEX_MASK));
                break;
        case UNSOL_DATA_NOTIFY:
                pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
                        dw[offsetof(struct amap_i_t_dpdu_cqe,
                        index) / 32] & PDUCQE_INDEX_MASK));
                break;
        default:
                pbusy_list = NULL;
                beiscsi_log(phba, KERN_WARNING,
                            BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
                            "BM_%d : Unexpected code=%d\n",
                            pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
                            code) / 32] & PDUCQE_CODE_MASK);
                return NULL;
        }

        WARN_ON(list_empty(pbusy_list));
        list_for_each_entry(pasync_handle, pbusy_list, link) {
                if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
                        break;
        }

        WARN_ON(!pasync_handle);

        pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
                             phba->fw_config.iscsi_cid_start;
        pasync_handle->is_header = is_header;
        pasync_handle->buffer_len = ((pdpdu_cqe->
                        dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
                        & PDUCQE_DPL_MASK) >> 16);

        *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
                        index) / 32] & PDUCQE_INDEX_MASK);
        return pasync_handle;
}

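/**
 * hwi_update_async_writables - mark consumed ring entries writable
 * @phba: driver private structure
 * @pasync_ctx: async PDU context
 * @is_header: header ring (1) or data ring (0)
 * @cq_index: CQ index reported by the chip
 *
 * Walks the endpoint read pointer forward to @cq_index, marking each
 * busy entry consumed and accumulating the count of slots that can be
 * reposted to the ring.
 */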
static unsigned int
hwi_update_async_writables(struct beiscsi_hba *phba,
                           struct hwi_async_pdu_context *pasync_ctx,
                           unsigned int is_header, unsigned int cq_index)
{
        struct list_head *pbusy_list;
        struct async_pdu_handle *pasync_handle;
        unsigned int num_entries, writables = 0;
        unsigned int *pep_read_ptr, *pwritables;

        num_entries = pasync_ctx->num_entries;
        if (is_header) {
                pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
                pwritables = &pasync_ctx->async_header.writables;
        } else {
                pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
                pwritables = &pasync_ctx->async_data.writables;
        }

        while ((*pep_read_ptr) != cq_index) {
                (*pep_read_ptr)++;
                *pep_read_ptr = (*pep_read_ptr) % num_entries;

                pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
                                                     *pep_read_ptr);
                if (writables == 0)
                        WARN_ON(list_empty(pbusy_list));

                if (!list_empty(pbusy_list)) {
                        pasync_handle = list_entry(pbusy_list->next,
                                                   struct async_pdu_handle,
                                                   link);
                        WARN_ON(!pasync_handle);
                        pasync_handle->consumed = 1;
                }

                writables++;
        }

        if (!writables) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
                            "BM_%d : Duplicate notification received - index 0x%x!!\n",
                            cq_index);
                WARN_ON(1);
        }

        *pwritables = *pwritables + writables;
        return 0;
}

static void hwi_free_async_msg(struct beiscsi_hba *phba,
                               unsigned int cri)
{
        struct hwi_controller *phwi_ctrlr;
        struct hwi_async_pdu_context *pasync_ctx;
        struct async_pdu_handle *pasync_handle, *tmp_handle;
        struct list_head *plist;

        phwi_ctrlr = phba->phwi_ctrlr;
        pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

        plist = &pasync_ctx->async_entry[cri].wait_queue.list;

        list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
                list_del(&pasync_handle->link);

                if (pasync_handle->is_header) {
                        list_add_tail(&pasync_handle->link,
                                      &pasync_ctx->async_header.free_list);
                        pasync_ctx->async_header.free_entries++;
                } else {
                        list_add_tail(&pasync_handle->link,
                                      &pasync_ctx->async_data.free_list);
                        pasync_ctx->async_data.free_entries++;
                }
        }

        INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
        pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
        pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
}

static struct phys_addr *
hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
                     unsigned int is_header, unsigned int host_write_ptr)
{
        struct phys_addr *pasync_sge = NULL;

        if (is_header)
                pasync_sge = pasync_ctx->async_header.ring_base;
        else
                pasync_sge = pasync_ctx->async_data.ring_base;

        return pasync_sge + host_write_ptr;
}

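/**
 * hwi_post_async_buffers - replenish a default PDU ring
 * @phba: driver private structure
 * @is_header: header ring (1) or data ring (0)
 *
 * Moves free handles (in multiples of 8) onto the busy list, writes
 * their addresses into the ring and rings the RXULP doorbell.
 */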
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
                                   unsigned int is_header)
{
        struct hwi_controller *phwi_ctrlr;
        struct hwi_async_pdu_context *pasync_ctx;
        struct async_pdu_handle *pasync_handle;
        struct list_head *pfree_link, *pbusy_list;
        struct phys_addr *pasync_sge;
        unsigned int ring_id, num_entries;
        unsigned int host_write_num;
        unsigned int writables;
        unsigned int i = 0;
        u32 doorbell = 0;

        phwi_ctrlr = phba->phwi_ctrlr;
        pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
        num_entries = pasync_ctx->num_entries;

        if (is_header) {
                writables = min(pasync_ctx->async_header.writables,
                                pasync_ctx->async_header.free_entries);
                pfree_link = pasync_ctx->async_header.free_list.next;
                host_write_num = pasync_ctx->async_header.host_write_ptr;
                ring_id = phwi_ctrlr->default_pdu_hdr.id;
        } else {
                writables = min(pasync_ctx->async_data.writables,
                                pasync_ctx->async_data.free_entries);
                pfree_link = pasync_ctx->async_data.free_list.next;
                host_write_num = pasync_ctx->async_data.host_write_ptr;
                ring_id = phwi_ctrlr->default_pdu_data.id;
        }

        writables = (writables / 8) * 8;
        if (writables) {
                for (i = 0; i < writables; i++) {
                        pbusy_list =
                            hwi_get_async_busy_list(pasync_ctx, is_header,
                                                    host_write_num);
                        pasync_handle =
                            list_entry(pfree_link, struct async_pdu_handle,
                                       link);
                        WARN_ON(!pasync_handle);
                        pasync_handle->consumed = 0;

                        pfree_link = pfree_link->next;

                        pasync_sge = hwi_get_ring_address(pasync_ctx,
                                        is_header, host_write_num);

                        pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
                        pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

                        list_move(&pasync_handle->link, pbusy_list);

                        host_write_num++;
                        host_write_num = host_write_num % num_entries;
                }

                if (is_header) {
                        pasync_ctx->async_header.host_write_ptr =
                            host_write_num;
                        pasync_ctx->async_header.free_entries -= writables;
                        pasync_ctx->async_header.writables -= writables;
                        pasync_ctx->async_header.busy_entries += writables;
                } else {
                        pasync_ctx->async_data.host_write_ptr = host_write_num;
                        pasync_ctx->async_data.free_entries -= writables;
                        pasync_ctx->async_data.writables -= writables;
                        pasync_ctx->async_data.busy_entries += writables;
                }

                doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
                doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
                doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
                doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
                            << DB_DEF_PDU_CQPROC_SHIFT;

                iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
        }
}

static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
                                         struct beiscsi_conn *beiscsi_conn,
                                         struct i_t_dpdu_cqe *pdpdu_cqe)
{
        struct hwi_controller *phwi_ctrlr;
        struct hwi_async_pdu_context *pasync_ctx;
        struct async_pdu_handle *pasync_handle = NULL;
        unsigned int cq_index = -1;

        phwi_ctrlr = phba->phwi_ctrlr;
        pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

        pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
                                             pdpdu_cqe, &cq_index);
        BUG_ON(pasync_handle->is_header != 0);
        if (pasync_handle->consumed == 0)
                hwi_update_async_writables(phba, pasync_ctx,
                                           pasync_handle->is_header, cq_index);

        hwi_free_async_msg(phba, pasync_handle->cri);
        hwi_post_async_buffers(phba, pasync_handle->is_header);
}

1781 static unsigned int
1782 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1783 struct beiscsi_hba *phba,
1784 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1785 {
1786 struct list_head *plist;
1787 struct async_pdu_handle *pasync_handle;
1788 void *phdr = NULL;
1789 unsigned int hdr_len = 0, buf_len = 0;
1790 unsigned int status, index = 0, offset = 0;
1791 void *pfirst_buffer = NULL;
1792 unsigned int num_buf = 0;
1793
1794 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1795
1796 list_for_each_entry(pasync_handle, plist, link) {
1797 if (index == 0) {
1798 phdr = pasync_handle->pbuffer;
1799 hdr_len = pasync_handle->buffer_len;
1800 } else {
1801 buf_len = pasync_handle->buffer_len;
1802 if (!num_buf) {
1803 pfirst_buffer = pasync_handle->pbuffer;
1804 num_buf++;
1805 }
1806 memcpy(pfirst_buffer + offset,
1807 pasync_handle->pbuffer, buf_len);
1808 offset += buf_len;
1809 }
1810 index++;
1811 }
1812
1813 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1814 (beiscsi_conn->beiscsi_conn_cid -
1815 phba->fw_config.iscsi_cid_start),
1816 phdr, hdr_len, pfirst_buffer,
1817 offset);
1818
1819 hwi_free_async_msg(phba, cri);
1820 return 0;
1821 }
1822
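/*
 * hwi_gather_async_pdu()- Queue header/data pieces of an async PDU
 *
 * A header completion opens the per-CRI wait queue and records how
 * many data bytes to expect; data completions accumulate on that
 * queue until bytes_received catches up with bytes_needed, at which
 * point the PDU is forwarded. A second header for a CRI that is
 * still assembling is treated as fatal (BUG).
 */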
1823 static unsigned int
1824 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1825 struct beiscsi_hba *phba,
1826 struct async_pdu_handle *pasync_handle)
1827 {
1828 struct hwi_async_pdu_context *pasync_ctx;
1829 struct hwi_controller *phwi_ctrlr;
1830 unsigned int bytes_needed = 0, status = 0;
1831 unsigned short cri = pasync_handle->cri;
1832 struct pdu_base *ppdu;
1833
1834 phwi_ctrlr = phba->phwi_ctrlr;
1835 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1836
1837 list_del(&pasync_handle->link);
1838 if (pasync_handle->is_header) {
1839 pasync_ctx->async_header.busy_entries--;
1840 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1841 hwi_free_async_msg(phba, cri);
1842 BUG();
1843 }
1844
1845 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1846 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1847 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1848 (unsigned short)pasync_handle->buffer_len;
1849 list_add_tail(&pasync_handle->link,
1850 &pasync_ctx->async_entry[cri].wait_queue.list);
1851
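/*
 * Recover the iSCSI DataSegmentLength from the BHS: the high byte
 * and the (little-endian) low half-word sit in different dwords of
 * the PDU base, so they are masked out and stitched together here.
 */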
1852 ppdu = pasync_handle->pbuffer;
1853 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1854 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1855 0xFFFF0000) | ((be16_to_cpu((ppdu->
1856 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1857 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1858
1859 if (status == 0) {
1860 pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1861 bytes_needed;
1862
1863 if (bytes_needed == 0)
1864 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1865 pasync_ctx, cri);
1866 }
1867 } else {
1868 pasync_ctx->async_data.busy_entries--;
1869 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1870 list_add_tail(&pasync_handle->link,
1871 &pasync_ctx->async_entry[cri].wait_queue.
1872 list);
1873 pasync_ctx->async_entry[cri].wait_queue.
1874 bytes_received +=
1875 (unsigned short)pasync_handle->buffer_len;
1876
1877 if (pasync_ctx->async_entry[cri].wait_queue.
1878 bytes_received >=
1879 pasync_ctx->async_entry[cri].wait_queue.
1880 bytes_needed)
1881 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1882 pasync_ctx, cri);
1883 }
1884 }
1885 return status;
1886 }
1887
1888 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1889 struct beiscsi_hba *phba,
1890 struct i_t_dpdu_cqe *pdpdu_cqe)
1891 {
1892 struct hwi_controller *phwi_ctrlr;
1893 struct hwi_async_pdu_context *pasync_ctx;
1894 struct async_pdu_handle *pasync_handle = NULL;
1895 unsigned int cq_index = -1;
1896
1897 phwi_ctrlr = phba->phwi_ctrlr;
1898 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1899 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1900 pdpdu_cqe, &cq_index);
1901
1902 if (pasync_handle->consumed == 0)
1903 hwi_update_async_writables(phba, pasync_ctx,
1904 pasync_handle->is_header, cq_index);
1905
1906 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1907 hwi_post_async_buffers(phba, pasync_handle->is_header);
1908 }
1909
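/*
 * beiscsi_process_mcc_isr()- Drain the MCC completion queue
 *
 * Valid CQEs are either async events (only link-state changes are
 * handled) or command completions routed to the MCC layer. The CQ
 * doorbell is rung every 32 entries while draining and re-armed
 * once at the end.
 */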
1910 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1911 {
1912 struct be_queue_info *mcc_cq;
1913 struct be_mcc_compl *mcc_compl;
1914 unsigned int num_processed = 0;
1915
1916 mcc_cq = &phba->ctrl.mcc_obj.cq;
1917 mcc_compl = queue_tail_node(mcc_cq);
1918 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1919 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1920
1921 if (num_processed >= 32) {
1922 hwi_ring_cq_db(phba, mcc_cq->id,
1923 num_processed, 0, 0);
1924 num_processed = 0;
1925 }
1926 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1927 /* Interpret flags as an async trailer */
1928 if (is_link_state_evt(mcc_compl->flags))
1929 /* Interpret compl as an async link evt */
1930 beiscsi_async_link_state_process(phba,
1931 (struct be_async_event_link_state *) mcc_compl);
1932 else
1933 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
1934 "BM_%d : Unsupported Async Event, flags"
1935 " = 0x%08x\n",
1936 mcc_compl->flags);
1937 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1938 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1939 atomic_dec(&phba->ctrl.mcc_obj.q.used);
1940 }
1941
1942 mcc_compl->flags = 0;
1943 queue_tail_inc(mcc_cq);
1944 mcc_compl = queue_tail_node(mcc_cq);
1945 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1946 num_processed++;
1947 }
1948
1949 if (num_processed > 0)
1950 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1951
1952 }
1953
1954 /**
1955 * beiscsi_process_cq()- Process the Completion Queue
1956 * @pbe_eq: Event Q on which the Completion has come
1957 *
1958 * Return:
1959 * Number of Completion Entries processed.
1960 **/
1961 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1962 {
1963 struct be_queue_info *cq;
1964 struct sol_cqe *sol;
1965 struct dmsg_cqe *dmsg;
1966 unsigned int num_processed = 0;
1967 unsigned int tot_nump = 0;
1968 unsigned short code = 0, cid = 0;
1969 struct beiscsi_conn *beiscsi_conn;
1970 struct beiscsi_endpoint *beiscsi_ep;
1971 struct iscsi_endpoint *ep;
1972 struct beiscsi_hba *phba;
1973
1974 cq = pbe_eq->cq;
1975 sol = queue_tail_node(cq);
1976 phba = pbe_eq->phba;
1977
1978 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1979 CQE_VALID_MASK) {
1980 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1981
1982 cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] &
1983 CQE_CID_MASK) >> 6);
1984 code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] &
1985 CQE_CODE_MASK);
1986 ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
1987
1988 beiscsi_ep = ep->dd_data;
1989 beiscsi_conn = beiscsi_ep->conn;
1990
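/*
 * Every 32 entries, tell the hardware how far we have consumed
 * without re-arming, so CQ slots are recycled while polling
 * continues.
 */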
1991 if (num_processed >= 32) {
1992 hwi_ring_cq_db(phba, cq->id,
1993 num_processed, 0, 0);
1994 tot_nump += num_processed;
1995 num_processed = 0;
1996 }
1997
1998 switch (code) {
1999 case SOL_CMD_COMPLETE:
2000 hwi_complete_cmd(beiscsi_conn, phba, sol);
2001 break;
2002 case DRIVERMSG_NOTIFY:
2003 beiscsi_log(phba, KERN_INFO,
2004 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2005 "BM_%d : Received %s[%d] on CID : %d\n",
2006 cqe_desc[code], code, cid);
2007
2008 dmsg = (struct dmsg_cqe *)sol;
2009 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
2010 break;
2011 case UNSOL_HDR_NOTIFY:
2012 beiscsi_log(phba, KERN_INFO,
2013 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2014 "BM_%d : Received %s[%d] on CID : %d\n",
2015 cqe_desc[code], code, cid);
2016
2017 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2018 (struct i_t_dpdu_cqe *)sol);
2019 break;
2020 case UNSOL_DATA_NOTIFY:
2021 beiscsi_log(phba, KERN_INFO,
2022 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2023 "BM_%d : Received %s[%d] on CID : %d\n",
2024 cqe_desc[code], code, cid);
2025
2026 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2027 (struct i_t_dpdu_cqe *)sol);
2028 break;
2029 case CXN_INVALIDATE_INDEX_NOTIFY:
2030 case CMD_INVALIDATED_NOTIFY:
2031 case CXN_INVALIDATE_NOTIFY:
2032 beiscsi_log(phba, KERN_ERR,
2033 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2034 "BM_%d : Ignoring %s[%d] on CID : %d\n",
2035 cqe_desc[code], code, cid);
2036 break;
2037 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
2038 case CMD_KILLED_INVALID_STATSN_RCVD:
2039 case CMD_KILLED_INVALID_R2T_RCVD:
2040 case CMD_CXN_KILLED_LUN_INVALID:
2041 case CMD_CXN_KILLED_ICD_INVALID:
2042 case CMD_CXN_KILLED_ITT_INVALID:
2043 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
2044 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
2045 beiscsi_log(phba, KERN_ERR,
2046 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2047 "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
2048 cqe_desc[code], code, cid);
2049 break;
2050 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
2051 beiscsi_log(phba, KERN_ERR,
2052 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2053 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
2054 cqe_desc[code], code, cid);
2055 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
2056 (struct i_t_dpdu_cqe *) sol);
2057 break;
2058 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2059 case CXN_KILLED_BURST_LEN_MISMATCH:
2060 case CXN_KILLED_AHS_RCVD:
2061 case CXN_KILLED_HDR_DIGEST_ERR:
2062 case CXN_KILLED_UNKNOWN_HDR:
2063 case CXN_KILLED_STALE_ITT_TTT_RCVD:
2064 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
2065 case CXN_KILLED_TIMED_OUT:
2066 case CXN_KILLED_FIN_RCVD:
2067 case CXN_KILLED_RST_SENT:
2068 case CXN_KILLED_RST_RCVD:
2069 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2070 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2071 case CXN_KILLED_OVER_RUN_RESIDUAL:
2072 case CXN_KILLED_UNDER_RUN_RESIDUAL:
2073 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
2074 beiscsi_log(phba, KERN_ERR,
2075 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2076 "BM_%d : Event %s[%d] received on CID : %d\n",
2077 cqe_desc[code], code, cid);
2078 if (beiscsi_conn)
2079 iscsi_conn_failure(beiscsi_conn->conn,
2080 ISCSI_ERR_CONN_FAILED);
2081 break;
2082 default:
2083 beiscsi_log(phba, KERN_ERR,
2084 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2085 "BM_%d : Invalid CQE Event Received Code : %d"
2086 "CID 0x%x...\n",
2087 code, cid);
2088 break;
2089 }
2090
2091 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2092 queue_tail_inc(cq);
2093 sol = queue_tail_node(cq);
2094 num_processed++;
2095 }
2096
2097 if (num_processed > 0) {
2098 tot_nump += num_processed;
2099 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
2100 }
2101 return tot_nump;
2102 }
2103
2104 void beiscsi_process_all_cqs(struct work_struct *work)
2105 {
2106 unsigned long flags;
2107 struct hwi_controller *phwi_ctrlr;
2108 struct hwi_context_memory *phwi_context;
2109 struct beiscsi_hba *phba;
2110 struct be_eq_obj *pbe_eq =
2111 container_of(work, struct be_eq_obj, work_cqs);
2112
2113 phba = pbe_eq->phba;
2114 phwi_ctrlr = phba->phwi_ctrlr;
2115 phwi_context = phwi_ctrlr->phwi_ctxt;
2116
2117 if (pbe_eq->todo_mcc_cq) {
2118 spin_lock_irqsave(&phba->isr_lock, flags);
2119 pbe_eq->todo_mcc_cq = false;
2120 spin_unlock_irqrestore(&phba->isr_lock, flags);
2121 beiscsi_process_mcc_isr(phba);
2122 }
2123
2124 if (pbe_eq->todo_cq) {
2125 spin_lock_irqsave(&phba->isr_lock, flags);
2126 pbe_eq->todo_cq = false;
2127 spin_unlock_irqrestore(&phba->isr_lock, flags);
2128 beiscsi_process_cq(pbe_eq);
2129 }
2130
2131 /* rearm EQ for further interrupts */
2132 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2133 }
2134
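/*
 * be_iopoll()- blk_iopoll callback for the iSCSI CQs
 *
 * Processes completions for this EQ's CQ; if fewer entries than the
 * budget were found, polling is completed and the EQ is re-armed so
 * interrupts resume.
 */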
2135 static int be_iopoll(struct blk_iopoll *iop, int budget)
2136 {
2137 unsigned int ret;
2138 struct beiscsi_hba *phba;
2139 struct be_eq_obj *pbe_eq;
2140
2141 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2142 ret = beiscsi_process_cq(pbe_eq);
2143 if (ret < budget) {
2144 phba = pbe_eq->phba;
2145 blk_iopoll_complete(iop);
2146 beiscsi_log(phba, KERN_INFO,
2147 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2148 "BM_%d : rearm pbe_eq->q.id =%d\n",
2149 pbe_eq->q.id);
2150 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2151 }
2152 return ret;
2153 }
2154
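/*
 * hwi_write_sgl_v2()- Populate the v2 WRB and SGL for an I/O task
 *
 * The first two scatterlist elements are written inline into the
 * WRB (sge0/sge1), then the full list is laid out in the task's SGL
 * fragment page with last_sge marking the final entry. The v2 WRB
 * layout is the one selected for the newer (SKH-R) adapters.
 */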
2155 static void
2156 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2157 unsigned int num_sg, struct beiscsi_io_task *io_task)
2158 {
2159 struct iscsi_sge *psgl;
2160 unsigned int sg_len, index;
2161 unsigned int sge_len = 0;
2162 unsigned long long addr;
2163 struct scatterlist *l_sg;
2164 unsigned int offset;
2165
2166 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
2167 io_task->bhs_pa.u.a32.address_lo);
2168 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
2169 io_task->bhs_pa.u.a32.address_hi);
2170
2171 l_sg = sg;
2172 for (index = 0; (index < num_sg) && (index < 2); index++,
2173 sg = sg_next(sg)) {
2174 if (index == 0) {
2175 sg_len = sg_dma_len(sg);
2176 addr = (u64) sg_dma_address(sg);
2177 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2178 sge0_addr_lo, pwrb,
2179 lower_32_bits(addr));
2180 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2181 sge0_addr_hi, pwrb,
2182 upper_32_bits(addr));
2183 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2184 sge0_len, pwrb,
2185 sg_len);
2186 sge_len = sg_len;
2187 } else {
2188 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
2189 pwrb, sge_len);
2190 sg_len = sg_dma_len(sg);
2191 addr = (u64) sg_dma_address(sg);
2192 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2193 sge1_addr_lo, pwrb,
2194 lower_32_bits(addr));
2195 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2196 sge1_addr_hi, pwrb,
2197 upper_32_bits(addr));
2198 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2199 sge1_len, pwrb,
2200 sg_len);
2201 }
2202 }
2203 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2204 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2205
2206 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2207
2208 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2209 io_task->bhs_pa.u.a32.address_hi);
2210 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2211 io_task->bhs_pa.u.a32.address_lo);
2212
2213 if (num_sg == 1) {
2214 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2215 1);
2216 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2217 0);
2218 } else if (num_sg == 2) {
2219 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2220 0);
2221 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2222 1);
2223 } else {
2224 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2225 0);
2226 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2227 0);
2228 }
2229
2230 sg = l_sg;
2231 psgl++;
2232 psgl++;
2233 offset = 0;
2234 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2235 sg_len = sg_dma_len(sg);
2236 addr = (u64) sg_dma_address(sg);
2237 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2238 lower_32_bits(addr));
2239 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2240 upper_32_bits(addr));
2241 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2242 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2243 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2244 offset += sg_len;
2245 }
2246 psgl--;
2247 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2248 }
2249
2250 static void
2251 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2252 unsigned int num_sg, struct beiscsi_io_task *io_task)
2253 {
2254 struct iscsi_sge *psgl;
2255 unsigned int sg_len, index;
2256 unsigned int sge_len = 0;
2257 unsigned long long addr;
2258 struct scatterlist *l_sg;
2259 unsigned int offset;
2260
2261 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2262 io_task->bhs_pa.u.a32.address_lo);
2263 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2264 io_task->bhs_pa.u.a32.address_hi);
2265
2266 l_sg = sg;
2267 for (index = 0; (index < num_sg) && (index < 2); index++,
2268 sg = sg_next(sg)) {
2269 if (index == 0) {
2270 sg_len = sg_dma_len(sg);
2271 addr = (u64) sg_dma_address(sg);
2272 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2273 ((u32)(addr & 0xFFFFFFFF)));
2274 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2275 ((u32)(addr >> 32)));
2276 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2277 sg_len);
2278 sge_len = sg_len;
2279 } else {
2280 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2281 pwrb, sge_len);
2282 sg_len = sg_dma_len(sg);
2283 addr = (u64) sg_dma_address(sg);
2284 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
2285 ((u32)(addr & 0xFFFFFFFF)));
2286 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
2287 ((u32)(addr >> 32)));
2288 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2289 sg_len);
2290 }
2291 }
2292 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2293 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2294
2295 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2296
2297 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2298 io_task->bhs_pa.u.a32.address_hi);
2299 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2300 io_task->bhs_pa.u.a32.address_lo);
2301
2302 if (num_sg == 1) {
2303 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2304 1);
2305 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2306 0);
2307 } else if (num_sg == 2) {
2308 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2309 0);
2310 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2311 1);
2312 } else {
2313 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2314 0);
2315 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2316 0);
2317 }
2318 sg = l_sg;
2319 psgl++;
2320 psgl++;
2321 offset = 0;
2322 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2323 sg_len = sg_dma_len(sg);
2324 addr = (u64) sg_dma_address(sg);
2325 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2326 (addr & 0xFFFFFFFF));
2327 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2328 (addr >> 32));
2329 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2330 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2331 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2332 offset += sg_len;
2333 }
2334 psgl--;
2335 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2336 }
2337
2338 /**
2339 * hwi_write_buffer()- Populate the WRB with task info
2340 * @pwrb: ptr to the WRB entry
2341 * @task: iscsi task which is to be executed
2342 **/
2343 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2344 {
2345 struct iscsi_sge *psgl;
2346 struct beiscsi_io_task *io_task = task->dd_data;
2347 struct beiscsi_conn *beiscsi_conn = io_task->conn;
2348 struct beiscsi_hba *phba = beiscsi_conn->phba;
2349 uint8_t dsp_value = 0;
2350
2351 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2352 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2353 io_task->bhs_pa.u.a32.address_lo);
2354 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2355 io_task->bhs_pa.u.a32.address_hi);
2356
2357 if (task->data) {
2358
2359 /* Check for the data_count */
2360 dsp_value = (task->data_count) ? 1 : 0;
2361
2362 if (chip_skh_r(phba->pcidev))
2363 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
2364 pwrb, dsp_value);
2365 else
2366 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
2367 pwrb, dsp_value);
2368
2369 /* Map addr only if there is data_count */
2370 if (dsp_value) {
2371 io_task->mtask_addr = pci_map_single(phba->pcidev,
2372 task->data,
2373 task->data_count,
2374 PCI_DMA_TODEVICE);
2375 io_task->mtask_data_count = task->data_count;
2376 } else
2377 io_task->mtask_addr = 0;
2378
2379 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2380 lower_32_bits(io_task->mtask_addr));
2381 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2382 upper_32_bits(io_task->mtask_addr));
2383 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2384 task->data_count);
2385
2386 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2387 } else {
2388 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2389 io_task->mtask_addr = 0;
2390 }
2391
2392 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2393
2394 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2395
2396 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2397 io_task->bhs_pa.u.a32.address_hi);
2398 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2399 io_task->bhs_pa.u.a32.address_lo);
2400 if (task->data) {
2401 psgl++;
2402 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2403 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2404 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2405 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2406 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2407 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2408
2409 psgl++;
2410 if (task->data) {
2411 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2412 lower_32_bits(io_task->mtask_addr));
2413 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2414 upper_32_bits(io_task->mtask_addr));
2415 }
2416 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2417 }
2418 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2419 }
2420
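/*
 * beiscsi_find_mem_req()- Size every DMA pool the driver needs
 *
 * Fills phba->mem_req[] with the byte count for each memory region
 * (WRBs, SGLs, async PDU buffers/rings/handles, ...) based on the
 * hba_parameters; beiscsi_alloc_mem() allocates from this table
 * afterwards.
 */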
2421 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2422 {
2423 unsigned int num_cq_pages, num_async_pdu_buf_pages;
2424 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2425 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2426
2427 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2428 sizeof(struct sol_cqe));
2429 num_async_pdu_buf_pages =
2430 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2431 phba->params.defpdu_hdr_sz);
2432 num_async_pdu_buf_sgl_pages =
2433 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2434 sizeof(struct phys_addr));
2435 num_async_pdu_data_pages =
2436 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2437 phba->params.defpdu_data_sz);
2438 num_async_pdu_data_sgl_pages =
2439 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2440 sizeof(struct phys_addr));
2441
2442 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2443
2444 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2445 BE_ISCSI_PDU_HEADER_SIZE;
2446 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2447 sizeof(struct hwi_context_memory);
2448
2449
2450 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2451 * (phba->params.wrbs_per_cxn)
2452 * phba->params.cxns_per_ctrl;
2453 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2454 (phba->params.wrbs_per_cxn);
2455 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2456 phba->params.cxns_per_ctrl);
2457
2458 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2459 phba->params.icds_per_ctrl;
2460 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2461 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2462
2463 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
2464 num_async_pdu_buf_pages * PAGE_SIZE;
2465 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
2466 num_async_pdu_data_pages * PAGE_SIZE;
2467 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
2468 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
2469 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
2470 num_async_pdu_data_sgl_pages * PAGE_SIZE;
2471 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
2472 phba->params.asyncpdus_per_ctrl *
2473 sizeof(struct async_pdu_handle);
2474 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
2475 phba->params.asyncpdus_per_ctrl *
2476 sizeof(struct async_pdu_handle);
2477 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
2478 sizeof(struct hwi_async_pdu_context) +
2479 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
2480 }
2481
2482 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2483 {
2484 struct be_mem_descriptor *mem_descr;
2485 dma_addr_t bus_add;
2486 struct mem_array *mem_arr, *mem_arr_orig;
2487 unsigned int i, j, alloc_size, curr_alloc_size;
2488
2489 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2490 if (!phba->phwi_ctrlr)
2491 return -ENOMEM;
2492
2493 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2494 GFP_KERNEL);
2495 if (!phba->init_mem) {
2496 kfree(phba->phwi_ctrlr);
2497 return -ENOMEM;
2498 }
2499
2500 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2501 GFP_KERNEL);
2502 if (!mem_arr_orig) {
2503 kfree(phba->init_mem);
2504 kfree(phba->phwi_ctrlr);
2505 return -ENOMEM;
2506 }
2507
2508 mem_descr = phba->init_mem;
2509 for (i = 0; i < SE_MEM_MAX; i++) {
2510 j = 0;
2511 mem_arr = mem_arr_orig;
2512 alloc_size = phba->mem_req[i];
2513 memset(mem_arr, 0, sizeof(struct mem_array) *
2514 BEISCSI_MAX_FRAGS_INIT);
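/*
 * Allocate each region in chunks: start with the largest
 * physically contiguous size allowed (be_max_phys_size) and, on
 * failure, retry with the next lower power of two or half the
 * size, down to BE_MIN_MEM_SIZE, fragmenting the region across
 * up to BEISCSI_MAX_FRAGS_INIT mem_array entries.
 */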
2515 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2516 do {
2517 mem_arr->virtual_address = pci_alloc_consistent(
2518 phba->pcidev,
2519 curr_alloc_size,
2520 &bus_add);
2521 if (!mem_arr->virtual_address) {
2522 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2523 goto free_mem;
2524 if (curr_alloc_size -
2525 rounddown_pow_of_two(curr_alloc_size))
2526 curr_alloc_size = rounddown_pow_of_two
2527 (curr_alloc_size);
2528 else
2529 curr_alloc_size = curr_alloc_size / 2;
2530 } else {
2531 mem_arr->bus_address.u.
2532 a64.address = (__u64) bus_add;
2533 mem_arr->size = curr_alloc_size;
2534 alloc_size -= curr_alloc_size;
2535 curr_alloc_size = min(be_max_phys_size *
2536 1024, alloc_size);
2537 j++;
2538 mem_arr++;
2539 }
2540 } while (alloc_size);
2541 mem_descr->num_elements = j;
2542 mem_descr->size_in_bytes = phba->mem_req[i];
2543 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2544 GFP_KERNEL);
2545 if (!mem_descr->mem_array)
2546 goto free_mem;
2547
2548 memcpy(mem_descr->mem_array, mem_arr_orig,
2549 sizeof(struct mem_array) * j);
2550 mem_descr++;
2551 }
2552 kfree(mem_arr_orig);
2553 return 0;
2554 free_mem:
2555 mem_descr->num_elements = j;
2556 while ((i) || (j)) {
2557 for (j = mem_descr->num_elements; j > 0; j--) {
2558 pci_free_consistent(phba->pcidev,
2559 mem_descr->mem_array[j - 1].size,
2560 mem_descr->mem_array[j - 1].
2561 virtual_address,
2562 (unsigned long)mem_descr->
2563 mem_array[j - 1].
2564 bus_address.u.a64.address);
2565 }
2566 if (i) {
2567 i--;
2568 kfree(mem_descr->mem_array);
2569 mem_descr--;
2570 }
2571 }
2572 kfree(mem_arr_orig);
2573 kfree(phba->init_mem);
2574 kfree(phba->phwi_ctrlr);
2575 return -ENOMEM;
2576 }
2577
2578 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2579 {
2580 beiscsi_find_mem_req(phba);
2581 return beiscsi_alloc_mem(phba);
2582 }
2583
2584 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2585 {
2586 struct pdu_data_out *pdata_out;
2587 struct pdu_nop_out *pnop_out;
2588 struct be_mem_descriptor *mem_descr;
2589
2590 mem_descr = phba->init_mem;
2591 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2592 pdata_out =
2593 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2594 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2595
2596 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2597 IIOC_SCSI_DATA);
2598
2599 pnop_out =
2600 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2601 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2602
2603 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2604 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2605 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2606 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2607 }
2608
2609 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2610 {
2611 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2612 struct wrb_handle *pwrb_handle = NULL;
2613 struct hwi_controller *phwi_ctrlr;
2614 struct hwi_wrb_context *pwrb_context;
2615 struct iscsi_wrb *pwrb = NULL;
2616 unsigned int num_cxn_wrbh = 0;
2617 unsigned int num_cxn_wrb = 0, j, idx = 0, index;
2618
2619 mem_descr_wrbh = phba->init_mem;
2620 mem_descr_wrbh += HWI_MEM_WRBH;
2621
2622 mem_descr_wrb = phba->init_mem;
2623 mem_descr_wrb += HWI_MEM_WRB;
2624 phwi_ctrlr = phba->phwi_ctrlr;
2625
2626 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2627 pwrb_context = &phwi_ctrlr->wrb_context[index];
2628 pwrb_context->pwrb_handle_base =
2629 kzalloc(sizeof(struct wrb_handle *) *
2630 phba->params.wrbs_per_cxn, GFP_KERNEL);
2631 if (!pwrb_context->pwrb_handle_base) {
2632 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2633 "BM_%d : Mem Alloc Failed. Failing to load\n");
2634 goto init_wrb_hndl_failed;
2635 }
2636 pwrb_context->pwrb_handle_basestd =
2637 kzalloc(sizeof(struct wrb_handle *) *
2638 phba->params.wrbs_per_cxn, GFP_KERNEL);
2639 if (!pwrb_context->pwrb_handle_basestd) {
2640 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2641 "BM_%d : Mem Alloc Failed. Failing to load\n");
2642 goto init_wrb_hndl_failed;
2643 }
2644 if (!num_cxn_wrbh) {
2645 pwrb_handle =
2646 mem_descr_wrbh->mem_array[idx].virtual_address;
2647 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2648 ((sizeof(struct wrb_handle)) *
2649 phba->params.wrbs_per_cxn));
2650 idx++;
2651 }
2652 pwrb_context->alloc_index = 0;
2653 pwrb_context->wrb_handles_available = 0;
2654 pwrb_context->free_index = 0;
2655
2656 if (num_cxn_wrbh) {
2657 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2658 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2659 pwrb_context->pwrb_handle_basestd[j] =
2660 pwrb_handle;
2661 pwrb_context->wrb_handles_available++;
2662 pwrb_handle->wrb_index = j;
2663 pwrb_handle++;
2664 }
2665 num_cxn_wrbh--;
2666 }
2667 }
2668 idx = 0;
2669 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2670 pwrb_context = &phwi_ctrlr->wrb_context[index];
2671 if (!num_cxn_wrb) {
2672 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2673 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2674 ((sizeof(struct iscsi_wrb) *
2675 phba->params.wrbs_per_cxn));
2676 idx++;
2677 }
2678
2679 if (num_cxn_wrb) {
2680 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2681 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2682 pwrb_handle->pwrb = pwrb;
2683 pwrb++;
2684 }
2685 num_cxn_wrb--;
2686 }
2687 }
2688 return 0;
2689 init_wrb_hndl_failed:
2690 for (j = index; j > 0; j--) {
2691 pwrb_context = &phwi_ctrlr->wrb_context[j];
2692 kfree(pwrb_context->pwrb_handle_base);
2693 kfree(pwrb_context->pwrb_handle_basestd);
2694 }
2695 return -ENOMEM;
2696 }
2697
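/*
 * hwi_init_async_pdu_ctx()- Build the default PDU bookkeeping
 *
 * Overlays the async PDU context on HWI_MEM_ASYNC_PDU_CONTEXT and
 * carves the header and data buffer pools into asyncpdus_per_ctrl
 * handles, each pointing at its slice of the DMA region, threaded
 * onto the header/data free lists.
 */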
2698 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2699 {
2700 struct hwi_controller *phwi_ctrlr;
2701 struct hba_parameters *p = &phba->params;
2702 struct hwi_async_pdu_context *pasync_ctx;
2703 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2704 unsigned int index, idx, num_per_mem, num_async_data;
2705 struct be_mem_descriptor *mem_descr;
2706
2707 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2708 mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2709
2710 phwi_ctrlr = phba->phwi_ctrlr;
2711 phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2712 mem_descr->mem_array[0].virtual_address;
2713 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2714 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2715
2716 pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
2717 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
2718
2719 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2720 mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2721 if (mem_descr->mem_array[0].virtual_address) {
2722 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2723 "BM_%d : hwi_init_async_pdu_ctx"
2724 " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
2725 mem_descr->mem_array[0].virtual_address);
2726 } else
2727 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2728 "BM_%d : No Virtual address\n");
2729
2730 pasync_ctx->async_header.va_base =
2731 mem_descr->mem_array[0].virtual_address;
2732
2733 pasync_ctx->async_header.pa_base.u.a64.address =
2734 mem_descr->mem_array[0].bus_address.u.a64.address;
2735
2736 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2737 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2738 if (mem_descr->mem_array[0].virtual_address) {
2739 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2740 "BM_%d : hwi_init_async_pdu_ctx"
2741 " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
2742 mem_descr->mem_array[0].virtual_address);
2743 } else
2744 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2745 "BM_%d : No Virtual address\n");
2746
2747 pasync_ctx->async_header.ring_base =
2748 mem_descr->mem_array[0].virtual_address;
2749
2750 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2751 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2752 if (mem_descr->mem_array[0].virtual_address) {
2753 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2754 "BM_%d : hwi_init_async_pdu_ctx"
2755 " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
2756 mem_descr->mem_array[0].virtual_address);
2757 } else
2758 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2759 "BM_%d : No Virtual address\n");
2760
2761 pasync_ctx->async_header.handle_base =
2762 mem_descr->mem_array[0].virtual_address;
2763 pasync_ctx->async_header.writables = 0;
2764 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2765
2766
2767 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2768 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2769 if (mem_descr->mem_array[0].virtual_address) {
2770 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2771 "BM_%d : hwi_init_async_pdu_ctx"
2772 " HWI_MEM_ASYNC_DATA_RING va=%p\n",
2773 mem_descr->mem_array[0].virtual_address);
2774 } else
2775 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2776 "BM_%d : No Virtual address\n");
2777
2778 pasync_ctx->async_data.ring_base =
2779 mem_descr->mem_array[0].virtual_address;
2780
2781 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2782 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2783 if (!mem_descr->mem_array[0].virtual_address)
2784 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2785 "BM_%d : No Virtual address\n");
2786
2787 pasync_ctx->async_data.handle_base =
2788 mem_descr->mem_array[0].virtual_address;
2789 pasync_ctx->async_data.writables = 0;
2790 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2791
2792 pasync_header_h =
2793 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2794 pasync_data_h =
2795 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2796
2797 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2798 mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2799 if (mem_descr->mem_array[0].virtual_address) {
2800 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2801 "BM_%d : hwi_init_async_pdu_ctx"
2802 " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
2803 mem_descr->mem_array[0].virtual_address);
2804 } else
2805 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2806 "BM_%d : No Virtual address\n");
2807
2808 idx = 0;
2809 pasync_ctx->async_data.va_base =
2810 mem_descr->mem_array[idx].virtual_address;
2811 pasync_ctx->async_data.pa_base.u.a64.address =
2812 mem_descr->mem_array[idx].bus_address.u.a64.address;
2813
2814 num_async_data = ((mem_descr->mem_array[idx].size) /
2815 phba->params.defpdu_data_sz);
2816 num_per_mem = 0;
2817
2818 for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2819 pasync_header_h->cri = -1;
2820 pasync_header_h->index = (char)index;
2821 INIT_LIST_HEAD(&pasync_header_h->link);
2822 pasync_header_h->pbuffer =
2823 (void *)((unsigned long)
2824 (pasync_ctx->async_header.va_base) +
2825 (p->defpdu_hdr_sz * index));
2826
2827 pasync_header_h->pa.u.a64.address =
2828 pasync_ctx->async_header.pa_base.u.a64.address +
2829 (p->defpdu_hdr_sz * index);
2830
2831 list_add_tail(&pasync_header_h->link,
2832 &pasync_ctx->async_header.free_list);
2833 pasync_header_h++;
2834 pasync_ctx->async_header.free_entries++;
2835 pasync_ctx->async_header.writables++;
2836
2837 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2838 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2839 header_busy_list);
2840 pasync_data_h->cri = -1;
2841 pasync_data_h->index = (char)index;
2842 INIT_LIST_HEAD(&pasync_data_h->link);
2843
2844 if (!num_async_data) {
2845 num_per_mem = 0;
2846 idx++;
2847 pasync_ctx->async_data.va_base =
2848 mem_descr->mem_array[idx].virtual_address;
2849 pasync_ctx->async_data.pa_base.u.a64.address =
2850 mem_descr->mem_array[idx].
2851 bus_address.u.a64.address;
2852
2853 num_async_data = ((mem_descr->mem_array[idx].size) /
2854 phba->params.defpdu_data_sz);
2855 }
2856 pasync_data_h->pbuffer =
2857 (void *)((unsigned long)
2858 (pasync_ctx->async_data.va_base) +
2859 (p->defpdu_data_sz * num_per_mem));
2860
2861 pasync_data_h->pa.u.a64.address =
2862 pasync_ctx->async_data.pa_base.u.a64.address +
2863 (p->defpdu_data_sz * num_per_mem);
2864 num_per_mem++;
2865 num_async_data--;
2866
2867 list_add_tail(&pasync_data_h->link,
2868 &pasync_ctx->async_data.free_list);
2869 pasync_data_h++;
2870 pasync_ctx->async_data.free_entries++;
2871 pasync_ctx->async_data.writables++;
2872
2873 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2874 }
2875
2876 pasync_ctx->async_header.host_write_ptr = 0;
2877 pasync_ctx->async_header.ep_read_ptr = -1;
2878 pasync_ctx->async_data.host_write_ptr = 0;
2879 pasync_ctx->async_data.ep_read_ptr = -1;
2880 }
2881
2882 static int
2883 be_sgl_create_contiguous(void *virtual_address,
2884 u64 physical_address, u32 length,
2885 struct be_dma_mem *sgl)
2886 {
2887 WARN_ON(!virtual_address);
2888 WARN_ON(!physical_address);
2889 WARN_ON(length == 0);
2890 WARN_ON(!sgl);
2891
2892 sgl->va = virtual_address;
2893 sgl->dma = (unsigned long)physical_address;
2894 sgl->size = length;
2895
2896 return 0;
2897 }
2898
2899 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2900 {
2901 memset(sgl, 0, sizeof(*sgl));
2902 }
2903
2904 static void
2905 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2906 struct mem_array *pmem, struct be_dma_mem *sgl)
2907 {
2908 if (sgl->va)
2909 be_sgl_destroy_contiguous(sgl);
2910
2911 be_sgl_create_contiguous(pmem->virtual_address,
2912 pmem->bus_address.u.a64.address,
2913 pmem->size, sgl);
2914 }
2915
2916 static void
2917 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2918 struct mem_array *pmem, struct be_dma_mem *sgl)
2919 {
2920 if (sgl->va)
2921 be_sgl_destroy_contiguous(sgl);
2922
2923 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2924 pmem->bus_address.u.a64.address,
2925 pmem->size, sgl);
2926 }
2927
2928 static int be_fill_queue(struct be_queue_info *q,
2929 u16 len, u16 entry_size, void *vaddress)
2930 {
2931 struct be_dma_mem *mem = &q->dma_mem;
2932
2933 memset(q, 0, sizeof(*q));
2934 q->len = len;
2935 q->entry_size = entry_size;
2936 mem->size = len * entry_size;
2937 mem->va = vaddress;
2938 if (!mem->va)
2939 return -ENOMEM;
2940 memset(mem->va, 0, mem->size);
2941 return 0;
2942 }
2943
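/*
 * beiscsi_create_eqs()- Create one event queue per CPU
 *
 * With MSI-X enabled an extra EQ is created for MCC traffic; on any
 * failure the EQ memory allocated so far is freed.
 */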
2944 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2945 struct hwi_context_memory *phwi_context)
2946 {
2947 unsigned int i, num_eq_pages;
2948 int ret = 0, eq_for_mcc;
2949 struct be_queue_info *eq;
2950 struct be_dma_mem *mem;
2951 void *eq_vaddress;
2952 dma_addr_t paddr;
2953
2954 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2955 sizeof(struct be_eq_entry));
2956
2957 if (phba->msix_enabled)
2958 eq_for_mcc = 1;
2959 else
2960 eq_for_mcc = 0;
2961 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2962 eq = &phwi_context->be_eq[i].q;
2963 mem = &eq->dma_mem;
2964 phwi_context->be_eq[i].phba = phba;
2965 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2966 num_eq_pages * PAGE_SIZE,
2967 &paddr);
2968 if (!eq_vaddress)
2969 goto create_eq_error;
2970
2971 mem->va = eq_vaddress;
2972 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2973 sizeof(struct be_eq_entry), eq_vaddress);
2974 if (ret) {
2975 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2976 "BM_%d : be_fill_queue Failed for EQ\n");
2977 goto create_eq_error;
2978 }
2979
2980 mem->dma = paddr;
2981 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2982 phwi_context->cur_eqd);
2983 if (ret) {
2984 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2985 "BM_%d : beiscsi_cmd_eq_create"
2986 "Failed for EQ\n");
2987 goto create_eq_error;
2988 }
2989
2990 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2991 "BM_%d : eqid = %d\n",
2992 phwi_context->be_eq[i].q.id);
2993 }
2994 return 0;
2995 create_eq_error:
2996 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2997 eq = &phwi_context->be_eq[i].q;
2998 mem = &eq->dma_mem;
2999 if (mem->va)
3000 pci_free_consistent(phba->pcidev, num_eq_pages
3001 * PAGE_SIZE,
3002 mem->va, mem->dma);
3003 }
3004 return ret;
3005 }
3006
3007 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
3008 struct hwi_context_memory *phwi_context)
3009 {
3010 unsigned int i, num_cq_pages;
3011 int ret = 0;
3012 struct be_queue_info *cq, *eq;
3013 struct be_dma_mem *mem;
3014 struct be_eq_obj *pbe_eq;
3015 void *cq_vaddress;
3016 dma_addr_t paddr;
3017
3018 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
3019 sizeof(struct sol_cqe));
3020
3021 for (i = 0; i < phba->num_cpus; i++) {
3022 cq = &phwi_context->be_cq[i];
3023 eq = &phwi_context->be_eq[i].q;
3024 pbe_eq = &phwi_context->be_eq[i];
3025 pbe_eq->cq = cq;
3026 pbe_eq->phba = phba;
3027 mem = &cq->dma_mem;
3028 cq_vaddress = pci_alloc_consistent(phba->pcidev,
3029 num_cq_pages * PAGE_SIZE,
3030 &paddr);
3031 if (!cq_vaddress)
3032 goto create_cq_error;
3033 ret = be_fill_queue(cq, phba->params.num_cq_entries,
3034 sizeof(struct sol_cqe), cq_vaddress);
3035 if (ret) {
3036 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3037 "BM_%d : be_fill_queue Failed "
3038 "for ISCSI CQ\n");
3039 goto create_cq_error;
3040 }
3041
3042 mem->dma = paddr;
3043 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
3044 false, 0);
3045 if (ret) {
3046 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3047 "BM_%d : beiscsi_cmd_eq_create"
3048 "Failed for ISCSI CQ\n");
3049 goto create_cq_error;
3050 }
3051 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3052 "BM_%d : iscsi cq_id is %d for eq_id %d\n"
3053 "iSCSI CQ CREATED\n", cq->id, eq->id);
3054 }
3055 return 0;
3056
3057 create_cq_error:
3058 for (i = 0; i < phba->num_cpus; i++) {
3059 cq = &phwi_context->be_cq[i];
3060 mem = &cq->dma_mem;
3061 if (mem->va)
3062 pci_free_consistent(phba->pcidev, num_cq_pages
3063 * PAGE_SIZE,
3064 mem->va, mem->dma);
3065 }
3066 return ret;
3067
3068 }
3069
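/*
 * beiscsi_create_def_hdr()- Create the default PDU header ring
 *
 * The ring entries are struct phys_addr descriptors taken from
 * HWI_MEM_ASYNC_HEADER_RING; once the queue exists, an initial set
 * of header buffers is posted via hwi_post_async_buffers().
 */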
3070 static int
3071 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3072 struct hwi_context_memory *phwi_context,
3073 struct hwi_controller *phwi_ctrlr,
3074 unsigned int def_pdu_ring_sz)
3075 {
3076 unsigned int idx;
3077 int ret;
3078 struct be_queue_info *dq, *cq;
3079 struct be_dma_mem *mem;
3080 struct be_mem_descriptor *mem_descr;
3081 void *dq_vaddress;
3082
3083 idx = 0;
3084 dq = &phwi_context->be_def_hdrq;
3085 cq = &phwi_context->be_cq[0];
3086 mem = &dq->dma_mem;
3087 mem_descr = phba->init_mem;
3088 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
3089 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3090 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
3091 sizeof(struct phys_addr),
3092 sizeof(struct phys_addr), dq_vaddress);
3093 if (ret) {
3094 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3095 "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
3096 return ret;
3097 }
3098 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3099 bus_address.u.a64.address;
3100 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
3101 def_pdu_ring_sz,
3102 phba->params.defpdu_hdr_sz);
3103 if (ret) {
3104 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3105 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
3106 return ret;
3107 }
3108 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
3109 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3110 "BM_%d : iscsi def pdu id is %d\n",
3111 phwi_context->be_def_hdrq.id);
3112
3113 hwi_post_async_buffers(phba, 1);
3114 return 0;
3115 }
3116
3117 static int
3118 beiscsi_create_def_data(struct beiscsi_hba *phba,
3119 struct hwi_context_memory *phwi_context,
3120 struct hwi_controller *phwi_ctrlr,
3121 unsigned int def_pdu_ring_sz)
3122 {
3123 unsigned int idx;
3124 int ret;
3125 struct be_queue_info *dataq, *cq;
3126 struct be_dma_mem *mem;
3127 struct be_mem_descriptor *mem_descr;
3128 void *dq_vaddress;
3129
3130 idx = 0;
3131 dataq = &phwi_context->be_def_dataq;
3132 cq = &phwi_context->be_cq[0];
3133 mem = &dataq->dma_mem;
3134 mem_descr = phba->init_mem;
3135 mem_descr += HWI_MEM_ASYNC_DATA_RING;
3136 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3137 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3138 sizeof(struct phys_addr),
3139 sizeof(struct phys_addr), dq_vaddress);
3140 if (ret) {
3141 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3142 "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
3143 return ret;
3144 }
3145 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3146 bus_address.u.a64.address;
3147 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3148 def_pdu_ring_sz,
3149 phba->params.defpdu_data_sz);
3150 if (ret) {
3151 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3152 "BM_%d be_cmd_create_default_pdu_queue"
3153 " Failed for DEF PDU DATA\n");
3154 return ret;
3155 }
3156 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
3157 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3158 "BM_%d : iscsi def data id is %d\n",
3159 phwi_context->be_def_dataq.id);
3160
3161 hwi_post_async_buffers(phba, 0);
3162 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3163 "BM_%d : DEFAULT PDU DATA RING CREATED\n");
3164
3165 return 0;
3166 }
3167
3168 static int
3169 beiscsi_post_pages(struct beiscsi_hba *phba)
3170 {
3171 struct be_mem_descriptor *mem_descr;
3172 struct mem_array *pm_arr;
3173 unsigned int page_offset, i;
3174 struct be_dma_mem sgl;
3175 int status;
3176
3177 mem_descr = phba->init_mem;
3178 mem_descr += HWI_MEM_SGE;
3179 pm_arr = mem_descr->mem_array;
3180
3181 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3182 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
3183 for (i = 0; i < mem_descr->num_elements; i++) {
3184 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3185 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3186 page_offset,
3187 (pm_arr->size / PAGE_SIZE));
3188 page_offset += pm_arr->size / PAGE_SIZE;
3189 if (status != 0) {
3190 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3191 "BM_%d : post sgl failed.\n");
3192 return status;
3193 }
3194 pm_arr++;
3195 }
3196 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3197 "BM_%d : POSTED PAGES\n");
3198 return 0;
3199 }
3200
3201 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3202 {
3203 struct be_dma_mem *mem = &q->dma_mem;
3204 if (mem->va) {
3205 pci_free_consistent(phba->pcidev, mem->size,
3206 mem->va, mem->dma);
3207 mem->va = NULL;
3208 }
3209 }
3210
3211 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3212 u16 len, u16 entry_size)
3213 {
3214 struct be_dma_mem *mem = &q->dma_mem;
3215
3216 memset(q, 0, sizeof(*q));
3217 q->len = len;
3218 q->entry_size = entry_size;
3219 mem->size = len * entry_size;
3220 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
3221 if (!mem->va)
3222 return -ENOMEM;
3223 memset(mem->va, 0, mem->size);
3224 return 0;
3225 }
3226
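/*
 * beiscsi_create_wrb_rings()- Create one WRB queue per connection
 *
 * HWI_MEM_WRB is carved into cxns_per_ctrl rings of wrbs_per_cxn
 * WRBs each, moving to the next mem_array fragment whenever the
 * current one is exhausted, then each ring is registered with the
 * hardware through be_cmd_wrbq_create().
 */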
3227 static int
3228 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3229 struct hwi_context_memory *phwi_context,
3230 struct hwi_controller *phwi_ctrlr)
3231 {
3232 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
3233 u64 pa_addr_lo;
3234 unsigned int idx, num, i;
3235 struct mem_array *pwrb_arr;
3236 void *wrb_vaddr;
3237 struct be_dma_mem sgl;
3238 struct be_mem_descriptor *mem_descr;
3239 int status;
3240
3241 idx = 0;
3242 mem_descr = phba->init_mem;
3243 mem_descr += HWI_MEM_WRB;
3244 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
3245 GFP_KERNEL);
3246 if (!pwrb_arr) {
3247 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3248 "BM_%d : Memory alloc failed in create wrb ring.\n");
3249 return -ENOMEM;
3250 }
3251 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3252 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3253 num_wrb_rings = mem_descr->mem_array[idx].size /
3254 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
3255
3256 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3257 if (num_wrb_rings) {
3258 pwrb_arr[num].virtual_address = wrb_vaddr;
3259 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3260 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3261 sizeof(struct iscsi_wrb);
3262 wrb_vaddr += pwrb_arr[num].size;
3263 pa_addr_lo += pwrb_arr[num].size;
3264 num_wrb_rings--;
3265 } else {
3266 idx++;
3267 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3268 pa_addr_lo = mem_descr->mem_array[idx].\
3269 bus_address.u.a64.address;
3270 num_wrb_rings = mem_descr->mem_array[idx].size /
3271 (phba->params.wrbs_per_cxn *
3272 sizeof(struct iscsi_wrb));
3273 pwrb_arr[num].virtual_address = wrb_vaddr;
3274 pwrb_arr[num].bus_address.u.a64.address\
3275 = pa_addr_lo;
3276 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3277 sizeof(struct iscsi_wrb);
3278 wrb_vaddr += pwrb_arr[num].size;
3279 pa_addr_lo += pwrb_arr[num].size;
3280 num_wrb_rings--;
3281 }
3282 }
3283 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3284 wrb_mem_index = 0;
3285 offset = 0;
3286 size = 0;
3287
3288 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3289 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3290 &phwi_context->be_wrbq[i]);
3291 if (status != 0) {
3292 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3293 "BM_%d : wrbq create failed.");
3294 kfree(pwrb_arr);
3295 return status;
3296 }
3297 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
3298 id;
3299 }
3300 kfree(pwrb_arr);
3301 return 0;
3302 }
3303
3304 static void free_wrb_handles(struct beiscsi_hba *phba)
3305 {
3306 unsigned int index;
3307 struct hwi_controller *phwi_ctrlr;
3308 struct hwi_wrb_context *pwrb_context;
3309
3310 phwi_ctrlr = phba->phwi_ctrlr;
3311 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
3312 pwrb_context = &phwi_ctrlr->wrb_context[index];
3313 kfree(pwrb_context->pwrb_handle_base);
3314 kfree(pwrb_context->pwrb_handle_basestd);
3315 }
3316 }
3317
3318 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3319 {
3320 struct be_queue_info *q;
3321 struct be_ctrl_info *ctrl = &phba->ctrl;
3322
3323 q = &phba->ctrl.mcc_obj.q;
3324 if (q->created)
3325 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3326 be_queue_free(phba, q);
3327
3328 q = &phba->ctrl.mcc_obj.cq;
3329 if (q->created)
3330 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3331 be_queue_free(phba, q);
3332 }
3333
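/*
 * hwi_cleanup()- Tear down all queues created during port init
 *
 * Destroys the WRB queues, default PDU queues, posted SGL pages,
 * CQs and EQs (including the extra MCC EQ when MSI-X is on), and
 * finally the MCC queues.
 */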
3334 static void hwi_cleanup(struct beiscsi_hba *phba)
3335 {
3336 struct be_queue_info *q;
3337 struct be_ctrl_info *ctrl = &phba->ctrl;
3338 struct hwi_controller *phwi_ctrlr;
3339 struct hwi_context_memory *phwi_context;
3340 int i, eq_num;
3341
3342 phwi_ctrlr = phba->phwi_ctrlr;
3343 phwi_context = phwi_ctrlr->phwi_ctxt;
3344 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3345 q = &phwi_context->be_wrbq[i];
3346 if (q->created)
3347 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3348 }
3349 free_wrb_handles(phba);
3350
3351 q = &phwi_context->be_def_hdrq;
3352 if (q->created)
3353 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3354
3355 q = &phwi_context->be_def_dataq;
3356 if (q->created)
3357 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3358
3359 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3360
3361 for (i = 0; i < (phba->num_cpus); i++) {
3362 q = &phwi_context->be_cq[i];
3363 if (q->created)
3364 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3365 }
3366 if (phba->msix_enabled)
3367 eq_num = 1;
3368 else
3369 eq_num = 0;
3370 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
3371 q = &phwi_context->be_eq[i].q;
3372 if (q->created)
3373 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3374 }
3375 be_mcc_queues_destroy(phba);
3376 }
3377
3378 static int be_mcc_queues_create(struct beiscsi_hba *phba,
3379 struct hwi_context_memory *phwi_context)
3380 {
3381 struct be_queue_info *q, *cq;
3382 struct be_ctrl_info *ctrl = &phba->ctrl;
3383
3384 /* Alloc MCC compl queue */
3385 cq = &phba->ctrl.mcc_obj.cq;
3386 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3387 sizeof(struct be_mcc_compl)))
3388 goto err;
3389 /* Ask BE to create MCC compl queue; */
3390 if (phba->msix_enabled) {
3391 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3392 [phba->num_cpus].q, false, true, 0))
3393 goto mcc_cq_free;
3394 } else {
3395 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3396 false, true, 0))
3397 goto mcc_cq_free;
3398 }
3399
3400 /* Alloc MCC queue */
3401 q = &phba->ctrl.mcc_obj.q;
3402 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3403 goto mcc_cq_destroy;
3404
3405 /* Ask BE to create MCC queue */
3406 if (beiscsi_cmd_mccq_create(phba, q, cq))
3407 goto mcc_q_free;
3408
3409 return 0;
3410
3411 mcc_q_free:
3412 be_queue_free(phba, q);
3413 mcc_cq_destroy:
3414 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3415 mcc_cq_free:
3416 be_queue_free(phba, cq);
3417 err:
3418 return -ENOMEM;
3419 }
3420
3421 /**
3422 * find_num_cpus()- Get the CPU online count
3423 * @phba: ptr to priv structure
3424 *
3425 * CPU count is used for creating EQ.
3426 **/
3427 static void find_num_cpus(struct beiscsi_hba *phba)
3428 {
3429 int num_cpus = 0;
3430
3431 num_cpus = num_online_cpus();
3432
3433 switch (phba->generation) {
3434 case BE_GEN2:
3435 case BE_GEN3:
3436 phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
3437 BEISCSI_MAX_NUM_CPUS : num_cpus;
3438 break;
3439 case BE_GEN4:
3440 phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ?
3441 OC_SKH_MAX_NUM_CPUS : num_cpus;
3442 break;
3443 default:
3444 phba->num_cpus = 1;
3445 }
3446 }
3447
3448 static int hwi_init_port(struct beiscsi_hba *phba)
3449 {
3450 struct hwi_controller *phwi_ctrlr;
3451 struct hwi_context_memory *phwi_context;
3452 unsigned int def_pdu_ring_sz;
3453 struct be_ctrl_info *ctrl = &phba->ctrl;
3454 int status;
3455
3456 def_pdu_ring_sz =
3457 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
3458 phwi_ctrlr = phba->phwi_ctrlr;
3459 phwi_context = phwi_ctrlr->phwi_ctxt;
3460 phwi_context->max_eqd = 0;
3461 phwi_context->min_eqd = 0;
3462 phwi_context->cur_eqd = 64;
3463 be_cmd_fw_initialize(&phba->ctrl);
3464
3465 status = beiscsi_create_eqs(phba, phwi_context);
3466 if (status != 0) {
3467 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3468 "BM_%d : EQ not created\n");
3469 goto error;
3470 }
3471
3472 status = be_mcc_queues_create(phba, phwi_context);
3473 if (status != 0)
3474 goto error;
3475
3476 status = mgmt_check_supported_fw(ctrl, phba);
3477 if (status != 0) {
3478 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3479 "BM_%d : Unsupported fw version\n");
3480 goto error;
3481 }
3482
3483 status = beiscsi_create_cqs(phba, phwi_context);
3484 if (status != 0) {
3485 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3486 "BM_%d : CQ not created\n");
3487 goto error;
3488 }
3489
3490 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
3491 def_pdu_ring_sz);
3492 if (status != 0) {
3493 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3494 "BM_%d : Default Header not created\n");
3495 goto error;
3496 }
3497
3498 status = beiscsi_create_def_data(phba, phwi_context,
3499 phwi_ctrlr, def_pdu_ring_sz);
3500 if (status != 0) {
3501 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3502 "BM_%d : Default Data not created\n");
3503 goto error;
3504 }
3505
3506 status = beiscsi_post_pages(phba);
3507 if (status != 0) {
3508 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3509 "BM_%d : Post SGL Pages Failed\n");
3510 goto error;
3511 }
3512
3513 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3514 if (status != 0) {
3515 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3516 "BM_%d : WRB Rings not created\n");
3517 goto error;
3518 }
3519
3520 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3521 "BM_%d : hwi_init_port success\n");
3522 return 0;
3523
3524 error:
3525 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3526 "BM_%d : hwi_init_port failed");
3527 hwi_cleanup(phba);
3528 return status;
3529 }
3530
3531 static int hwi_init_controller(struct beiscsi_hba *phba)
3532 {
3533 struct hwi_controller *phwi_ctrlr;
3534
3535 phwi_ctrlr = phba->phwi_ctrlr;
3536 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3537 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3538 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3539 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3540 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
3541 phwi_ctrlr->phwi_ctxt);
3542 } else {
3543 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3544 "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3545 "than one element.Failing to load\n");
3546 return -ENOMEM;
3547 }
3548
3549 iscsi_init_global_templates(phba);
3550 if (beiscsi_init_wrb_handle(phba))
3551 return -ENOMEM;
3552
3553 hwi_init_async_pdu_ctx(phba);
3554 if (hwi_init_port(phba) != 0) {
3555 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3556 "BM_%d : hwi_init_controller failed\n");
3557
3558 return -ENOMEM;
3559 }
3560 return 0;
3561 }
3562
3563 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3564 {
3565 struct be_mem_descriptor *mem_descr;
3566 int i, j;
3567
3568 mem_descr = phba->init_mem;
3569 i = 0;
3570 j = 0;
3571 for (i = 0; i < SE_MEM_MAX; i++) {
3572 for (j = mem_descr->num_elements; j > 0; j--) {
3573 pci_free_consistent(phba->pcidev,
3574 mem_descr->mem_array[j - 1].size,
3575 mem_descr->mem_array[j - 1].virtual_address,
3576 (unsigned long)mem_descr->mem_array[j - 1].
3577 bus_address.u.a64.address);
3578 }
3579 kfree(mem_descr->mem_array);
3580 mem_descr++;
3581 }
3582 kfree(phba->init_mem);
3583 kfree(phba->phwi_ctrlr);
3584 }
3585
3586 static int beiscsi_init_controller(struct beiscsi_hba *phba)
3587 {
3588 int ret = -ENOMEM;
3589
3590 ret = beiscsi_get_memory(phba);
3591 if (ret < 0) {
3592 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3593 "BM_%d : beiscsi_dev_probe -"
3594 "Failed in beiscsi_alloc_memory\n");
3595 return ret;
3596 }
3597
3598 ret = hwi_init_controller(phba);
3599 if (ret)
3600 goto free_init;
3601 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3602 "BM_%d : Return success from beiscsi_init_controller");
3603
3604 return 0;
3605
3606 free_init:
3607 beiscsi_free_mem(phba);
3608 return ret;
3609 }
3610
3611 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3612 {
3613 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3614 struct sgl_handle *psgl_handle;
3615 struct iscsi_sge *pfrag;
3616 unsigned int arr_index, i, idx;
3617
3618 phba->io_sgl_hndl_avbl = 0;
3619 phba->eh_sgl_hndl_avbl = 0;
3620
3621 mem_descr_sglh = phba->init_mem;
3622 mem_descr_sglh += HWI_MEM_SGLH;
3623 if (mem_descr_sglh->num_elements == 1) {
3624 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3625 phba->params.ios_per_ctrl,
3626 GFP_KERNEL);
3627 if (!phba->io_sgl_hndl_base) {
3628 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3629 "BM_%d : Mem Alloc Failed. Failing to load\n");
3630 return -ENOMEM;
3631 }
3632 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3633 (phba->params.icds_per_ctrl -
3634 phba->params.ios_per_ctrl),
3635 GFP_KERNEL);
3636 if (!phba->eh_sgl_hndl_base) {
3637 kfree(phba->io_sgl_hndl_base);
3638 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3639 "BM_%d : Mem Alloc Failed. Failing to load\n");
3640 return -ENOMEM;
3641 }
3642 } else {
3643 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3644 "BM_%d : HWI_MEM_SGLH is more than one element."
3645 "Failing to load\n");
3646 return -ENOMEM;
3647 }
3648
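/*
 * Carve the flat SGL-handle memory into two pools: the first
 * ios_per_ctrl handles back SCSI I/O tasks, the remaining
 * (icds_per_ctrl - ios_per_ctrl) handles back login/mgmt tasks.
 */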
3649 arr_index = 0;
3650 idx = 0;
3651 while (idx < mem_descr_sglh->num_elements) {
3652 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3653
3654 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3655 sizeof(struct sgl_handle)); i++) {
3656 if (arr_index < phba->params.ios_per_ctrl) {
3657 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3658 phba->io_sgl_hndl_avbl++;
3659 arr_index++;
3660 } else {
3661 phba->eh_sgl_hndl_base[arr_index -
3662 phba->params.ios_per_ctrl] =
3663 psgl_handle;
3664 arr_index++;
3665 phba->eh_sgl_hndl_avbl++;
3666 }
3667 psgl_handle++;
3668 }
3669 idx++;
3670 }
3671 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3672 "BM_%d : phba->io_sgl_hndl_avbl=%d"
3673 "phba->eh_sgl_hndl_avbl=%d\n",
3674 phba->io_sgl_hndl_avbl,
3675 phba->eh_sgl_hndl_avbl);
3676
3677 mem_descr_sg = phba->init_mem;
3678 mem_descr_sg += HWI_MEM_SGE;
3679 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3680 "\n BM_%d : mem_descr_sg->num_elements=%d\n",
3681 mem_descr_sg->num_elements);
3682
3683 arr_index = 0;
3684 idx = 0;
3685 while (idx < mem_descr_sg->num_elements) {
3686 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3687
3688 for (i = 0;
3689 i < (mem_descr_sg->mem_array[idx].size) /
3690 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3691 i++) {
3692 if (arr_index < phba->params.ios_per_ctrl)
3693 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3694 else
3695 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3696 phba->params.ios_per_ctrl];
3697 psgl_handle->pfrag = pfrag;
3698 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3699 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3700 pfrag += phba->params.num_sge_per_io;
3701 psgl_handle->sgl_index =
3702 phba->fw_config.iscsi_icd_start + arr_index++;
3703 }
3704 idx++;
3705 }
3706 phba->io_sgl_free_index = 0;
3707 phba->io_sgl_alloc_index = 0;
3708 phba->eh_sgl_free_index = 0;
3709 phba->eh_sgl_alloc_index = 0;
3710 return 0;
3711 }
3712
3713 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3714 {
3715 int i, new_cid;
3716
3717 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3718 GFP_KERNEL);
3719 if (!phba->cid_array) {
3720 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3721 "BM_%d : Failed to allocate memory in "
3722 "hba_setup_cid_tbls\n");
3723 return -ENOMEM;
3724 }
3725 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3726 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3727 if (!phba->ep_array) {
3728 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3729 "BM_%d : Failed to allocate memory in "
3730 "hba_setup_cid_tbls\n");
3731 kfree(phba->cid_array);
3732 return -ENOMEM;
3733 }
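/*
 * CIDs are handed out from iscsi_cid_start with a stride of 2;
 * ep_array above is correspondingly sized at cxns_per_ctrl * 2.
 */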
3734 new_cid = phba->fw_config.iscsi_cid_start;
3735 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3736 phba->cid_array[i] = new_cid;
3737 new_cid += 2;
3738 }
3739 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3740 return 0;
3741 }
3742
3743 static void hwi_enable_intr(struct beiscsi_hba *phba)
3744 {
3745 struct be_ctrl_info *ctrl = &phba->ctrl;
3746 struct hwi_controller *phwi_ctrlr;
3747 struct hwi_context_memory *phwi_context;
3748 struct be_queue_info *eq;
3749 u8 __iomem *addr;
3750 u32 reg, i;
3751 u32 enabled;
3752
3753 phwi_ctrlr = phba->phwi_ctrlr;
3754 phwi_context = phwi_ctrlr->phwi_ctxt;
3755
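/*
 * Enable the host-interrupt bit in the PCI config-space interrupt
 * control register if it is not already set, then arm the doorbell of
 * every event queue: one EQ in INTx mode, num_cpus + 1 EQs when MSI-X
 * is enabled (the extra EQ services the MCC).
 */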
3756 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3757 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3758 reg = ioread32(addr);
3759
3760 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3761 if (!enabled) {
3762 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3763 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3764 "BM_%d : reg =x%08x addr=%p\n", reg, addr);
3765 iowrite32(reg, addr);
3766 }
3767
3768 if (!phba->msix_enabled) {
3769 eq = &phwi_context->be_eq[0].q;
3770 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3771 "BM_%d : eq->id=%d\n", eq->id);
3772
3773 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3774 } else {
3775 for (i = 0; i <= phba->num_cpus; i++) {
3776 eq = &phwi_context->be_eq[i].q;
3777 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3778 "BM_%d : eq->id=%d\n", eq->id);
3779 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3780 }
3781 }
3782 }
3783
3784 static void hwi_disable_intr(struct beiscsi_hba *phba)
3785 {
3786 struct be_ctrl_info *ctrl = &phba->ctrl;
3787
3788 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3789 u32 reg = ioread32(addr);
3790
3791 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3792 if (enabled) {
3793 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3794 iowrite32(reg, addr);
3795 } else
3796 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
3797 "BM_%d : In hwi_disable_intr, Already Disabled\n");
3798 }
3799
3800 /**
3801 * beiscsi_get_boot_info()- Get the boot session info
3802 * @phba: The device priv structure instance
3803 *
3804 * Get the boot target info and store in driver priv structure
3805 *
3806 * return values
3807 * Success: 0
3808 * Failure: Non-Zero Value
3809 **/
3810 static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3811 {
3812 struct be_cmd_get_session_resp *session_resp;
3813 struct be_mcc_wrb *wrb;
3814 struct be_dma_mem nonemb_cmd;
3815 unsigned int tag, wrb_num;
3816 unsigned short status, extd_status;
3817 unsigned int s_handle;
3818 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
3819 int ret = -ENOMEM;
3820
3821 /* Get the session handle of the boot target */
3822 ret = be_mgmt_get_boot_shandle(phba, &s_handle);
3823 if (ret) {
3824 beiscsi_log(phba, KERN_ERR,
3825 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3826 "BM_%d : No boot session\n");
3827 return ret;
3828 }
3829 nonemb_cmd.size = sizeof(*session_resp);
3830 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3831 &nonemb_cmd.dma);
3832 if (nonemb_cmd.va == NULL) {
3833 beiscsi_log(phba, KERN_ERR,
3834 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3835 "BM_%d : Failed to allocate memory for"
3836 "beiscsi_get_session_info\n");
3837
3838 return -ENOMEM;
3839 }
3840
3841 memset(nonemb_cmd.va, 0, sizeof(*session_resp));
3842 tag = mgmt_get_session_info(phba, s_handle,
3843 &nonemb_cmd);
3844 if (!tag) {
3845 beiscsi_log(phba, KERN_ERR,
3846 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3847 "BM_%d : beiscsi_get_session_info"
3848 " Failed\n");
3849
3850 goto boot_freemem;
3851 } else
3852 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3853 phba->ctrl.mcc_numtag[tag]);
3854
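/*
 * mcc_numtag[tag] packs the MCC completion status: bits 23:16 carry
 * the WRB index, bits 15:8 the extended status and bits 7:0 the
 * completion status.
 */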
3855 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3856 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3857 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3858 if (status || extd_status) {
3859 beiscsi_log(phba, KERN_ERR,
3860 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3861 "BM_%d : beiscsi_get_session_info Failed"
3862 " status = %d extd_status = %d\n",
3863 status, extd_status);
3864
3865 free_mcc_tag(&phba->ctrl, tag);
3866 goto boot_freemem;
3867 }
3868 wrb = queue_get_wrb(mccq, wrb_num);
3869 free_mcc_tag(&phba->ctrl, tag);
3870 session_resp = nonemb_cmd.va;
3871
3872 memcpy(&phba->boot_sess, &session_resp->session_info,
3873 sizeof(struct mgmt_session_info));
3874 ret = 0;
3875
3876 boot_freemem:
3877 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3878 nonemb_cmd.va, nonemb_cmd.dma);
3879 return ret;
3880 }
3881
3882 static void beiscsi_boot_release(void *data)
3883 {
3884 struct beiscsi_hba *phba = data;
3885
3886 scsi_host_put(phba->shost);
3887 }
3888
3889 static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
3890 {
3891 struct iscsi_boot_kobj *boot_kobj;
3892
3893 /* get boot info using mgmt cmd */
3894 if (beiscsi_get_boot_info(phba))
3895 /* Try to see if we can carry on without this */
3896 return 0;
3897
3898 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
3899 if (!phba->boot_kset)
3900 return -ENOMEM;
3901
3902 /* get a ref because the show function will ref the phba */
3903 if (!scsi_host_get(phba->shost))
3904 goto free_kset;
3905 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
3906 beiscsi_show_boot_tgt_info,
3907 beiscsi_tgt_get_attr_visibility,
3908 beiscsi_boot_release);
3909 if (!boot_kobj)
3910 goto put_shost;
3911
3912 if (!scsi_host_get(phba->shost))
3913 goto free_kset;
3914 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
3915 beiscsi_show_boot_ini_info,
3916 beiscsi_ini_get_attr_visibility,
3917 beiscsi_boot_release);
3918 if (!boot_kobj)
3919 goto put_shost;
3920
3921 if (!scsi_host_get(phba->shost))
3922 goto free_kset;
3923 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
3924 beiscsi_show_boot_eth_info,
3925 beiscsi_eth_get_attr_visibility,
3926 beiscsi_boot_release);
3927 if (!boot_kobj)
3928 goto put_shost;
3929 return 0;
3930
3931 put_shost:
3932 scsi_host_put(phba->shost);
3933 free_kset:
3934 iscsi_boot_destroy_kset(phba->boot_kset);
3935 return -ENOMEM;
3936 }
3937
3938 static int beiscsi_init_port(struct beiscsi_hba *phba)
3939 {
3940 int ret;
3941
3942 ret = beiscsi_init_controller(phba);
3943 if (ret < 0) {
3944 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3945 "BM_%d : beiscsi_dev_probe - Failed in"
3946 "beiscsi_init_controller\n");
3947 return ret;
3948 }
3949 ret = beiscsi_init_sgl_handle(phba);
3950 if (ret < 0) {
3951 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3952 "BM_%d : beiscsi_dev_probe - Failed in"
3953 "beiscsi_init_sgl_handle\n");
3954 goto do_cleanup_ctrlr;
3955 }
3956
3957 if (hba_setup_cid_tbls(phba)) {
3958 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3959 "BM_%d : Failed in hba_setup_cid_tbls\n");
3960 kfree(phba->io_sgl_hndl_base);
3961 kfree(phba->eh_sgl_hndl_base);
3962 goto do_cleanup_ctrlr;
3963 }
3964
3965 return ret;
3966
3967 do_cleanup_ctrlr:
3968 hwi_cleanup(phba);
3969 return ret;
3970 }
3971
3972 static void hwi_purge_eq(struct beiscsi_hba *phba)
3973 {
3974 struct hwi_controller *phwi_ctrlr;
3975 struct hwi_context_memory *phwi_context;
3976 struct be_queue_info *eq;
3977 struct be_eq_entry *eqe = NULL;
3978 int i, eq_msix;
3979 unsigned int num_processed;
3980
3981 phwi_ctrlr = phba->phwi_ctrlr;
3982 phwi_context = phwi_ctrlr->phwi_ctxt;
3983 if (phba->msix_enabled)
3984 eq_msix = 1;
3985 else
3986 eq_msix = 0;
3987
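/*
 * Drain each event queue: clear the valid bit on every outstanding
 * entry, then ring the EQ doorbell once with the count consumed so the
 * hardware and driver indices agree before teardown.
 */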
3988 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3989 eq = &phwi_context->be_eq[i].q;
3990 eqe = queue_tail_node(eq);
3991 num_processed = 0;
3992 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3993 & EQE_VALID_MASK) {
3994 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3995 queue_tail_inc(eq);
3996 eqe = queue_tail_node(eq);
3997 num_processed++;
3998 }
3999
4000 if (num_processed)
4001 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
4002 }
4003 }
4004
4005 static void beiscsi_clean_port(struct beiscsi_hba *phba)
4006 {
4007 int mgmt_status;
4008
4009 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
4010 if (mgmt_status)
4011 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
4012 "BM_%d : mgmt_epfw_cleanup FAILED\n");
4013
4014 hwi_purge_eq(phba);
4015 hwi_cleanup(phba);
4016 kfree(phba->io_sgl_hndl_base);
4017 kfree(phba->eh_sgl_hndl_base);
4018 kfree(phba->cid_array);
4019 kfree(phba->ep_array);
4020 }
4021
4022 /**
4023 * beiscsi_cleanup_task()- Free driver resources of the task
4024 * @task: ptr to the iscsi task
4025 *
4026 **/
4027 static void beiscsi_cleanup_task(struct iscsi_task *task)
4028 {
4029 struct beiscsi_io_task *io_task = task->dd_data;
4030 struct iscsi_conn *conn = task->conn;
4031 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4032 struct beiscsi_hba *phba = beiscsi_conn->phba;
4033 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4034 struct hwi_wrb_context *pwrb_context;
4035 struct hwi_controller *phwi_ctrlr;
4036
4037 phwi_ctrlr = phba->phwi_ctrlr;
4038 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
4039 - phba->fw_config.iscsi_cid_start];
4040
4041 if (io_task->cmd_bhs) {
4042 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4043 io_task->bhs_pa.u.a64.address);
4044 io_task->cmd_bhs = NULL;
4045 }
4046
4047 if (task->sc) {
4048 if (io_task->pwrb_handle) {
4049 free_wrb_handle(phba, pwrb_context,
4050 io_task->pwrb_handle);
4051 io_task->pwrb_handle = NULL;
4052 }
4053
4054 if (io_task->psgl_handle) {
4055 spin_lock(&phba->io_sgl_lock);
4056 free_io_sgl_handle(phba, io_task->psgl_handle);
4057 spin_unlock(&phba->io_sgl_lock);
4058 io_task->psgl_handle = NULL;
4059 }
4060 } else {
4061 if (!beiscsi_conn->login_in_progress) {
4062 if (io_task->pwrb_handle) {
4063 free_wrb_handle(phba, pwrb_context,
4064 io_task->pwrb_handle);
4065 io_task->pwrb_handle = NULL;
4066 }
4067 if (io_task->psgl_handle) {
4068 spin_lock(&phba->mgmt_sgl_lock);
4069 free_mgmt_sgl_handle(phba,
4070 io_task->psgl_handle);
4071 spin_unlock(&phba->mgmt_sgl_lock);
4072 io_task->psgl_handle = NULL;
4073 }
4074 if (io_task->mtask_addr) {
4075 pci_unmap_single(phba->pcidev,
4076 io_task->mtask_addr,
4077 io_task->mtask_data_count,
4078 PCI_DMA_TODEVICE);
4079 io_task->mtask_addr = 0;
4080 }
4081 }
4082 }
4083 }
4084
4085 void
4086 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
4087 struct beiscsi_offload_params *params)
4088 {
4089 struct wrb_handle *pwrb_handle;
4090 struct beiscsi_hba *phba = beiscsi_conn->phba;
4091 struct iscsi_task *task = beiscsi_conn->task;
4092 struct iscsi_session *session = task->conn->session;
4093 u32 doorbell = 0;
4094
4095 /*
4096 * We can always use 0 here because it is reserved by libiscsi for
4097 * login/startup related tasks.
4098 */
4099 beiscsi_conn->login_in_progress = 0;
4100 spin_lock_bh(&session->lock);
4101 beiscsi_cleanup_task(task);
4102 spin_unlock_bh(&session->lock);
4103
4104 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
4105 phba->fw_config.iscsi_cid_start));
4106
4107 /* Check for the adapter family */
4108 if (chip_skh_r(phba->pcidev))
4109 beiscsi_offload_cxn_v2(params, pwrb_handle);
4110 else
4111 beiscsi_offload_cxn_v0(params, pwrb_handle,
4112 phba->init_mem);
4113
4114 be_dws_le_to_cpu(pwrb_handle->pwrb,
4115 sizeof(struct iscsi_target_context_update_wrb));
4116
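/*
 * Build the TX doorbell word: connection ID in the low bits, the WRB
 * index of the context-update WRB, and a posted count of one, then
 * ring TXULP0.
 */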
4117 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4118 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
4119 << DB_DEF_PDU_WRB_INDEX_SHIFT;
4120 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4121
4122 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4123 }
4124
4125 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4126 int *index, int *age)
4127 {
4128 *index = (int)itt;
4129 if (age)
4130 *age = conn->session->age;
4131 }
4132
4133 /**
4134 * beiscsi_alloc_pdu - allocates pdu and related resources
4135 * @task: libiscsi task
4136 * @opcode: opcode of pdu for task
4137 *
4138 * This is called with the session lock held. It will allocate
4139 * the wrb and sgl if needed for the command. And it will prep
4140 * the pdu's itt. beiscsi_parse_pdu will later translate
4141 * the pdu itt to the libiscsi task itt.
4142 */
4143 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4144 {
4145 struct beiscsi_io_task *io_task = task->dd_data;
4146 struct iscsi_conn *conn = task->conn;
4147 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4148 struct beiscsi_hba *phba = beiscsi_conn->phba;
4149 struct hwi_wrb_context *pwrb_context;
4150 struct hwi_controller *phwi_ctrlr;
4151 itt_t itt;
4152 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4153 dma_addr_t paddr;
4154
4155 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
4156 GFP_ATOMIC, &paddr);
4157 if (!io_task->cmd_bhs)
4158 return -ENOMEM;
4159 io_task->bhs_pa.u.a64.address = paddr;
4160 io_task->libiscsi_itt = (itt_t)task->itt;
4161 io_task->conn = beiscsi_conn;
4162
4163 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4164 task->hdr_max = sizeof(struct be_cmd_bhs);
4165 io_task->psgl_handle = NULL;
4166 io_task->pwrb_handle = NULL;
4167
4168 if (task->sc) {
4169 spin_lock(&phba->io_sgl_lock);
4170 io_task->psgl_handle = alloc_io_sgl_handle(phba);
4171 spin_unlock(&phba->io_sgl_lock);
4172 if (!io_task->psgl_handle) {
4173 beiscsi_log(phba, KERN_ERR,
4174 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4175 "BM_%d : Alloc of IO_SGL_ICD Failed"
4176 "for the CID : %d\n",
4177 beiscsi_conn->beiscsi_conn_cid);
4178 goto free_hndls;
4179 }
4180 io_task->pwrb_handle = alloc_wrb_handle(phba,
4181 beiscsi_conn->beiscsi_conn_cid -
4182 phba->fw_config.iscsi_cid_start);
4183 if (!io_task->pwrb_handle) {
4184 beiscsi_log(phba, KERN_ERR,
4185 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4186 "BM_%d : Alloc of WRB_HANDLE Failed"
4187 "for the CID : %d\n",
4188 beiscsi_conn->beiscsi_conn_cid);
4189 goto free_io_hndls;
4190 }
4191 } else {
4192 io_task->scsi_cmnd = NULL;
4193 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
4194 if (!beiscsi_conn->login_in_progress) {
4195 spin_lock(&phba->mgmt_sgl_lock);
4196 io_task->psgl_handle = (struct sgl_handle *)
4197 alloc_mgmt_sgl_handle(phba);
4198 spin_unlock(&phba->mgmt_sgl_lock);
4199 if (!io_task->psgl_handle) {
4200 beiscsi_log(phba, KERN_ERR,
4201 BEISCSI_LOG_IO |
4202 BEISCSI_LOG_CONFIG,
4203 "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4204 "for the CID : %d\n",
4205 beiscsi_conn->
4206 beiscsi_conn_cid);
4207 goto free_hndls;
4208 }
4209
4210 beiscsi_conn->login_in_progress = 1;
4211 beiscsi_conn->plogin_sgl_handle =
4212 io_task->psgl_handle;
4213 io_task->pwrb_handle =
4214 alloc_wrb_handle(phba,
4215 beiscsi_conn->beiscsi_conn_cid -
4216 phba->fw_config.iscsi_cid_start);
4217 if (!io_task->pwrb_handle) {
4218 beiscsi_log(phba, KERN_ERR,
4219 BEISCSI_LOG_IO |
4220 BEISCSI_LOG_CONFIG,
4221 "BM_%d : Alloc of WRB_HANDLE Failed"
4222 "for the CID : %d\n",
4223 beiscsi_conn->
4224 beiscsi_conn_cid);
4225 goto free_mgmt_hndls;
4226 }
4227 beiscsi_conn->plogin_wrb_handle =
4228 io_task->pwrb_handle;
4229
4230 } else {
4231 io_task->psgl_handle =
4232 beiscsi_conn->plogin_sgl_handle;
4233 io_task->pwrb_handle =
4234 beiscsi_conn->plogin_wrb_handle;
4235 }
4236 beiscsi_conn->task = task;
4237 } else {
4238 spin_lock(&phba->mgmt_sgl_lock);
4239 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4240 spin_unlock(&phba->mgmt_sgl_lock);
4241 if (!io_task->psgl_handle) {
4242 beiscsi_log(phba, KERN_ERR,
4243 BEISCSI_LOG_IO |
4244 BEISCSI_LOG_CONFIG,
4245 "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4246 "for the CID : %d\n",
4247 beiscsi_conn->
4248 beiscsi_conn_cid);
4249 goto free_hndls;
4250 }
4251 io_task->pwrb_handle =
4252 alloc_wrb_handle(phba,
4253 beiscsi_conn->beiscsi_conn_cid -
4254 phba->fw_config.iscsi_cid_start);
4255 if (!io_task->pwrb_handle) {
4256 beiscsi_log(phba, KERN_ERR,
4257 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4258 "BM_%d : Alloc of WRB_HANDLE Failed"
4259 "for the CID : %d\n",
4260 beiscsi_conn->beiscsi_conn_cid);
4261 goto free_mgmt_hndls;
4262 }
4263
4264 }
4265 }
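/*
 * Pack the ITT handed to the adapter: WRB index in the upper 16 bits,
 * SGL index in the lower 16 bits. The original libiscsi itt was saved
 * in io_task->libiscsi_itt above.
 */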
4266 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4267 wrb_index << 16) | (unsigned int)
4268 (io_task->psgl_handle->sgl_index));
4269 io_task->pwrb_handle->pio_handle = task;
4270
4271 io_task->cmd_bhs->iscsi_hdr.itt = itt;
4272 return 0;
4273
4274 free_io_hndls:
4275 spin_lock(&phba->io_sgl_lock);
4276 free_io_sgl_handle(phba, io_task->psgl_handle);
4277 spin_unlock(&phba->io_sgl_lock);
4278 goto free_hndls;
4279 free_mgmt_hndls:
4280 spin_lock(&phba->mgmt_sgl_lock);
4281 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4282 spin_unlock(&phba->mgmt_sgl_lock);
4283 free_hndls:
4284 phwi_ctrlr = phba->phwi_ctrlr;
4285 pwrb_context = &phwi_ctrlr->wrb_context[
4286 beiscsi_conn->beiscsi_conn_cid -
4287 phba->fw_config.iscsi_cid_start];
4288 if (io_task->pwrb_handle)
4289 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4290 io_task->pwrb_handle = NULL;
4291 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4292 io_task->bhs_pa.u.a64.address);
4293 io_task->cmd_bhs = NULL;
4294 return -ENOMEM;
4295 }
4296 int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
4297 unsigned int num_sg, unsigned int xferlen,
4298 unsigned int writedir)
4299 {
4300
4301 struct beiscsi_io_task *io_task = task->dd_data;
4302 struct iscsi_conn *conn = task->conn;
4303 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4304 struct beiscsi_hba *phba = beiscsi_conn->phba;
4305 struct iscsi_wrb *pwrb = NULL;
4306 unsigned int doorbell = 0;
4307
4308 pwrb = io_task->pwrb_handle->pwrb;
4309 memset(pwrb, 0, sizeof(*pwrb));
4310
4311 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4312 io_task->bhs_len = sizeof(struct be_cmd_bhs);
4313
4314 if (writedir) {
4315 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4316 INI_WR_CMD);
4317 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
4318 } else {
4319 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4320 INI_RD_CMD);
4321 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
4322 }
4323
4324 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
4325 type, pwrb);
4326
4327 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
4328 cpu_to_be16(*(unsigned short *)
4329 &io_task->cmd_bhs->iscsi_hdr.lun));
4330 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
4331 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4332 io_task->pwrb_handle->wrb_index);
4333 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4334 be32_to_cpu(task->cmdsn));
4335 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4336 io_task->psgl_handle->sgl_index);
4337
4338 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
4339 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4340 io_task->pwrb_handle->nxt_wrb_index);
4341
4342 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4343
4344 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4345 doorbell |= (io_task->pwrb_handle->wrb_index &
4346 DB_DEF_PDU_WRB_INDEX_MASK) <<
4347 DB_DEF_PDU_WRB_INDEX_SHIFT;
4348 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4349 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4350 return 0;
4351 }
4352
4353 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4354 unsigned int num_sg, unsigned int xferlen,
4355 unsigned int writedir)
4356 {
4357
4358 struct beiscsi_io_task *io_task = task->dd_data;
4359 struct iscsi_conn *conn = task->conn;
4360 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4361 struct beiscsi_hba *phba = beiscsi_conn->phba;
4362 struct iscsi_wrb *pwrb = NULL;
4363 unsigned int doorbell = 0;
4364
4365 pwrb = io_task->pwrb_handle->pwrb;
4366 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4367 io_task->bhs_len = sizeof(struct be_cmd_bhs);
4368
4369 if (writedir) {
4370 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4371 INI_WR_CMD);
4372 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
4373 } else {
4374 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4375 INI_RD_CMD);
4376 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4377 }
4378
4379 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
4380 type, pwrb);
4381
4382 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
4383 cpu_to_be16(*(unsigned short *)
4384 &io_task->cmd_bhs->iscsi_hdr.lun));
4385 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4386 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4387 io_task->pwrb_handle->wrb_index);
4388 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4389 be32_to_cpu(task->cmdsn));
4390 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4391 io_task->psgl_handle->sgl_index);
4392
4393 hwi_write_sgl(pwrb, sg, num_sg, io_task);
4394
4395 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4396 io_task->pwrb_handle->nxt_wrb_index);
4397 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4398
4399 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4400 doorbell |= (io_task->pwrb_handle->wrb_index &
4401 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4402 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4403
4404 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4405 return 0;
4406 }
4407
4408 static int beiscsi_mtask(struct iscsi_task *task)
4409 {
4410 struct beiscsi_io_task *io_task = task->dd_data;
4411 struct iscsi_conn *conn = task->conn;
4412 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4413 struct beiscsi_hba *phba = beiscsi_conn->phba;
4414 struct iscsi_wrb *pwrb = NULL;
4415 unsigned int doorbell = 0;
4416 unsigned int cid;
4417 unsigned int pwrb_typeoffset = 0;
4418
4419 cid = beiscsi_conn->beiscsi_conn_cid;
4420 pwrb = io_task->pwrb_handle->pwrb;
4421 memset(pwrb, 0, sizeof(*pwrb));
4422
4423 if (chip_skh_r(phba->pcidev)) {
4424 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4425 be32_to_cpu(task->cmdsn));
4426 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4427 io_task->pwrb_handle->wrb_index);
4428 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4429 io_task->psgl_handle->sgl_index);
4430 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
4431 task->data_count);
4432 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4433 io_task->pwrb_handle->nxt_wrb_index);
4434 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
4435 } else {
4436 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4437 be32_to_cpu(task->cmdsn));
4438 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4439 io_task->pwrb_handle->wrb_index);
4440 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4441 io_task->psgl_handle->sgl_index);
4442 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
4443 task->data_count);
4444 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4445 io_task->pwrb_handle->nxt_wrb_index);
4446 pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
4447 }
4448
4449
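/*
 * The WRB "type" field sits at a different offset on BE2/BE3
 * (BE_WRB_TYPE_OFFSET) than on SKH-R (SKH_WRB_TYPE_OFFSET);
 * ADAPTER_SET_WRB_TYPE uses the offset selected above.
 */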
4450 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4451 case ISCSI_OP_LOGIN:
4452 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
4453 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4454 hwi_write_buffer(pwrb, task);
4455 break;
4456 case ISCSI_OP_NOOP_OUT:
4457 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4458 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4459 if (chip_skh_r(phba->pcidev))
4460 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4461 dmsg, pwrb, 1);
4462 else
4463 AMAP_SET_BITS(struct amap_iscsi_wrb,
4464 dmsg, pwrb, 1);
4465 } else {
4466 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
4467 if (chip_skh_r(phba->pcidev))
4468 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4469 dmsg, pwrb, 0);
4470 else
4471 AMAP_SET_BITS(struct amap_iscsi_wrb,
4472 dmsg, pwrb, 0);
4473 }
4474 hwi_write_buffer(pwrb, task);
4475 break;
4476 case ISCSI_OP_TEXT:
4477 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4478 hwi_write_buffer(pwrb, task);
4479 break;
4480 case ISCSI_OP_SCSI_TMFUNC:
4481 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
4482 hwi_write_buffer(pwrb, task);
4483 break;
4484 case ISCSI_OP_LOGOUT:
4485 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
4486 hwi_write_buffer(pwrb, task);
4487 break;
4488
4489 default:
4490 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4491 "BM_%d : opcode =%d Not supported\n",
4492 task->hdr->opcode & ISCSI_OPCODE_MASK);
4493
4494 return -EINVAL;
4495 }
4496
4497 /* Set the task type */
4498 io_task->wrb_type = (chip_skh_r(phba->pcidev)) ?
4499 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) :
4500 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb);
4501
4502 doorbell |= cid & DB_WRB_POST_CID_MASK;
4503 doorbell |= (io_task->pwrb_handle->wrb_index &
4504 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4505 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4506 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4507 return 0;
4508 }
4509
4510 static int beiscsi_task_xmit(struct iscsi_task *task)
4511 {
4512 struct beiscsi_io_task *io_task = task->dd_data;
4513 struct scsi_cmnd *sc = task->sc;
4514 struct beiscsi_hba *phba = NULL;
4515 struct scatterlist *sg;
4516 int num_sg;
4517 unsigned int writedir = 0, xferlen = 0;
4518
4519 phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
4520
4521 if (!sc)
4522 return beiscsi_mtask(task);
4523
4524 io_task->scsi_cmnd = sc;
4525 num_sg = scsi_dma_map(sc);
4526 if (num_sg < 0) {
4527 struct iscsi_conn *conn = task->conn;
4528 struct beiscsi_hba *phba = NULL;
4529
4530 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
4531 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
4532 "BM_%d : scsi_dma_map Failed\n");
4533
4534 return num_sg;
4535 }
4536 xferlen = scsi_bufflen(sc);
4537 sg = scsi_sglist(sc);
4538 if (sc->sc_data_direction == DMA_TO_DEVICE)
4539 writedir = 1;
4540 else
4541 writedir = 0;
4542
4543 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
4544 }
4545
4546 /**
4547 * beiscsi_bsg_request - handle bsg request from ISCSI transport
4548 * @job: job to handle
4549 */
4550 static int beiscsi_bsg_request(struct bsg_job *job)
4551 {
4552 struct Scsi_Host *shost;
4553 struct beiscsi_hba *phba;
4554 struct iscsi_bsg_request *bsg_req = job->request;
4555 int rc = -EINVAL;
4556 unsigned int tag;
4557 struct be_dma_mem nonemb_cmd;
4558 struct be_cmd_resp_hdr *resp;
4559 struct iscsi_bsg_reply *bsg_reply = job->reply;
4560 unsigned short status, extd_status;
4561
4562 shost = iscsi_job_to_shost(job);
4563 phba = iscsi_host_priv(shost);
4564
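/*
 * Only ISCSI_BSG_HST_VENDOR requests are handled: the payload is
 * staged in a DMA buffer, issued as a vendor-specific MBX command over
 * the MCC queue, and the response is copied back into the bsg reply.
 */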
4565 switch (bsg_req->msgcode) {
4566 case ISCSI_BSG_HST_VENDOR:
4567 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
4568 job->request_payload.payload_len,
4569 &nonemb_cmd.dma);
4570 if (nonemb_cmd.va == NULL) {
4571 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4572 "BM_%d : Failed to allocate memory for "
4573 "beiscsi_bsg_request\n");
4574 return -ENOMEM;
4575 }
4576 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4577 &nonemb_cmd);
4578 if (!tag) {
4579 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4580 "BM_%d : MBX Tag Allocation Failed\n");
4581
4582 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4583 nonemb_cmd.va, nonemb_cmd.dma);
4584 return -EAGAIN;
4585 } else
4586 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
4587 phba->ctrl.mcc_numtag[tag]);
4588 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
4589 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
4590 free_mcc_tag(&phba->ctrl, tag);
4591 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
4592 sg_copy_from_buffer(job->reply_payload.sg_list,
4593 job->reply_payload.sg_cnt,
4594 nonemb_cmd.va, (resp->response_length
4595 + sizeof(*resp)));
4596 bsg_reply->reply_payload_rcv_len = resp->response_length;
4597 bsg_reply->result = status;
4598 bsg_job_done(job, bsg_reply->result,
4599 bsg_reply->reply_payload_rcv_len);
4600 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4601 nonemb_cmd.va, nonemb_cmd.dma);
4602 if (status || extd_status) {
4603 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4604 "BM_%d : MBX Cmd Failed"
4605 " status = %d extd_status = %d\n",
4606 status, extd_status);
4607
4608 return -EIO;
4609 } else {
4610 rc = 0;
4611 }
4612 break;
4613
4614 default:
4615 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4616 "BM_%d : Unsupported bsg command: 0x%x\n",
4617 bsg_req->msgcode);
4618 break;
4619 }
4620
4621 return rc;
4622 }
4623
4624 void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
4625 {
4626 /* Set the logging parameter */
4627 beiscsi_log_enable_init(phba, beiscsi_log_enable);
4628 }
4629
4630 /**
4631 * beiscsi_quiesce()- Cleanup Driver resources
4632 * @phba: Instance Priv structure
4633 *
4634 * Free the OS and HW resources held by the driver
4635 **/
4636 static void beiscsi_quiesce(struct beiscsi_hba *phba)
4637 {
4638 struct hwi_controller *phwi_ctrlr;
4639 struct hwi_context_memory *phwi_context;
4640 struct be_eq_obj *pbe_eq;
4641 unsigned int i, msix_vec;
4642
4643 phwi_ctrlr = phba->phwi_ctrlr;
4644 phwi_context = phwi_ctrlr->phwi_ctxt;
4645 hwi_disable_intr(phba);
4646 if (phba->msix_enabled) {
4647 for (i = 0; i <= phba->num_cpus; i++) {
4648 msix_vec = phba->msix_entries[i].vector;
4649 free_irq(msix_vec, &phwi_context->be_eq[i]);
4650 kfree(phba->msi_name[i]);
4651 }
4652 } else
4653 if (phba->pcidev->irq)
4654 free_irq(phba->pcidev->irq, phba);
4655 pci_disable_msix(phba->pcidev);
4656 destroy_workqueue(phba->wq);
4657 if (blk_iopoll_enabled)
4658 for (i = 0; i < phba->num_cpus; i++) {
4659 pbe_eq = &phwi_context->be_eq[i];
4660 blk_iopoll_disable(&pbe_eq->iopoll);
4661 }
4662
4663 beiscsi_clean_port(phba);
4664 beiscsi_free_mem(phba);
4665
4666 beiscsi_unmap_pci_function(phba);
4667 pci_free_consistent(phba->pcidev,
4668 phba->ctrl.mbox_mem_alloced.size,
4669 phba->ctrl.mbox_mem_alloced.va,
4670 phba->ctrl.mbox_mem_alloced.dma);
4671 }
4672
4673 static void beiscsi_remove(struct pci_dev *pcidev)
4674 {
4675
4676 struct beiscsi_hba *phba = NULL;
4677
4678 phba = pci_get_drvdata(pcidev);
4679 if (!phba) {
4680 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4681 return;
4682 }
4683
4684 beiscsi_destroy_def_ifaces(phba);
4685 beiscsi_quiesce(phba);
4686 iscsi_boot_destroy_kset(phba->boot_kset);
4687 iscsi_host_remove(phba->shost);
4688 pci_dev_put(phba->pcidev);
4689 iscsi_host_free(phba->shost);
4690 pci_disable_device(pcidev);
4691 }
4692
4693 static void beiscsi_shutdown(struct pci_dev *pcidev)
4694 {
4695
4696 struct beiscsi_hba *phba = NULL;
4697
4698 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4699 if (!phba) {
4700 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
4701 return;
4702 }
4703
4704 beiscsi_quiesce(phba);
4705 pci_disable_device(pcidev);
4706 }
4707
4708 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
4709 {
4710 int i, status;
4711
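/* One MSI-X vector per CPU for the I/O EQs, plus one used for MCC. */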
4712 for (i = 0; i <= phba->num_cpus; i++)
4713 phba->msix_entries[i].entry = i;
4714
4715 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
4716 (phba->num_cpus + 1));
4717 if (!status)
4718 phba->msix_enabled = true;
4719
4720 return;
4721 }
4722
4723 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4724 const struct pci_device_id *id)
4725 {
4726 struct beiscsi_hba *phba = NULL;
4727 struct hwi_controller *phwi_ctrlr;
4728 struct hwi_context_memory *phwi_context;
4729 struct be_eq_obj *pbe_eq;
4730 int ret, i;
4731
4732 ret = beiscsi_enable_pci(pcidev);
4733 if (ret < 0) {
4734 dev_err(&pcidev->dev,
4735 "beiscsi_dev_probe - Failed to enable pci device\n");
4736 return ret;
4737 }
4738
4739 phba = beiscsi_hba_alloc(pcidev);
4740 if (!phba) {
4741 dev_err(&pcidev->dev,
4742 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
4743 goto disable_pci;
4744 }
4745
4746 /* Initialize driver configuration parameters */
4747 beiscsi_hba_attrs_init(phba);
4748
4749 switch (pcidev->device) {
4750 case BE_DEVICE_ID1:
4751 case OC_DEVICE_ID1:
4752 case OC_DEVICE_ID2:
4753 phba->generation = BE_GEN2;
4754 phba->iotask_fn = beiscsi_iotask;
4755 break;
4756 case BE_DEVICE_ID2:
4757 case OC_DEVICE_ID3:
4758 phba->generation = BE_GEN3;
4759 phba->iotask_fn = beiscsi_iotask;
4760 break;
4761 case OC_SKH_ID1:
4762 phba->generation = BE_GEN4;
4763 phba->iotask_fn = beiscsi_iotask_v2;
4764 break;
4765 default:
4766 phba->generation = 0;
4767 }
4768 if (enable_msix)
4769 find_num_cpus(phba);
4770 else
4771 phba->num_cpus = 1;
4772
4773 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4774 "BM_%d : num_cpus = %d\n",
4775 phba->num_cpus);
4776
4777 if (enable_msix) {
4778 beiscsi_msix_enable(phba);
4779 if (!phba->msix_enabled)
4780 phba->num_cpus = 1;
4781 }
4782 ret = be_ctrl_init(phba, pcidev);
4783 if (ret) {
4784 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4785 "BM_%d : beiscsi_dev_probe-"
4786 "Failed in be_ctrl_init\n");
4787 goto hba_free;
4788 }
4789
4790 ret = beiscsi_cmd_reset_function(phba);
4791 if (ret) {
4792 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4793 "BM_%d : Reset Failed. Aborting Crashdump\n");
4794 goto hba_free;
4795 }
4796 ret = be_chk_reset_complete(phba);
4797 if (ret) {
4798 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4799 "BM_%d : Failed to get out of reset."
4800 "Aborting Crashdump\n");
4801 goto hba_free;
4802 }
4803
4804 spin_lock_init(&phba->io_sgl_lock);
4805 spin_lock_init(&phba->mgmt_sgl_lock);
4806 spin_lock_init(&phba->isr_lock);
4807 ret = mgmt_get_fw_config(&phba->ctrl, phba);
4808 if (ret != 0) {
4809 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4810 "BM_%d : Error getting fw config\n");
4811 goto free_port;
4812 }
4813 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
4814 beiscsi_get_params(phba);
4815 phba->shost->can_queue = phba->params.ios_per_ctrl;
4816 ret = beiscsi_init_port(phba);
4817 if (ret < 0) {
4818 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4819 "BM_%d : beiscsi_dev_probe-"
4820 "Failed in beiscsi_init_port\n");
4821 goto free_port;
4822 }
4823
4824 for (i = 0; i < MAX_MCC_CMD ; i++) {
4825 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
4826 phba->ctrl.mcc_tag[i] = i + 1;
4827 phba->ctrl.mcc_numtag[i + 1] = 0;
4828 phba->ctrl.mcc_tag_available++;
4829 }
4830
4831 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
4832
4833 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
4834 phba->shost->host_no);
4835 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
4836 if (!phba->wq) {
4837 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4838 "BM_%d : beiscsi_dev_probe-"
4839 "Failed to allocate work queue\n");
4840 goto free_twq;
4841 }
4842
4843
4844 phwi_ctrlr = phba->phwi_ctrlr;
4845 phwi_context = phwi_ctrlr->phwi_ctxt;
4846
4847 if (blk_iopoll_enabled) {
4848 for (i = 0; i < phba->num_cpus; i++) {
4849 pbe_eq = &phwi_context->be_eq[i];
4850 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
4851 be_iopoll);
4852 blk_iopoll_enable(&pbe_eq->iopoll);
4853 }
4854
4855 i = (phba->msix_enabled) ? i : 0;
4856 /* Work item for MCC handling */
4857 pbe_eq = &phwi_context->be_eq[i];
4858 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
4859 } else {
4860 if (phba->msix_enabled) {
4861 for (i = 0; i <= phba->num_cpus; i++) {
4862 pbe_eq = &phwi_context->be_eq[i];
4863 INIT_WORK(&pbe_eq->work_cqs,
4864 beiscsi_process_all_cqs);
4865 }
4866 } else {
4867 pbe_eq = &phwi_context->be_eq[0];
4868 INIT_WORK(&pbe_eq->work_cqs,
4869 beiscsi_process_all_cqs);
4870 }
4871 }
4872
4873 ret = beiscsi_init_irqs(phba);
4874 if (ret < 0) {
4875 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4876 "BM_%d : beiscsi_dev_probe-"
4877 "Failed to beiscsi_init_irqs\n");
4878 goto free_blkenbld;
4879 }
4880 hwi_enable_intr(phba);
4881
4882 if (beiscsi_setup_boot_info(phba))
4883 /*
4884 * log error but continue, because we may not be using
4885 * iscsi boot.
4886 */
4887 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4888 "BM_%d : Could not set up "
4889 "iSCSI boot info.\n");
4890
4891 beiscsi_create_def_ifaces(phba);
4892 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4893 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
4894 return 0;
4895
4896 free_blkenbld:
4897 destroy_workqueue(phba->wq);
4898 if (blk_iopoll_enabled)
4899 for (i = 0; i < phba->num_cpus; i++) {
4900 pbe_eq = &phwi_context->be_eq[i];
4901 blk_iopoll_disable(&pbe_eq->iopoll);
4902 }
4903 free_twq:
4904 beiscsi_clean_port(phba);
4905 beiscsi_free_mem(phba);
4906 free_port:
4907 pci_free_consistent(phba->pcidev,
4908 phba->ctrl.mbox_mem_alloced.size,
4909 phba->ctrl.mbox_mem_alloced.va,
4910 phba->ctrl.mbox_mem_alloced.dma);
4911 beiscsi_unmap_pci_function(phba);
4912 hba_free:
4913 if (phba->msix_enabled)
4914 pci_disable_msix(phba->pcidev);
4915 iscsi_host_remove(phba->shost);
4916 pci_dev_put(phba->pcidev);
4917 iscsi_host_free(phba->shost);
4918 disable_pci:
4919 pci_disable_device(pcidev);
4920 return ret;
4921 }
4922
4923 struct iscsi_transport beiscsi_iscsi_transport = {
4924 .owner = THIS_MODULE,
4925 .name = DRV_NAME,
4926 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
4927 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
4928 .create_session = beiscsi_session_create,
4929 .destroy_session = beiscsi_session_destroy,
4930 .create_conn = beiscsi_conn_create,
4931 .bind_conn = beiscsi_conn_bind,
4932 .destroy_conn = iscsi_conn_teardown,
4933 .attr_is_visible = be2iscsi_attr_is_visible,
4934 .set_iface_param = be2iscsi_iface_set_param,
4935 .get_iface_param = be2iscsi_iface_get_param,
4936 .set_param = beiscsi_set_param,
4937 .get_conn_param = iscsi_conn_get_param,
4938 .get_session_param = iscsi_session_get_param,
4939 .get_host_param = beiscsi_get_host_param,
4940 .start_conn = beiscsi_conn_start,
4941 .stop_conn = iscsi_conn_stop,
4942 .send_pdu = iscsi_conn_send_pdu,
4943 .xmit_task = beiscsi_task_xmit,
4944 .cleanup_task = beiscsi_cleanup_task,
4945 .alloc_pdu = beiscsi_alloc_pdu,
4946 .parse_pdu_itt = beiscsi_parse_pdu,
4947 .get_stats = beiscsi_conn_get_stats,
4948 .get_ep_param = beiscsi_ep_get_param,
4949 .ep_connect = beiscsi_ep_connect,
4950 .ep_poll = beiscsi_ep_poll,
4951 .ep_disconnect = beiscsi_ep_disconnect,
4952 .session_recovery_timedout = iscsi_session_recovery_timedout,
4953 .bsg_request = beiscsi_bsg_request,
4954 };
4955
4956 static struct pci_driver beiscsi_pci_driver = {
4957 .name = DRV_NAME,
4958 .probe = beiscsi_dev_probe,
4959 .remove = beiscsi_remove,
4960 .shutdown = beiscsi_shutdown,
4961 .id_table = beiscsi_pci_id_table
4962 };
4963
4964
4965 static int __init beiscsi_module_init(void)
4966 {
4967 int ret;
4968
4969 beiscsi_scsi_transport =
4970 iscsi_register_transport(&beiscsi_iscsi_transport);
4971 if (!beiscsi_scsi_transport) {
4972 printk(KERN_ERR
4973 "beiscsi_module_init - Unable to register beiscsi transport.\n");
4974 return -ENOMEM;
4975 }
4976 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
4977 &beiscsi_iscsi_transport);
4978
4979 ret = pci_register_driver(&beiscsi_pci_driver);
4980 if (ret) {
4981 printk(KERN_ERR
4982 "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
4983 goto unregister_iscsi_transport;
4984 }
4985 return 0;
4986
4987 unregister_iscsi_transport:
4988 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4989 return ret;
4990 }
4991
4992 static void __exit beiscsi_module_exit(void)
4993 {
4994 pci_unregister_driver(&beiscsi_pci_driver);
4995 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4996 }
4997
4998 module_init(beiscsi_module_init);
4999 module_exit(beiscsi_module_exit);