]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/scsi/be2iscsi/be_main.c
[SCSI] be2iscsi: Adding bsg interface for be2iscsi
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / be2iscsi / be_main.c
CommitLineData
6733b39a 1/**
255fa9a3 2 * Copyright (C) 2005 - 2011 Emulex
6733b39a
JK
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
255fa9a3 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
6733b39a
JK
11 *
12 * Contact Information:
255fa9a3 13 * linux-drivers@emulex.com
6733b39a 14 *
255fa9a3
JK
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
6733b39a 18 */
255fa9a3 19
6733b39a
JK
20#include <linux/reboot.h>
21#include <linux/delay.h>
5a0e3ad6 22#include <linux/slab.h>
6733b39a
JK
23#include <linux/interrupt.h>
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/string.h>
27#include <linux/kernel.h>
28#include <linux/semaphore.h>
c7acc5b8 29#include <linux/iscsi_boot_sysfs.h>
acf3368f 30#include <linux/module.h>
ffce3e2e 31#include <linux/bsg-lib.h>
6733b39a
JK
32
33#include <scsi/libiscsi.h>
ffce3e2e
JK
34#include <scsi/scsi_bsg_iscsi.h>
35#include <scsi/scsi_netlink.h>
6733b39a
JK
36#include <scsi/scsi_transport_iscsi.h>
37#include <scsi/scsi_transport.h>
38#include <scsi/scsi_cmnd.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_host.h>
41#include <scsi/scsi.h>
42#include "be_main.h"
43#include "be_iscsi.h"
44#include "be_mgmt.h"
45
/* blk_iopoll budget (module parameter); consumer not in this chunk. */
static unsigned int be_iopoll_budget = 10;
/* Max size (KB) of physically contiguous allocations; see parm desc below. */
static unsigned int be_max_phys_size = 64;
/* Non-zero requests MSI-X interrupts (module parameter). */
static unsigned int enable_msix = 1;
static unsigned int gcrashmode = 0;	/* NOTE(review): not referenced in this chunk */
static unsigned int num_hba = 0;	/* NOTE(review): not referenced in this chunk */

/* NOTE(review): MODULE_DEVICE_TABLE also appears immediately after the
 * id-table definition further down -- this earlier occurrence looks
 * duplicated; confirm before removing either one. */
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
		"contiguous memory that can be allocated."
		"Range is 16 - 128");
63
64static int beiscsi_slave_configure(struct scsi_device *sdev)
65{
66 blk_queue_max_segment_size(sdev->request_queue, 65536);
67 return 0;
68}
69
4183122d
JK
/**
 * beiscsi_eh_abort - SCSI error-handler abort callback.
 * @sc: the command to abort (its iscsi_task is stashed in sc->SCp.ptr)
 *
 * Builds a one-entry invalidate table for the ICD backing @sc, submits a
 * non-embedded mgmt_invalidate_icds MCC command to the firmware, waits for
 * its completion, and finally defers to libiscsi's iscsi_eh_abort() for the
 * iSCSI-level abort.  Returns SUCCESS/FAILED per the EH callback contract.
 */
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	/* Validate the task under the session lock: it may already have
	 * completed (or never been issued) by the time EH runs. */
	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->lock);
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb: single-entry table naming the task's ICD */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	/* DMA-coherent buffer for the non-embedded MCC payload */
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		SE_DEBUG(DBG_LVL_1,
			 "Failed to allocate memory for"
			 "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	/* tag == 0 means the MCC command was never queued */
	if (!tag) {
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_invalidate_icds could not be"
			     " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

		return FAILED;
	} else {
		/* block until the firmware completes the tagged MCC cmd */
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_abort(sc);
}
141
142static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
143{
144 struct iscsi_task *abrt_task;
145 struct beiscsi_io_task *abrt_io_task;
146 struct iscsi_conn *conn;
147 struct beiscsi_conn *beiscsi_conn;
148 struct beiscsi_hba *phba;
149 struct iscsi_session *session;
150 struct iscsi_cls_session *cls_session;
151 struct invalidate_command_table *inv_tbl;
3cbb7a74 152 struct be_dma_mem nonemb_cmd;
4183122d 153 unsigned int cid, tag, i, num_invalidate;
4183122d
JK
154
155 /* invalidate iocbs */
156 cls_session = starget_to_session(scsi_target(sc->device));
157 session = cls_session->dd_data;
158 spin_lock_bh(&session->lock);
db7f7709
JK
159 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
160 spin_unlock_bh(&session->lock);
161 return FAILED;
162 }
4183122d
JK
163 conn = session->leadconn;
164 beiscsi_conn = conn->dd_data;
165 phba = beiscsi_conn->phba;
166 cid = beiscsi_conn->beiscsi_conn_cid;
167 inv_tbl = phba->inv_tbl;
168 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
169 num_invalidate = 0;
170 for (i = 0; i < conn->session->cmds_max; i++) {
171 abrt_task = conn->session->cmds[i];
172 abrt_io_task = abrt_task->dd_data;
173 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
174 continue;
175
176 if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
177 continue;
178
179 inv_tbl->cid = cid;
180 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
181 num_invalidate++;
182 inv_tbl++;
183 }
184 spin_unlock_bh(&session->lock);
185 inv_tbl = phba->inv_tbl;
186
3cbb7a74
JK
187 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
188 sizeof(struct invalidate_commands_params_in),
189 &nonemb_cmd.dma);
190 if (nonemb_cmd.va == NULL) {
191 SE_DEBUG(DBG_LVL_1,
192 "Failed to allocate memory for"
193 "mgmt_invalidate_icds\n");
194 return FAILED;
195 }
196 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
197 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
198 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
199 cid, &nonemb_cmd);
4183122d
JK
200 if (!tag) {
201 shost_printk(KERN_WARNING, phba->shost,
202 "mgmt_invalidate_icds could not be"
203 " submitted\n");
3cbb7a74
JK
204 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
205 nonemb_cmd.va, nonemb_cmd.dma);
4183122d
JK
206 return FAILED;
207 } else {
208 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
209 phba->ctrl.mcc_numtag[tag]);
210 free_mcc_tag(&phba->ctrl, tag);
211 }
3cbb7a74
JK
212 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
213 nonemb_cmd.va, nonemb_cmd.dma);
4183122d 214 return iscsi_eh_device_reset(sc);
4183122d
JK
215}
216
c7acc5b8
JK
217static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
218{
219 struct beiscsi_hba *phba = data;
f457a46f
MC
220 struct mgmt_session_info *boot_sess = &phba->boot_sess;
221 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
c7acc5b8
JK
222 char *str = buf;
223 int rc;
224
225 switch (type) {
226 case ISCSI_BOOT_TGT_NAME:
227 rc = sprintf(buf, "%.*s\n",
f457a46f
MC
228 (int)strlen(boot_sess->target_name),
229 (char *)&boot_sess->target_name);
c7acc5b8
JK
230 break;
231 case ISCSI_BOOT_TGT_IP_ADDR:
f457a46f 232 if (boot_conn->dest_ipaddr.ip_type == 0x1)
c7acc5b8 233 rc = sprintf(buf, "%pI4\n",
f457a46f 234 (char *)&boot_conn->dest_ipaddr.ip_address);
c7acc5b8
JK
235 else
236 rc = sprintf(str, "%pI6\n",
f457a46f 237 (char *)&boot_conn->dest_ipaddr.ip_address);
c7acc5b8
JK
238 break;
239 case ISCSI_BOOT_TGT_PORT:
f457a46f 240 rc = sprintf(str, "%d\n", boot_conn->dest_port);
c7acc5b8
JK
241 break;
242
243 case ISCSI_BOOT_TGT_CHAP_NAME:
244 rc = sprintf(str, "%.*s\n",
f457a46f
MC
245 boot_conn->negotiated_login_options.auth_data.chap.
246 target_chap_name_length,
247 (char *)&boot_conn->negotiated_login_options.
248 auth_data.chap.target_chap_name);
c7acc5b8
JK
249 break;
250 case ISCSI_BOOT_TGT_CHAP_SECRET:
251 rc = sprintf(str, "%.*s\n",
f457a46f
MC
252 boot_conn->negotiated_login_options.auth_data.chap.
253 target_secret_length,
254 (char *)&boot_conn->negotiated_login_options.
255 auth_data.chap.target_secret);
c7acc5b8
JK
256 break;
257 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
258 rc = sprintf(str, "%.*s\n",
f457a46f
MC
259 boot_conn->negotiated_login_options.auth_data.chap.
260 intr_chap_name_length,
261 (char *)&boot_conn->negotiated_login_options.
262 auth_data.chap.intr_chap_name);
c7acc5b8
JK
263 break;
264 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
f457a46f
MC
265 rc = sprintf(str, "%.*s\n",
266 boot_conn->negotiated_login_options.auth_data.chap.
267 intr_secret_length,
268 (char *)&boot_conn->negotiated_login_options.
269 auth_data.chap.intr_secret);
c7acc5b8
JK
270 break;
271 case ISCSI_BOOT_TGT_FLAGS:
f457a46f 272 rc = sprintf(str, "2\n");
c7acc5b8
JK
273 break;
274 case ISCSI_BOOT_TGT_NIC_ASSOC:
f457a46f 275 rc = sprintf(str, "0\n");
c7acc5b8
JK
276 break;
277 default:
278 rc = -ENOSYS;
279 break;
280 }
281 return rc;
282}
283
284static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
285{
286 struct beiscsi_hba *phba = data;
287 char *str = buf;
288 int rc;
289
290 switch (type) {
291 case ISCSI_BOOT_INI_INITIATOR_NAME:
292 rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
293 break;
294 default:
295 rc = -ENOSYS;
296 break;
297 }
298 return rc;
299}
300
301static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
302{
303 struct beiscsi_hba *phba = data;
304 char *str = buf;
305 int rc;
306
307 switch (type) {
308 case ISCSI_BOOT_ETH_FLAGS:
f457a46f 309 rc = sprintf(str, "2\n");
c7acc5b8
JK
310 break;
311 case ISCSI_BOOT_ETH_INDEX:
f457a46f 312 rc = sprintf(str, "0\n");
c7acc5b8
JK
313 break;
314 case ISCSI_BOOT_ETH_MAC:
315 rc = beiscsi_get_macaddr(buf, phba);
316 if (rc < 0) {
317 SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
318 return rc;
319 }
320 break;
321 default:
322 rc = -ENOSYS;
323 break;
324 }
325 return rc;
326}
327
328
587a1f16 329static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
c7acc5b8 330{
587a1f16 331 umode_t rc;
c7acc5b8
JK
332
333 switch (type) {
334 case ISCSI_BOOT_TGT_NAME:
335 case ISCSI_BOOT_TGT_IP_ADDR:
336 case ISCSI_BOOT_TGT_PORT:
337 case ISCSI_BOOT_TGT_CHAP_NAME:
338 case ISCSI_BOOT_TGT_CHAP_SECRET:
339 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
340 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
341 case ISCSI_BOOT_TGT_NIC_ASSOC:
342 case ISCSI_BOOT_TGT_FLAGS:
343 rc = S_IRUGO;
344 break;
345 default:
346 rc = 0;
347 break;
348 }
349 return rc;
350}
351
587a1f16 352static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
c7acc5b8 353{
587a1f16 354 umode_t rc;
c7acc5b8
JK
355
356 switch (type) {
357 case ISCSI_BOOT_INI_INITIATOR_NAME:
358 rc = S_IRUGO;
359 break;
360 default:
361 rc = 0;
362 break;
363 }
364 return rc;
365}
366
367
587a1f16 368static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
c7acc5b8 369{
587a1f16 370 umode_t rc;
c7acc5b8
JK
371
372 switch (type) {
373 case ISCSI_BOOT_ETH_FLAGS:
374 case ISCSI_BOOT_ETH_MAC:
375 case ISCSI_BOOT_ETH_INDEX:
376 rc = S_IRUGO;
377 break;
378 default:
379 rc = 0;
380 break;
381 }
382 return rc;
383}
384
bfead3b2
JK
/*------------------- PCI Driver operations and data ----------------- */
/* PCI IDs this driver binds to: Emulex/ServerEngines BE2/BE3 and
 * OneConnect iSCSI functions. */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }		/* terminator */
};
/* NOTE(review): MODULE_DEVICE_TABLE for this table also appears near the
 * top of the file -- looks duplicated; confirm. */
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
395
6733b39a
JK
/* SCSI host template: libiscsi provides queueing and target handling;
 * this driver supplies its own slave_configure and EH abort/LUN-reset
 * callbacks (target reset falls back to libiscsi's session reset). */
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	/* vendor id reported over the iSCSI netlink/bsg interface */
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,

};
6733b39a 416
bfead3b2 417static struct scsi_transport_template *beiscsi_scsi_transport;
6733b39a
JK
418
/**
 * beiscsi_hba_alloc - allocate and register the Scsi_Host for one adapter.
 * @pcidev: the PCI function backing this host
 *
 * Allocates an iSCSI host with a beiscsi_hba as its private data, fills in
 * the host limits, takes a reference on @pcidev and registers the host.
 * Returns the zeroed beiscsi_hba on success, NULL on failure (reference
 * dropped and host freed).
 */
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
			"iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);	/* paired with pci_dev_put below/on teardown */
	pci_set_drvdata(pcidev, phba);

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;

	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}
452
/* Undo beiscsi_map_pci_bars(): iounmap whichever of the CSR, doorbell and
 * PCI-config BAR mappings were set up, clearing each pointer so a second
 * call (e.g. from an error path) is a no-op. */
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}
468
/**
 * beiscsi_map_pci_bars - ioremap the adapter's register BARs.
 * @phba: adapter instance
 * @pcidev: underlying PCI function
 *
 * Maps BAR 2 (CSR registers), BAR 4 (doorbells, first 128 KB only) and the
 * PCI-config register BAR, whose number depends on the ASIC generation
 * (BAR 1 on BE_GEN2, BAR 0 otherwise).  Both the ctrl and the phba copies
 * of each pointer refer to the same mapping.
 *
 * Returns 0 on success, -ENOMEM on any ioremap failure (all prior
 * mappings undone).
 */
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	/* doorbell BAR: only the first 128 KB is mapped */
	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	/* PCI-config registers live in a generation-dependent BAR */
	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
509
510static int beiscsi_enable_pci(struct pci_dev *pcidev)
511{
512 int ret;
513
514 ret = pci_enable_device(pcidev);
515 if (ret) {
516 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
517 "failed. Returning -ENODEV\n");
518 return ret;
519 }
520
bfead3b2 521 pci_set_master(pcidev);
6733b39a
JK
522 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
523 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
524 if (ret) {
525 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
526 pci_disable_device(pcidev);
527 return ret;
528 }
529 }
530 return 0;
531}
532
/**
 * be_ctrl_init - set up the control structure for talking to the adapter.
 * @phba: adapter instance
 * @pdev: underlying PCI function
 *
 * Maps the register BARs, allocates the DMA-coherent mailbox used for
 * embedded MCC/MBOX commands (over-allocated by 16 bytes so the used
 * portion can be 16-byte aligned via PTR_ALIGN) and initialises the
 * mailbox/MCC spinlocks.  Returns 0 on success, negative errno otherwise.
 */
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	/* +16 so both va and dma can be rounded up to a 16-byte boundary */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}
563
/**
 * beiscsi_get_params - derive driver sizing parameters from fw_config.
 * @phba: adapter whose fw_config (ICD/CID counts) was already queried
 *
 * Computes per-controller IO depth, connection counts, async-PDU counts
 * and EQ/CQ entry counts.  EQ/CQ sizes are rounded up to a multiple of
 * 512 entries with a floor of 1024 for the EQ.
 */
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	/* IOs = ICDs minus what is reserved for CIDs, TMFs and NOP-OUTs */
	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
				    - (phba->fw_config.iscsi_cid_count
				    + BE2_TMFS
				    + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
	/* two async PDU buffers per connection */
	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	/* round EQ size up to a multiple of 512 entries */
	phba->params.num_eq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
				    + BE2_TMFS) / 512) + 1) * 512;
	/* enforce a minimum of 1024 EQ entries */
	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
				     ? 1024 : phba->params.num_eq_entries;
	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
		 phba->params.num_eq_entries);
	/* CQ sized with the same rounding rule (no 1024 floor) */
	phba->params.num_cq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
				    + BE2_TMFS) / 512) + 1) * 512;
	phba->params.wrbs_per_cxn = 256;
}
589
590static void hwi_ring_eq_db(struct beiscsi_hba *phba,
591 unsigned int id, unsigned int clr_interrupt,
592 unsigned int num_processed,
593 unsigned char rearm, unsigned char event)
594{
595 u32 val = 0;
596 val |= id & DB_EQ_RING_ID_MASK;
597 if (rearm)
598 val |= 1 << DB_EQ_REARM_SHIFT;
599 if (clr_interrupt)
600 val |= 1 << DB_EQ_CLR_SHIFT;
601 if (event)
602 val |= 1 << DB_EQ_EVNT_SHIFT;
603 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
604 iowrite32(val, phba->db_va + DB_EQ_OFFSET);
605}
606
bfead3b2
JK
/**
 * be_isr_mcc - interrupt handler for the dedicated MCC EQ vector (MSI-X).
 * @irq: Not used
 * @dev_id: the be_eq_obj registered for this vector
 *
 * Drains valid entries from the MCC event queue.  Entries whose resource
 * id matches the MCC completion queue set todo_mcc_cq (under isr_lock);
 * the actual CQ processing is deferred to the work_cqs work item.  The EQ
 * doorbell is rung with rearm for every entry consumed.
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_eq_processed = 0;

	/* consume entries until the valid bit goes clear */
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_mcc_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		/* clear the valid bit so the entry can be reused */
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (phba->todo_mcc_cq)
		queue_work(phba->wq, &phba->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}
653
/**
 * be_isr_msix - interrupt handler for a per-CPU I/O EQ vector (MSI-X).
 * @irq: Not used
 * @dev_id: the be_eq_obj registered for this vector
 *
 * When blk_iopoll is enabled, each valid EQ entry schedules the vector's
 * iopoll instance and the doorbell is rung without rearm (iopoll rearms
 * when it finishes).  Otherwise the work is deferred to the work_cqs work
 * item and the doorbell is rung with rearm.
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		/* polled completion path: hand off to blk_iopoll */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		/* no rearm here: iopoll completion rearms the EQ */
		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

		return IRQ_HANDLED;
	} else {
		/* workqueue fallback path */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

		return IRQ_HANDLED;
	}
}
713
6733b39a
JK
/**
 * be_isr - interrupt handler for the single-vector (INTx/MSI) case.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 *
 * One EQ carries both MCC and I/O events here.  The CEV ISR register is
 * read first to distinguish our interrupt from a shared one (IRQ_NONE if
 * zero).  MCC events set todo_mcc_cq and are handled by the work_cqs work
 * item; I/O events are handed to blk_iopoll when enabled, otherwise also
 * deferred to the workqueue.  The EQ doorbell rearm mirrors be_isr_msix:
 * no rearm while iopoll owns I/O completions, rearm otherwise.
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	/* per-function interrupt status; zero means not our interrupt */
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			/* MCC event -> workqueue; I/O event -> iopoll */
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) == mcc->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
				num_mcceq_processed++;
			} else {
				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
					blk_iopoll_sched(&pbe_eq->iopoll);
				num_ioeq_processed++;
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
		}
		if (num_ioeq_processed || num_mcceq_processed) {
			if (phba->todo_mcc_cq)
				queue_work(phba->wq, &phba->work_cqs);

			/* rearm only if iopoll was not scheduled (it
			 * rearms the EQ itself when it completes) */
			if ((num_mcceq_processed) && (!num_ioeq_processed))
				hwi_ring_eq_db(phba, eq->id, 0,
					      (num_ioeq_processed +
					       num_mcceq_processed) , 1, 1);
			else
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 0, 1);

			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	} else {
		cq = &phwi_context->be_cq[0];
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {

			/* anything not for the I/O CQ is treated as MCC */
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			} else {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_ioeq_processed++;
		}
		if (phba->todo_cq || phba->todo_mcc_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_ioeq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0,
				       num_ioeq_processed, 1, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	}
}
821
822static int beiscsi_init_irqs(struct beiscsi_hba *phba)
823{
824 struct pci_dev *pcidev = phba->pcidev;
bfead3b2
JK
825 struct hwi_controller *phwi_ctrlr;
826 struct hwi_context_memory *phwi_context;
4f5af07e 827 int ret, msix_vec, i, j;
6733b39a 828
bfead3b2
JK
829 phwi_ctrlr = phba->phwi_ctrlr;
830 phwi_context = phwi_ctrlr->phwi_ctxt;
831
832 if (phba->msix_enabled) {
833 for (i = 0; i < phba->num_cpus; i++) {
8fcfb210
JK
834 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
835 GFP_KERNEL);
836 if (!phba->msi_name[i]) {
837 ret = -ENOMEM;
838 goto free_msix_irqs;
839 }
840
841 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
842 phba->shost->host_no, i);
bfead3b2 843 msix_vec = phba->msix_entries[i].vector;
8fcfb210
JK
844 ret = request_irq(msix_vec, be_isr_msix, 0,
845 phba->msi_name[i],
bfead3b2 846 &phwi_context->be_eq[i]);
4f5af07e
JK
847 if (ret) {
848 shost_printk(KERN_ERR, phba->shost,
849 "beiscsi_init_irqs-Failed to"
850 "register msix for i = %d\n", i);
8fcfb210 851 kfree(phba->msi_name[i]);
4f5af07e
JK
852 goto free_msix_irqs;
853 }
bfead3b2 854 }
8fcfb210
JK
855 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
856 if (!phba->msi_name[i]) {
857 ret = -ENOMEM;
858 goto free_msix_irqs;
859 }
860 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
861 phba->shost->host_no);
bfead3b2 862 msix_vec = phba->msix_entries[i].vector;
8fcfb210 863 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
bfead3b2 864 &phwi_context->be_eq[i]);
4f5af07e
JK
865 if (ret) {
866 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
867 "Failed to register beiscsi_msix_mcc\n");
8fcfb210 868 kfree(phba->msi_name[i]);
4f5af07e
JK
869 goto free_msix_irqs;
870 }
871
bfead3b2
JK
872 } else {
873 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
874 "beiscsi", phba);
875 if (ret) {
876 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
877 "Failed to register irq\\n");
878 return ret;
879 }
6733b39a
JK
880 }
881 return 0;
4f5af07e 882free_msix_irqs:
8fcfb210
JK
883 for (j = i - 1; j >= 0; j--) {
884 kfree(phba->msi_name[j]);
885 msix_vec = phba->msix_entries[j].vector;
4f5af07e 886 free_irq(msix_vec, &phwi_context->be_eq[j]);
8fcfb210 887 }
4f5af07e 888 return ret;
6733b39a
JK
889}
890
891static void hwi_ring_cq_db(struct beiscsi_hba *phba,
892 unsigned int id, unsigned int num_processed,
893 unsigned char rearm, unsigned char event)
894{
895 u32 val = 0;
896 val |= id & DB_CQ_RING_ID_MASK;
897 if (rearm)
898 val |= 1 << DB_CQ_REARM_SHIFT;
899 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
900 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
901}
902
6733b39a
JK
/**
 * beiscsi_process_async_pdu - hand an unsolicited PDU to libiscsi.
 * @beiscsi_conn: connection the PDU arrived on
 * @phba: adapter instance
 * @cid: connection id (unused here)
 * @ppdu: the PDU BHS as written by the hardware
 * @pdu_len: length of @ppdu (unused here)
 * @pbuffer: data segment, if any
 * @buf_len: length of @pbuffer
 *
 * Fixes up the PDU per opcode (NOP-IN drops its data, LOGIN/TEXT responses
 * get the original libiscsi ITT restored from the login task) and then
 * completes it through __iscsi_complete_pdu() under the session lock.
 * Returns 0 on success, 1 for an unrecognized opcode.
 */
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
						PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		/* NOP-IN is completed without a data segment */
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		/* a Reject PDU always carries a 48-byte data segment */
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		/* restore the ITT libiscsi assigned to the login task */
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "Unrecognized opcode 0x%x in async msg\n",
			     (ppdu->
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
			     & PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}
951
952static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
953{
954 struct sgl_handle *psgl_handle;
955
956 if (phba->io_sgl_hndl_avbl) {
957 SE_DEBUG(DBG_LVL_8,
457ff3b7 958 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
6733b39a
JK
959 phba->io_sgl_alloc_index);
960 psgl_handle = phba->io_sgl_hndl_base[phba->
961 io_sgl_alloc_index];
962 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
963 phba->io_sgl_hndl_avbl--;
bfead3b2
JK
964 if (phba->io_sgl_alloc_index == (phba->params.
965 ios_per_ctrl - 1))
6733b39a
JK
966 phba->io_sgl_alloc_index = 0;
967 else
968 phba->io_sgl_alloc_index++;
969 } else
970 psgl_handle = NULL;
971 return psgl_handle;
972}
973
974static void
975free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
976{
457ff3b7 977 SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d\n",
6733b39a
JK
978 phba->io_sgl_free_index);
979 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
980 /*
981 * this can happen if clean_task is called on a task that
982 * failed in xmit_task or alloc_pdu.
983 */
984 SE_DEBUG(DBG_LVL_8,
985 "Double Free in IO SGL io_sgl_free_index=%d,"
457ff3b7 986 "value there=%p\n", phba->io_sgl_free_index,
6733b39a
JK
987 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
988 return;
989 }
990 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
991 phba->io_sgl_hndl_avbl++;
992 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
993 phba->io_sgl_free_index = 0;
994 else
995 phba->io_sgl_free_index++;
996}
997
/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * Pops the next WRB handle from the connection's ring and links it to the
 * following handle via nxt_wrb_index.  At least two handles must remain
 * available because the next handle's index is read to build that link.
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cid];
	/* need >= 2: one to hand out, one to link nxt_wrb_index to */
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		/* advance the alloc cursor, wrapping at wrbs_per_cxn */
		if (pwrb_context->alloc_index ==
						(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;
		/* chain this WRB to the next one in the ring */
		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
						pwrb_context->alloc_index];
		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}
1029
1030/**
1031 * free_wrb_handle - To free the wrb handle back to pool
1032 * @phba: The hba pointer
1033 * @pwrb_context: The context to free from
1034 * @pwrb_handle: The wrb_handle to free
1035 *
1036 * This happens under session_lock until submission to chip
1037 */
1038static void
1039free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1040 struct wrb_handle *pwrb_handle)
1041{
32951dd8 1042 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
bfead3b2
JK
1043 pwrb_context->wrb_handles_available++;
1044 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
1045 pwrb_context->free_index = 0;
1046 else
1047 pwrb_context->free_index++;
1048
6733b39a 1049 SE_DEBUG(DBG_LVL_8,
bfead3b2 1050 "FREE WRB: pwrb_handle=%p free_index=0x%x"
457ff3b7 1051 "wrb_handles_available=%d\n",
6733b39a 1052 pwrb_handle, pwrb_context->free_index,
bfead3b2 1053 pwrb_context->wrb_handles_available);
6733b39a
JK
1054}
1055
1056static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1057{
1058 struct sgl_handle *psgl_handle;
1059
1060 if (phba->eh_sgl_hndl_avbl) {
1061 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1062 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
457ff3b7 1063 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
6733b39a
JK
1064 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
1065 phba->eh_sgl_hndl_avbl--;
1066 if (phba->eh_sgl_alloc_index ==
1067 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
1068 1))
1069 phba->eh_sgl_alloc_index = 0;
1070 else
1071 phba->eh_sgl_alloc_index++;
1072 } else
1073 psgl_handle = NULL;
1074 return psgl_handle;
1075}
1076
1077void
1078free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1079{
1080
457ff3b7 1081 SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
bfead3b2 1082 phba->eh_sgl_free_index);
6733b39a
JK
1083 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1084 /*
1085 * this can happen if clean_task is called on a task that
1086 * failed in xmit_task or alloc_pdu.
1087 */
1088 SE_DEBUG(DBG_LVL_8,
457ff3b7 1089 "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
6733b39a
JK
1090 phba->eh_sgl_free_index);
1091 return;
1092 }
1093 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
1094 phba->eh_sgl_hndl_avbl++;
1095 if (phba->eh_sgl_free_index ==
1096 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
1097 phba->eh_sgl_free_index = 0;
1098 else
1099 phba->eh_sgl_free_index++;
1100}
1101
1102static void
1103be_complete_io(struct beiscsi_conn *beiscsi_conn,
1104 struct iscsi_task *task, struct sol_cqe *psol)
1105{
1106 struct beiscsi_io_task *io_task = task->dd_data;
1107 struct be_status_bhs *sts_bhs =
1108 (struct be_status_bhs *)io_task->cmd_bhs;
1109 struct iscsi_conn *conn = beiscsi_conn->conn;
6733b39a
JK
1110 unsigned char *sense;
1111 u32 resid = 0, exp_cmdsn, max_cmdsn;
1112 u8 rsp, status, flags;
1113
bfead3b2 1114 exp_cmdsn = (psol->
6733b39a
JK
1115 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1116 & SOL_EXP_CMD_SN_MASK);
bfead3b2 1117 max_cmdsn = ((psol->
6733b39a
JK
1118 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1119 & SOL_EXP_CMD_SN_MASK) +
1120 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1121 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1122 rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
1123 & SOL_RESP_MASK) >> 16);
1124 status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
1125 & SOL_STS_MASK) >> 8);
1126 flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1127 & SOL_FLAGS_MASK) >> 24) | 0x80;
bd535451
JK
1128 if (!task->sc) {
1129 if (io_task->scsi_cmnd)
1130 scsi_dma_unmap(io_task->scsi_cmnd);
6733b39a 1131
bd535451
JK
1132 return;
1133 }
6733b39a
JK
1134 task->sc->result = (DID_OK << 16) | status;
1135 if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
1136 task->sc->result = DID_ERROR << 16;
1137 goto unmap;
1138 }
1139
1140 /* bidi not initially supported */
1141 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
1142 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
1143 32] & SOL_RES_CNT_MASK);
1144
1145 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
1146 task->sc->result = DID_ERROR << 16;
1147
1148 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
1149 scsi_set_resid(task->sc, resid);
1150 if (!status && (scsi_bufflen(task->sc) - resid <
1151 task->sc->underflow))
1152 task->sc->result = DID_ERROR << 16;
1153 }
1154 }
1155
1156 if (status == SAM_STAT_CHECK_CONDITION) {
4053a4be 1157 u16 sense_len;
bfead3b2 1158 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
4053a4be 1159
6733b39a 1160 sense = sts_bhs->sense_info + sizeof(unsigned short);
4053a4be 1161 sense_len = be16_to_cpu(*slen);
6733b39a
JK
1162 memcpy(task->sc->sense_buffer, sense,
1163 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
1164 }
756d29c8 1165
6733b39a
JK
1166 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
1167 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
1168 & SOL_RES_CNT_MASK)
1169 conn->rxdata_octets += (psol->
bfead3b2
JK
1170 dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
1171 & SOL_RES_CNT_MASK);
6733b39a
JK
1172 }
1173unmap:
1174 scsi_dma_unmap(io_task->scsi_cmnd);
1175 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
1176}
1177
1178static void
1179be_complete_logout(struct beiscsi_conn *beiscsi_conn,
1180 struct iscsi_task *task, struct sol_cqe *psol)
1181{
1182 struct iscsi_logout_rsp *hdr;
bfead3b2 1183 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1184 struct iscsi_conn *conn = beiscsi_conn->conn;
1185
1186 hdr = (struct iscsi_logout_rsp *)task->hdr;
7bd6e25c 1187 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
6733b39a
JK
1188 hdr->t2wait = 5;
1189 hdr->t2retain = 0;
1190 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1191 & SOL_FLAGS_MASK) >> 24) | 0x80;
1192 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
1193 32] & SOL_RESP_MASK);
1194 hdr->exp_cmdsn = cpu_to_be32(psol->
1195 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1196 & SOL_EXP_CMD_SN_MASK);
1197 hdr->max_cmdsn = be32_to_cpu((psol->
1198 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1199 & SOL_EXP_CMD_SN_MASK) +
1200 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1201 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
7bd6e25c
JK
1202 hdr->dlength[0] = 0;
1203 hdr->dlength[1] = 0;
1204 hdr->dlength[2] = 0;
6733b39a 1205 hdr->hlength = 0;
bfead3b2 1206 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1207 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1208}
1209
1210static void
1211be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
1212 struct iscsi_task *task, struct sol_cqe *psol)
1213{
1214 struct iscsi_tm_rsp *hdr;
1215 struct iscsi_conn *conn = beiscsi_conn->conn;
bfead3b2 1216 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1217
1218 hdr = (struct iscsi_tm_rsp *)task->hdr;
7bd6e25c 1219 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
6733b39a
JK
1220 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1221 & SOL_FLAGS_MASK) >> 24) | 0x80;
1222 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
1223 32] & SOL_RESP_MASK);
1224 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
bfead3b2 1225 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
6733b39a
JK
1226 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1227 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1228 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1229 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
bfead3b2 1230 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1231 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1232}
1233
1234static void
1235hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1236 struct beiscsi_hba *phba, struct sol_cqe *psol)
1237{
1238 struct hwi_wrb_context *pwrb_context;
bfead3b2 1239 struct wrb_handle *pwrb_handle = NULL;
6733b39a 1240 struct hwi_controller *phwi_ctrlr;
bfead3b2
JK
1241 struct iscsi_task *task;
1242 struct beiscsi_io_task *io_task;
6733b39a
JK
1243 struct iscsi_conn *conn = beiscsi_conn->conn;
1244 struct iscsi_session *session = conn->session;
1245
1246 phwi_ctrlr = phba->phwi_ctrlr;
32951dd8 1247 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
35e66019 1248 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
7da50879
JK
1249 SOL_CID_MASK) >> 6) -
1250 phba->fw_config.iscsi_cid_start];
32951dd8 1251 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
35e66019
JK
1252 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1253 32] & SOL_WRB_INDEX_MASK) >> 16)];
32951dd8 1254 task = pwrb_handle->pio_handle;
35e66019 1255
bfead3b2 1256 io_task = task->dd_data;
1282ab76 1257 spin_lock_bh(&phba->mgmt_sgl_lock);
bfead3b2 1258 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
1282ab76 1259 spin_unlock_bh(&phba->mgmt_sgl_lock);
6733b39a
JK
1260 spin_lock_bh(&session->lock);
1261 free_wrb_handle(phba, pwrb_context, pwrb_handle);
1262 spin_unlock_bh(&session->lock);
1263}
1264
1265static void
1266be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1267 struct iscsi_task *task, struct sol_cqe *psol)
1268{
1269 struct iscsi_nopin *hdr;
1270 struct iscsi_conn *conn = beiscsi_conn->conn;
bfead3b2 1271 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1272
1273 hdr = (struct iscsi_nopin *)task->hdr;
1274 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1275 & SOL_FLAGS_MASK) >> 24) | 0x80;
1276 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
1277 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
1278 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1279 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1280 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1281 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1282 hdr->opcode = ISCSI_OP_NOOP_IN;
bfead3b2 1283 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1284 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1285}
1286
1287static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1288 struct beiscsi_hba *phba, struct sol_cqe *psol)
1289{
1290 struct hwi_wrb_context *pwrb_context;
1291 struct wrb_handle *pwrb_handle;
1292 struct iscsi_wrb *pwrb = NULL;
1293 struct hwi_controller *phwi_ctrlr;
1294 struct iscsi_task *task;
bfead3b2 1295 unsigned int type;
6733b39a
JK
1296 struct iscsi_conn *conn = beiscsi_conn->conn;
1297 struct iscsi_session *session = conn->session;
1298
1299 phwi_ctrlr = phba->phwi_ctrlr;
32951dd8 1300 pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
35e66019 1301 (struct amap_sol_cqe, cid) / 32]
7da50879
JK
1302 & SOL_CID_MASK) >> 6) -
1303 phba->fw_config.iscsi_cid_start];
32951dd8 1304 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
35e66019
JK
1305 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1306 32] & SOL_WRB_INDEX_MASK) >> 16)];
32951dd8
JK
1307 task = pwrb_handle->pio_handle;
1308 pwrb = pwrb_handle->pwrb;
1309 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1310 WRB_TYPE_MASK) >> 28;
1311
bfead3b2
JK
1312 spin_lock_bh(&session->lock);
1313 switch (type) {
6733b39a
JK
1314 case HWH_TYPE_IO:
1315 case HWH_TYPE_IO_RD:
1316 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
dafab8e0 1317 ISCSI_OP_NOOP_OUT)
6733b39a 1318 be_complete_nopin_resp(beiscsi_conn, task, psol);
dafab8e0 1319 else
6733b39a
JK
1320 be_complete_io(beiscsi_conn, task, psol);
1321 break;
1322
1323 case HWH_TYPE_LOGOUT:
dafab8e0
JK
1324 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1325 be_complete_logout(beiscsi_conn, task, psol);
1326 else
1327 be_complete_tmf(beiscsi_conn, task, psol);
1328
6733b39a
JK
1329 break;
1330
1331 case HWH_TYPE_LOGIN:
1332 SE_DEBUG(DBG_LVL_1,
1333 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
457ff3b7 1334 "- Solicited path\n");
6733b39a
JK
1335 break;
1336
6733b39a
JK
1337 case HWH_TYPE_NOP:
1338 be_complete_nopin_resp(beiscsi_conn, task, psol);
1339 break;
1340
1341 default:
32951dd8 1342 shost_printk(KERN_WARNING, phba->shost,
35e66019
JK
1343 "In hwi_complete_cmd, unknown type = %d"
1344 "wrb_index 0x%x CID 0x%x\n", type,
1345 ((psol->dw[offsetof(struct amap_iscsi_wrb,
1346 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1347 ((psol->dw[offsetof(struct amap_sol_cqe,
1348 cid) / 32] & SOL_CID_MASK) >> 6));
6733b39a
JK
1349 break;
1350 }
35e66019 1351
6733b39a
JK
1352 spin_unlock_bh(&session->lock);
1353}
1354
1355static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1356 *pasync_ctx, unsigned int is_header,
1357 unsigned int host_write_ptr)
1358{
1359 if (is_header)
1360 return &pasync_ctx->async_entry[host_write_ptr].
1361 header_busy_list;
1362 else
1363 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1364}
1365
1366static struct async_pdu_handle *
1367hwi_get_async_handle(struct beiscsi_hba *phba,
1368 struct beiscsi_conn *beiscsi_conn,
1369 struct hwi_async_pdu_context *pasync_ctx,
1370 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1371{
1372 struct be_bus_address phys_addr;
1373 struct list_head *pbusy_list;
1374 struct async_pdu_handle *pasync_handle = NULL;
6733b39a
JK
1375 unsigned char is_header = 0;
1376
1377 phys_addr.u.a32.address_lo =
1378 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1379 ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1380 & PDUCQE_DPL_MASK) >> 16);
1381 phys_addr.u.a32.address_hi =
1382 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1383
1384 phys_addr.u.a64.address =
1385 *((unsigned long long *)(&phys_addr.u.a64.address));
1386
1387 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1388 & PDUCQE_CODE_MASK) {
1389 case UNSOL_HDR_NOTIFY:
1390 is_header = 1;
1391
1392 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1393 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1394 index) / 32] & PDUCQE_INDEX_MASK));
6733b39a
JK
1395 break;
1396 case UNSOL_DATA_NOTIFY:
1397 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1398 dw[offsetof(struct amap_i_t_dpdu_cqe,
1399 index) / 32] & PDUCQE_INDEX_MASK));
6733b39a
JK
1400 break;
1401 default:
1402 pbusy_list = NULL;
1403 shost_printk(KERN_WARNING, phba->shost,
457ff3b7 1404 "Unexpected code=%d\n",
6733b39a
JK
1405 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1406 code) / 32] & PDUCQE_CODE_MASK);
1407 return NULL;
1408 }
1409
6733b39a
JK
1410 WARN_ON(list_empty(pbusy_list));
1411 list_for_each_entry(pasync_handle, pbusy_list, link) {
dc63aac6 1412 if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
6733b39a
JK
1413 break;
1414 }
1415
1416 WARN_ON(!pasync_handle);
1417
7da50879
JK
1418 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1419 phba->fw_config.iscsi_cid_start;
6733b39a
JK
1420 pasync_handle->is_header = is_header;
1421 pasync_handle->buffer_len = ((pdpdu_cqe->
1422 dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1423 & PDUCQE_DPL_MASK) >> 16);
1424
1425 *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1426 index) / 32] & PDUCQE_INDEX_MASK);
1427 return pasync_handle;
1428}
1429
1430static unsigned int
1431hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1432 unsigned int is_header, unsigned int cq_index)
1433{
1434 struct list_head *pbusy_list;
1435 struct async_pdu_handle *pasync_handle;
1436 unsigned int num_entries, writables = 0;
1437 unsigned int *pep_read_ptr, *pwritables;
1438
dc63aac6 1439 num_entries = pasync_ctx->num_entries;
6733b39a
JK
1440 if (is_header) {
1441 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1442 pwritables = &pasync_ctx->async_header.writables;
6733b39a
JK
1443 } else {
1444 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1445 pwritables = &pasync_ctx->async_data.writables;
6733b39a
JK
1446 }
1447
1448 while ((*pep_read_ptr) != cq_index) {
1449 (*pep_read_ptr)++;
1450 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1451
1452 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1453 *pep_read_ptr);
1454 if (writables == 0)
1455 WARN_ON(list_empty(pbusy_list));
1456
1457 if (!list_empty(pbusy_list)) {
1458 pasync_handle = list_entry(pbusy_list->next,
1459 struct async_pdu_handle,
1460 link);
1461 WARN_ON(!pasync_handle);
1462 pasync_handle->consumed = 1;
1463 }
1464
1465 writables++;
1466 }
1467
1468 if (!writables) {
1469 SE_DEBUG(DBG_LVL_1,
1470 "Duplicate notification received - index 0x%x!!\n",
1471 cq_index);
1472 WARN_ON(1);
1473 }
1474
1475 *pwritables = *pwritables + writables;
1476 return 0;
1477}
1478
9728d8d0 1479static void hwi_free_async_msg(struct beiscsi_hba *phba,
6733b39a
JK
1480 unsigned int cri)
1481{
1482 struct hwi_controller *phwi_ctrlr;
1483 struct hwi_async_pdu_context *pasync_ctx;
1484 struct async_pdu_handle *pasync_handle, *tmp_handle;
1485 struct list_head *plist;
6733b39a
JK
1486
1487 phwi_ctrlr = phba->phwi_ctrlr;
1488 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1489
1490 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1491
1492 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1493 list_del(&pasync_handle->link);
1494
9728d8d0 1495 if (pasync_handle->is_header) {
6733b39a
JK
1496 list_add_tail(&pasync_handle->link,
1497 &pasync_ctx->async_header.free_list);
1498 pasync_ctx->async_header.free_entries++;
6733b39a
JK
1499 } else {
1500 list_add_tail(&pasync_handle->link,
1501 &pasync_ctx->async_data.free_list);
1502 pasync_ctx->async_data.free_entries++;
6733b39a
JK
1503 }
1504 }
1505
1506 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1507 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1508 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
6733b39a
JK
1509}
1510
1511static struct phys_addr *
1512hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1513 unsigned int is_header, unsigned int host_write_ptr)
1514{
1515 struct phys_addr *pasync_sge = NULL;
1516
1517 if (is_header)
1518 pasync_sge = pasync_ctx->async_header.ring_base;
1519 else
1520 pasync_sge = pasync_ctx->async_data.ring_base;
1521
1522 return pasync_sge + host_write_ptr;
1523}
1524
1525static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1526 unsigned int is_header)
1527{
1528 struct hwi_controller *phwi_ctrlr;
1529 struct hwi_async_pdu_context *pasync_ctx;
1530 struct async_pdu_handle *pasync_handle;
1531 struct list_head *pfree_link, *pbusy_list;
1532 struct phys_addr *pasync_sge;
1533 unsigned int ring_id, num_entries;
1534 unsigned int host_write_num;
1535 unsigned int writables;
1536 unsigned int i = 0;
1537 u32 doorbell = 0;
1538
1539 phwi_ctrlr = phba->phwi_ctrlr;
1540 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
dc63aac6 1541 num_entries = pasync_ctx->num_entries;
6733b39a
JK
1542
1543 if (is_header) {
6733b39a
JK
1544 writables = min(pasync_ctx->async_header.writables,
1545 pasync_ctx->async_header.free_entries);
1546 pfree_link = pasync_ctx->async_header.free_list.next;
1547 host_write_num = pasync_ctx->async_header.host_write_ptr;
1548 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1549 } else {
6733b39a
JK
1550 writables = min(pasync_ctx->async_data.writables,
1551 pasync_ctx->async_data.free_entries);
1552 pfree_link = pasync_ctx->async_data.free_list.next;
1553 host_write_num = pasync_ctx->async_data.host_write_ptr;
1554 ring_id = phwi_ctrlr->default_pdu_data.id;
1555 }
1556
1557 writables = (writables / 8) * 8;
1558 if (writables) {
1559 for (i = 0; i < writables; i++) {
1560 pbusy_list =
1561 hwi_get_async_busy_list(pasync_ctx, is_header,
1562 host_write_num);
1563 pasync_handle =
1564 list_entry(pfree_link, struct async_pdu_handle,
1565 link);
1566 WARN_ON(!pasync_handle);
1567 pasync_handle->consumed = 0;
1568
1569 pfree_link = pfree_link->next;
1570
1571 pasync_sge = hwi_get_ring_address(pasync_ctx,
1572 is_header, host_write_num);
1573
1574 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1575 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1576
1577 list_move(&pasync_handle->link, pbusy_list);
1578
1579 host_write_num++;
1580 host_write_num = host_write_num % num_entries;
1581 }
1582
1583 if (is_header) {
1584 pasync_ctx->async_header.host_write_ptr =
1585 host_write_num;
1586 pasync_ctx->async_header.free_entries -= writables;
1587 pasync_ctx->async_header.writables -= writables;
1588 pasync_ctx->async_header.busy_entries += writables;
1589 } else {
1590 pasync_ctx->async_data.host_write_ptr = host_write_num;
1591 pasync_ctx->async_data.free_entries -= writables;
1592 pasync_ctx->async_data.writables -= writables;
1593 pasync_ctx->async_data.busy_entries += writables;
1594 }
1595
1596 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1597 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1598 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1599 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1600 << DB_DEF_PDU_CQPROC_SHIFT;
1601
1602 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1603 }
1604}
1605
1606static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1607 struct beiscsi_conn *beiscsi_conn,
1608 struct i_t_dpdu_cqe *pdpdu_cqe)
1609{
1610 struct hwi_controller *phwi_ctrlr;
1611 struct hwi_async_pdu_context *pasync_ctx;
1612 struct async_pdu_handle *pasync_handle = NULL;
1613 unsigned int cq_index = -1;
1614
1615 phwi_ctrlr = phba->phwi_ctrlr;
1616 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1617
1618 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1619 pdpdu_cqe, &cq_index);
1620 BUG_ON(pasync_handle->is_header != 0);
1621 if (pasync_handle->consumed == 0)
1622 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1623 cq_index);
1624
1625 hwi_free_async_msg(phba, pasync_handle->cri);
1626 hwi_post_async_buffers(phba, pasync_handle->is_header);
1627}
1628
1629static unsigned int
1630hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1631 struct beiscsi_hba *phba,
1632 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1633{
1634 struct list_head *plist;
1635 struct async_pdu_handle *pasync_handle;
1636 void *phdr = NULL;
1637 unsigned int hdr_len = 0, buf_len = 0;
1638 unsigned int status, index = 0, offset = 0;
1639 void *pfirst_buffer = NULL;
1640 unsigned int num_buf = 0;
1641
1642 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1643
1644 list_for_each_entry(pasync_handle, plist, link) {
1645 if (index == 0) {
1646 phdr = pasync_handle->pbuffer;
1647 hdr_len = pasync_handle->buffer_len;
1648 } else {
1649 buf_len = pasync_handle->buffer_len;
1650 if (!num_buf) {
1651 pfirst_buffer = pasync_handle->pbuffer;
1652 num_buf++;
1653 }
1654 memcpy(pfirst_buffer + offset,
1655 pasync_handle->pbuffer, buf_len);
f2ba02b8 1656 offset += buf_len;
6733b39a
JK
1657 }
1658 index++;
1659 }
1660
1661 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
7da50879
JK
1662 (beiscsi_conn->beiscsi_conn_cid -
1663 phba->fw_config.iscsi_cid_start),
1664 phdr, hdr_len, pfirst_buffer,
f2ba02b8 1665 offset);
6733b39a 1666
605c6cd2 1667 hwi_free_async_msg(phba, cri);
6733b39a
JK
1668 return 0;
1669}
1670
1671static unsigned int
1672hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1673 struct beiscsi_hba *phba,
1674 struct async_pdu_handle *pasync_handle)
1675{
1676 struct hwi_async_pdu_context *pasync_ctx;
1677 struct hwi_controller *phwi_ctrlr;
1678 unsigned int bytes_needed = 0, status = 0;
1679 unsigned short cri = pasync_handle->cri;
1680 struct pdu_base *ppdu;
1681
1682 phwi_ctrlr = phba->phwi_ctrlr;
1683 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1684
1685 list_del(&pasync_handle->link);
1686 if (pasync_handle->is_header) {
1687 pasync_ctx->async_header.busy_entries--;
1688 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1689 hwi_free_async_msg(phba, cri);
1690 BUG();
1691 }
1692
1693 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1694 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1695 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1696 (unsigned short)pasync_handle->buffer_len;
1697 list_add_tail(&pasync_handle->link,
1698 &pasync_ctx->async_entry[cri].wait_queue.list);
1699
1700 ppdu = pasync_handle->pbuffer;
1701 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1702 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1703 0xFFFF0000) | ((be16_to_cpu((ppdu->
1704 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1705 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1706
1707 if (status == 0) {
1708 pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1709 bytes_needed;
1710
1711 if (bytes_needed == 0)
1712 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1713 pasync_ctx, cri);
1714 }
1715 } else {
1716 pasync_ctx->async_data.busy_entries--;
1717 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1718 list_add_tail(&pasync_handle->link,
1719 &pasync_ctx->async_entry[cri].wait_queue.
1720 list);
1721 pasync_ctx->async_entry[cri].wait_queue.
1722 bytes_received +=
1723 (unsigned short)pasync_handle->buffer_len;
1724
1725 if (pasync_ctx->async_entry[cri].wait_queue.
1726 bytes_received >=
1727 pasync_ctx->async_entry[cri].wait_queue.
1728 bytes_needed)
1729 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1730 pasync_ctx, cri);
1731 }
1732 }
1733 return status;
1734}
1735
1736static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1737 struct beiscsi_hba *phba,
1738 struct i_t_dpdu_cqe *pdpdu_cqe)
1739{
1740 struct hwi_controller *phwi_ctrlr;
1741 struct hwi_async_pdu_context *pasync_ctx;
1742 struct async_pdu_handle *pasync_handle = NULL;
1743 unsigned int cq_index = -1;
1744
1745 phwi_ctrlr = phba->phwi_ctrlr;
1746 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1747 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1748 pdpdu_cqe, &cq_index);
1749
1750 if (pasync_handle->consumed == 0)
1751 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1752 cq_index);
1753 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1754 hwi_post_async_buffers(phba, pasync_handle->is_header);
1755}
1756
756d29c8
JK
1757static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1758{
1759 struct be_queue_info *mcc_cq;
1760 struct be_mcc_compl *mcc_compl;
1761 unsigned int num_processed = 0;
1762
1763 mcc_cq = &phba->ctrl.mcc_obj.cq;
1764 mcc_compl = queue_tail_node(mcc_cq);
1765 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1766 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1767
1768 if (num_processed >= 32) {
1769 hwi_ring_cq_db(phba, mcc_cq->id,
1770 num_processed, 0, 0);
1771 num_processed = 0;
1772 }
1773 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1774 /* Interpret flags as an async trailer */
1775 if (is_link_state_evt(mcc_compl->flags))
1776 /* Interpret compl as a async link evt */
1777 beiscsi_async_link_state_process(phba,
1778 (struct be_async_event_link_state *) mcc_compl);
1779 else
1780 SE_DEBUG(DBG_LVL_1,
1781 " Unsupported Async Event, flags"
457ff3b7 1782 " = 0x%08x\n", mcc_compl->flags);
756d29c8
JK
1783 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1784 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1785 atomic_dec(&phba->ctrl.mcc_obj.q.used);
1786 }
1787
1788 mcc_compl->flags = 0;
1789 queue_tail_inc(mcc_cq);
1790 mcc_compl = queue_tail_node(mcc_cq);
1791 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1792 num_processed++;
1793 }
1794
1795 if (num_processed > 0)
1796 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1797
1798}
bfead3b2
JK
1799
1800static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
6733b39a 1801{
6733b39a
JK
1802 struct be_queue_info *cq;
1803 struct sol_cqe *sol;
1804 struct dmsg_cqe *dmsg;
1805 unsigned int num_processed = 0;
1806 unsigned int tot_nump = 0;
1807 struct beiscsi_conn *beiscsi_conn;
c2462288
JK
1808 struct beiscsi_endpoint *beiscsi_ep;
1809 struct iscsi_endpoint *ep;
bfead3b2 1810 struct beiscsi_hba *phba;
6733b39a 1811
bfead3b2 1812 cq = pbe_eq->cq;
6733b39a 1813 sol = queue_tail_node(cq);
bfead3b2 1814 phba = pbe_eq->phba;
6733b39a
JK
1815
1816 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1817 CQE_VALID_MASK) {
1818 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1819
32951dd8 1820 ep = phba->ep_array[(u32) ((sol->
c2462288
JK
1821 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1822 SOL_CID_MASK) >> 6) -
7da50879 1823 phba->fw_config.iscsi_cid_start];
32951dd8 1824
c2462288
JK
1825 beiscsi_ep = ep->dd_data;
1826 beiscsi_conn = beiscsi_ep->conn;
756d29c8 1827
6733b39a 1828 if (num_processed >= 32) {
bfead3b2 1829 hwi_ring_cq_db(phba, cq->id,
6733b39a
JK
1830 num_processed, 0, 0);
1831 tot_nump += num_processed;
1832 num_processed = 0;
1833 }
1834
1835 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1836 32] & CQE_CODE_MASK) {
1837 case SOL_CMD_COMPLETE:
1838 hwi_complete_cmd(beiscsi_conn, phba, sol);
1839 break;
1840 case DRIVERMSG_NOTIFY:
457ff3b7 1841 SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
6733b39a
JK
1842 dmsg = (struct dmsg_cqe *)sol;
1843 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1844 break;
1845 case UNSOL_HDR_NOTIFY:
bfead3b2
JK
1846 SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
1847 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1848 (struct i_t_dpdu_cqe *)sol);
1849 break;
6733b39a 1850 case UNSOL_DATA_NOTIFY:
bfead3b2 1851 SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
6733b39a
JK
1852 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1853 (struct i_t_dpdu_cqe *)sol);
1854 break;
1855 case CXN_INVALIDATE_INDEX_NOTIFY:
1856 case CMD_INVALIDATED_NOTIFY:
1857 case CXN_INVALIDATE_NOTIFY:
1858 SE_DEBUG(DBG_LVL_1,
1859 "Ignoring CQ Error notification for cmd/cxn"
1860 "invalidate\n");
1861 break;
1862 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1863 case CMD_KILLED_INVALID_STATSN_RCVD:
1864 case CMD_KILLED_INVALID_R2T_RCVD:
1865 case CMD_CXN_KILLED_LUN_INVALID:
1866 case CMD_CXN_KILLED_ICD_INVALID:
1867 case CMD_CXN_KILLED_ITT_INVALID:
1868 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1869 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
32951dd8 1870 SE_DEBUG(DBG_LVL_1,
6733b39a
JK
1871 "CQ Error notification for cmd.. "
1872 "code %d cid 0x%x\n",
1873 sol->dw[offsetof(struct amap_sol_cqe, code) /
1874 32] & CQE_CODE_MASK,
1875 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1876 32] & SOL_CID_MASK));
1877 break;
1878 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1879 SE_DEBUG(DBG_LVL_1,
1880 "Digest error on def pdu ring, dropping..\n");
1881 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1882 (struct i_t_dpdu_cqe *) sol);
1883 break;
1884 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1885 case CXN_KILLED_BURST_LEN_MISMATCH:
1886 case CXN_KILLED_AHS_RCVD:
1887 case CXN_KILLED_HDR_DIGEST_ERR:
1888 case CXN_KILLED_UNKNOWN_HDR:
1889 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1890 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1891 case CXN_KILLED_TIMED_OUT:
1892 case CXN_KILLED_FIN_RCVD:
1893 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1894 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1895 case CXN_KILLED_OVER_RUN_RESIDUAL:
1896 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1897 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
32951dd8 1898 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
6733b39a
JK
1899 "0x%x...\n",
1900 sol->dw[offsetof(struct amap_sol_cqe, code) /
1901 32] & CQE_CODE_MASK,
7da50879
JK
1902 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1903 32] & CQE_CID_MASK));
6733b39a
JK
1904 iscsi_conn_failure(beiscsi_conn->conn,
1905 ISCSI_ERR_CONN_FAILED);
1906 break;
1907 case CXN_KILLED_RST_SENT:
1908 case CXN_KILLED_RST_RCVD:
32951dd8 1909 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
bfead3b2 1910 "received/sent on CID 0x%x...\n",
6733b39a
JK
1911 sol->dw[offsetof(struct amap_sol_cqe, code) /
1912 32] & CQE_CODE_MASK,
7da50879
JK
1913 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1914 32] & CQE_CID_MASK));
6733b39a
JK
1915 iscsi_conn_failure(beiscsi_conn->conn,
1916 ISCSI_ERR_CONN_FAILED);
1917 break;
1918 default:
1919 SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1920 "received on CID 0x%x...\n",
1921 sol->dw[offsetof(struct amap_sol_cqe, code) /
1922 32] & CQE_CODE_MASK,
7da50879
JK
1923 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1924 32] & CQE_CID_MASK));
6733b39a
JK
1925 break;
1926 }
1927
1928 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1929 queue_tail_inc(cq);
1930 sol = queue_tail_node(cq);
1931 num_processed++;
1932 }
1933
1934 if (num_processed > 0) {
1935 tot_nump += num_processed;
bfead3b2 1936 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
6733b39a
JK
1937 }
1938 return tot_nump;
1939}
1940
/**
 * beiscsi_process_all_cqs - deferred (workqueue) CQ processing.
 * @work: the work_struct embedded in struct beiscsi_hba (work_cqs)
 *
 * Runs in process context.  Checks the todo_mcc_cq / todo_cq flags set by
 * the ISR, clears each flag under isr_lock, and services the corresponding
 * completion queue.  With MSI-X enabled the MCC event queue lives at index
 * num_cpus, otherwise everything shares be_eq[0].
 */
void beiscsi_process_all_cqs(struct work_struct *work)
{
	unsigned long flags;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba =
		container_of(work, struct beiscsi_hba, work_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* MSI-X: the extra EQ after the per-CPU ones is used for MCC. */
	if (phba->msix_enabled)
		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
	else
		pbe_eq = &phwi_context->be_eq[0];

	/* NOTE(review): the flag is tested outside isr_lock and cleared
	 * inside it; an ISR setting the flag between test and clear is
	 * still handled on the next work invocation — confirm intended.
	 */
	if (phba->todo_mcc_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_mcc_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_mcc_isr(phba);
	}

	if (phba->todo_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_cq(pbe_eq);
	}
}
1971
1972static int be_iopoll(struct blk_iopoll *iop, int budget)
1973{
1974 static unsigned int ret;
1975 struct beiscsi_hba *phba;
bfead3b2 1976 struct be_eq_obj *pbe_eq;
6733b39a 1977
bfead3b2
JK
1978 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1979 ret = beiscsi_process_cq(pbe_eq);
6733b39a 1980 if (ret < budget) {
bfead3b2 1981 phba = pbe_eq->phba;
6733b39a 1982 blk_iopoll_complete(iop);
bfead3b2
JK
1983 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1984 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
6733b39a
JK
1985 }
1986 return ret;
1987}
1988
/**
 * hwi_write_sgl - program a WRB and its SGL fragment for a data task.
 * @pwrb: work request block to fill
 * @sg: DMA-mapped scatterlist for the I/O
 * @num_sg: number of mapped scatterlist entries
 * @io_task: per-task context holding the BHS buffer and SGL fragment
 *
 * The WRB itself carries the BHS address plus at most the first two data
 * SGEs inline (sge0/sge1); the full scatterlist is then written into the
 * task's SGL fragment: entry 0 describes the BHS, entry 1 is skipped, and
 * data SGEs follow from entry 2 onward with a running byte offset.  The
 * final SGE in each list is marked "last".
 */
static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
	      unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	l_sg = sg;
	/* Only the first two SGEs fit directly in the WRB. */
	for (index = 0; (index < num_sg) && (index < 2); index++,
	     sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
				      ((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
				      ((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
				      sg_len);
			sge_len = sg_len;
		} else {
			/* sge1's r2t offset is the length of sge0. */
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
				      pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
				      ((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
				      ((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
				      sg_len);
		}
	}
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	/* SGL entry 0: the BHS (length excludes the 2 trailing pad bytes). */
	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);

	/* Mark which inline WRB SGE (if any) is the last one. */
	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0);
	}
	sg = l_sg;
	/* Data SGEs start at SGL entry 2 (entry 1 is left zeroed). */
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      (addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      (addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	/* Flag the final data SGE. */
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
2076
2077static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2078{
2079 struct iscsi_sge *psgl;
2080 unsigned long long addr;
2081 struct beiscsi_io_task *io_task = task->dd_data;
2082 struct beiscsi_conn *beiscsi_conn = io_task->conn;
2083 struct beiscsi_hba *phba = beiscsi_conn->phba;
2084
2085 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2086 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2087 io_task->bhs_pa.u.a32.address_lo);
2088 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2089 io_task->bhs_pa.u.a32.address_hi);
2090
2091 if (task->data) {
2092 if (task->data_count) {
2093 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
2094 addr = (u64) pci_map_single(phba->pcidev,
2095 task->data,
2096 task->data_count, 1);
2097 } else {
2098 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2099 addr = 0;
2100 }
2101 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
457ff3b7 2102 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2103 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
457ff3b7 2104 ((u32)(addr >> 32)));
6733b39a
JK
2105 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2106 task->data_count);
2107
2108 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2109 } else {
2110 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2111 addr = 0;
2112 }
2113
2114 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2115
2116 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2117
2118 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2119 io_task->bhs_pa.u.a32.address_hi);
2120 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2121 io_task->bhs_pa.u.a32.address_lo);
2122 if (task->data) {
2123 psgl++;
2124 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2125 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2126 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2127 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2128 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2129 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2130
2131 psgl++;
2132 if (task->data) {
2133 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
457ff3b7 2134 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2135 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
457ff3b7 2136 ((u32)(addr >> 32)));
6733b39a
JK
2137 }
2138 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2139 }
2140 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2141}
2142
/**
 * beiscsi_find_mem_req - compute the size of every memory region the HBA
 * needs and record it in phba->mem_req[], indexed by the ISCSI_MEM_* /
 * HWI_MEM_* region enums.  Pure arithmetic; allocation happens later in
 * beiscsi_alloc_mem().
 */
static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
{
	unsigned int num_cq_pages, num_async_pdu_buf_pages;
	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;

	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
				      sizeof(struct sol_cqe));
	num_async_pdu_buf_pages =
		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
			       phba->params.defpdu_hdr_sz);
	num_async_pdu_buf_sgl_pages =
		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
			       sizeof(struct phys_addr));
	num_async_pdu_data_pages =
		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
			       phba->params.defpdu_data_sz);
	num_async_pdu_data_sgl_pages =
		PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
			       sizeof(struct phys_addr));

	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);

	/* Two template headers: data-out and nop-out (see
	 * iscsi_init_global_templates()). */
	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
						 BE_ISCSI_PDU_HEADER_SIZE;
	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
		sizeof(struct hwi_context_memory);


	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
		* (phba->params.wrbs_per_cxn)
		* phba->params.cxns_per_ctrl;
	wrb_sz_per_cxn = sizeof(struct wrb_handle) *
			 (phba->params.wrbs_per_cxn);
	/* Rounded to a power of two so the handle pool fragments evenly. */
	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
				      phba->params.cxns_per_ctrl);

	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
		phba->params.icds_per_ctrl;
	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;

	phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
		num_async_pdu_buf_pages * PAGE_SIZE;
	phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
		num_async_pdu_data_pages * PAGE_SIZE;
	phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
		num_async_pdu_buf_sgl_pages * PAGE_SIZE;
	phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
		num_async_pdu_data_sgl_pages * PAGE_SIZE;
	phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
		phba->params.asyncpdus_per_ctrl *
		sizeof(struct async_pdu_handle);
	phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
		phba->params.asyncpdus_per_ctrl *
		sizeof(struct async_pdu_handle);
	phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
		sizeof(struct hwi_async_pdu_context) +
		(phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
}
2203
/**
 * beiscsi_alloc_mem - allocate every region sized by beiscsi_find_mem_req().
 * @phba: adapter instance
 *
 * Each region may be satisfied by several DMA-coherent fragments: the
 * allocation starts at min(be_max_phys_size KB, remaining) and halves (or
 * rounds down to a power of two) on failure until BE_MIN_MEM_SIZE, at
 * which point the whole init fails.  Fragment bookkeeping is staged in
 * mem_arr_orig and then copied to a right-sized mem_descr->mem_array.
 *
 * Returns 0 on success, -ENOMEM on failure (with everything allocated so
 * far released).
 */
static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	dma_addr_t bus_add;
	struct mem_array *mem_arr, *mem_arr_orig;
	unsigned int i, j, alloc_size, curr_alloc_size;

	phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
	if (!phba->phwi_ctrlr)
		return -ENOMEM;

	/* kcalloc zeroes: every mem_descr->mem_array starts out NULL. */
	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
				 GFP_KERNEL);
	if (!phba->init_mem) {
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
			       GFP_KERNEL);
	if (!mem_arr_orig) {
		kfree(phba->init_mem);
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_descr = phba->init_mem;
	for (i = 0; i < SE_MEM_MAX; i++) {
		j = 0;
		mem_arr = mem_arr_orig;
		alloc_size = phba->mem_req[i];
		memset(mem_arr, 0, sizeof(struct mem_array) *
		       BEISCSI_MAX_FRAGS_INIT);
		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
		do {
			mem_arr->virtual_address = pci_alloc_consistent(
							phba->pcidev,
							curr_alloc_size,
							&bus_add);
			if (!mem_arr->virtual_address) {
				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
					goto free_mem;
				/* Shrink the request and retry: round down
				 * to a power of two, or halve if it already
				 * is one. */
				if (curr_alloc_size -
					rounddown_pow_of_two(curr_alloc_size))
					curr_alloc_size = rounddown_pow_of_two
							     (curr_alloc_size);
				else
					curr_alloc_size = curr_alloc_size / 2;
			} else {
				mem_arr->bus_address.u.
					a64.address = (__u64) bus_add;
				mem_arr->size = curr_alloc_size;
				alloc_size -= curr_alloc_size;
				curr_alloc_size = min(be_max_phys_size *
						      1024, alloc_size);
				j++;
				mem_arr++;
			}
		} while (alloc_size);
		mem_descr->num_elements = j;
		mem_descr->size_in_bytes = phba->mem_req[i];
		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
					       GFP_KERNEL);
		if (!mem_descr->mem_array)
			goto free_mem;

		memcpy(mem_descr->mem_array, mem_arr_orig,
		       sizeof(struct mem_array) * j);
		mem_descr++;
	}
	kfree(mem_arr_orig);
	return 0;
free_mem:
	/* Unwind: free fragments of the current (partial) descriptor,
	 * then every fully-built descriptor before it.
	 *
	 * NOTE(review): for the current descriptor i, mem_array has not
	 * been kmalloc'd yet (it is still NULL from kcalloc), yet the
	 * inner loop reads mem_descr->mem_array[j - 1] when j > 0 — this
	 * looks like it dereferences NULL on a partial-fragment failure;
	 * verify against the upstream driver history.
	 */
	mem_descr->num_elements = j;
	while ((i) || (j)) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			pci_free_consistent(phba->pcidev,
					    mem_descr->mem_array[j - 1].size,
					    mem_descr->mem_array[j - 1].
					    virtual_address,
					    (unsigned long)mem_descr->
					    mem_array[j - 1].
					    bus_address.u.a64.address);
		}
		if (i) {
			i--;
			kfree(mem_descr->mem_array);
			mem_descr--;
		}
	}
	kfree(mem_arr_orig);
	kfree(phba->init_mem);
	kfree(phba->phwi_ctrlr);
	return -ENOMEM;
}
2299
/* Size every memory region, then allocate them all in one pass.
 * Returns 0 on success or -ENOMEM from beiscsi_alloc_mem(). */
static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	beiscsi_find_mem_req(phba);
	return beiscsi_alloc_mem(phba);
}
2305
/**
 * iscsi_init_global_templates - build the two template PDU headers kept in
 * the ISCSI_MEM_GLOBAL_HEADER region: a data-out header, followed (at a
 * BE_ISCSI_PDU_HEADER_SIZE offset) by a nop-out header with TTT=0xFFFFFFFF
 * and the final (F) bit set.
 */
static void iscsi_init_global_templates(struct beiscsi_hba *phba)
{
	struct pdu_data_out *pdata_out;
	struct pdu_nop_out *pnop_out;
	struct be_mem_descriptor *mem_descr;

	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
	pdata_out =
	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);

	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
		      IIOC_SCSI_DATA);

	/* The nop-out template lives directly after the data-out one. */
	pnop_out =
	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);

	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
}
2330
3ec78271 2331static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
6733b39a
JK
2332{
2333 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
3ec78271 2334 struct wrb_handle *pwrb_handle = NULL;
6733b39a
JK
2335 struct hwi_controller *phwi_ctrlr;
2336 struct hwi_wrb_context *pwrb_context;
3ec78271
JK
2337 struct iscsi_wrb *pwrb = NULL;
2338 unsigned int num_cxn_wrbh = 0;
2339 unsigned int num_cxn_wrb = 0, j, idx = 0, index;
6733b39a
JK
2340
2341 mem_descr_wrbh = phba->init_mem;
2342 mem_descr_wrbh += HWI_MEM_WRBH;
2343
2344 mem_descr_wrb = phba->init_mem;
2345 mem_descr_wrb += HWI_MEM_WRB;
6733b39a
JK
2346 phwi_ctrlr = phba->phwi_ctrlr;
2347
2348 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2349 pwrb_context = &phwi_ctrlr->wrb_context[index];
6733b39a
JK
2350 pwrb_context->pwrb_handle_base =
2351 kzalloc(sizeof(struct wrb_handle *) *
2352 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271
JK
2353 if (!pwrb_context->pwrb_handle_base) {
2354 shost_printk(KERN_ERR, phba->shost,
2355 "Mem Alloc Failed. Failing to load\n");
2356 goto init_wrb_hndl_failed;
2357 }
6733b39a
JK
2358 pwrb_context->pwrb_handle_basestd =
2359 kzalloc(sizeof(struct wrb_handle *) *
2360 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271
JK
2361 if (!pwrb_context->pwrb_handle_basestd) {
2362 shost_printk(KERN_ERR, phba->shost,
2363 "Mem Alloc Failed. Failing to load\n");
2364 goto init_wrb_hndl_failed;
2365 }
2366 if (!num_cxn_wrbh) {
2367 pwrb_handle =
2368 mem_descr_wrbh->mem_array[idx].virtual_address;
2369 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2370 ((sizeof(struct wrb_handle)) *
2371 phba->params.wrbs_per_cxn));
2372 idx++;
2373 }
2374 pwrb_context->alloc_index = 0;
2375 pwrb_context->wrb_handles_available = 0;
2376 pwrb_context->free_index = 0;
2377
6733b39a 2378 if (num_cxn_wrbh) {
6733b39a
JK
2379 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2380 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2381 pwrb_context->pwrb_handle_basestd[j] =
2382 pwrb_handle;
2383 pwrb_context->wrb_handles_available++;
bfead3b2 2384 pwrb_handle->wrb_index = j;
6733b39a
JK
2385 pwrb_handle++;
2386 }
6733b39a
JK
2387 num_cxn_wrbh--;
2388 }
2389 }
2390 idx = 0;
ed58ea2a 2391 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
6733b39a 2392 pwrb_context = &phwi_ctrlr->wrb_context[index];
3ec78271 2393 if (!num_cxn_wrb) {
6733b39a 2394 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
7c56533c 2395 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
3ec78271
JK
2396 ((sizeof(struct iscsi_wrb) *
2397 phba->params.wrbs_per_cxn));
2398 idx++;
2399 }
2400
2401 if (num_cxn_wrb) {
6733b39a
JK
2402 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2403 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2404 pwrb_handle->pwrb = pwrb;
2405 pwrb++;
2406 }
2407 num_cxn_wrb--;
2408 }
2409 }
3ec78271
JK
2410 return 0;
2411init_wrb_hndl_failed:
2412 for (j = index; j > 0; j--) {
2413 pwrb_context = &phwi_ctrlr->wrb_context[j];
2414 kfree(pwrb_context->pwrb_handle_base);
2415 kfree(pwrb_context->pwrb_handle_basestd);
2416 }
2417 return -ENOMEM;
6733b39a
JK
2418}
2419
/**
 * hwi_init_async_pdu_ctx - lay out the async (default) PDU context.
 * @phba: adapter instance
 *
 * Places the hwi_async_pdu_context structure in HWI_MEM_ASYNC_PDU_CONTEXT,
 * wires its header/data sub-structures to the BUF/RING/HANDLE regions, and
 * builds the free lists: one handle per async PDU for headers and for data.
 * Header buffers come from a single fragment; data buffers may span several
 * fragments, so the walk switches fragments (idx++) when num_async_data
 * buffers of the current fragment are used up.
 */
static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index, idx, num_per_mem, num_async_data;
	struct be_mem_descriptor *mem_descr;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
				mem_descr->mem_array[0].virtual_address;
	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
	memset(pasync_ctx, 0, sizeof(*pasync_ctx));

	pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
	pasync_ctx->buffer_size = p->defpdu_hdr_sz;

	/* Header buffer pool. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_header.va_base =
			mem_descr->mem_array[0].virtual_address;

	pasync_ctx->async_header.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Header ring (physical-address ring posted to hardware). */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	pasync_ctx->async_header.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Header handle pool. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_header.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);


	/* Data ring. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_data.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Data handle pool. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
	if (!mem_descr->mem_array[0].virtual_address)
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_data.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

	pasync_header_h =
		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
	pasync_data_h =
		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;

	/* Data buffer pool (may be split over multiple fragments). */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	idx = 0;
	pasync_ctx->async_data.va_base =
			mem_descr->mem_array[idx].virtual_address;
	pasync_ctx->async_data.pa_base.u.a64.address =
			mem_descr->mem_array[idx].bus_address.u.a64.address;

	num_async_data = ((mem_descr->mem_array[idx].size) /
			phba->params.defpdu_data_sz);
	num_per_mem = 0;

	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
		pasync_header_h->cri = -1;
		pasync_header_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_header_h->link);
		pasync_header_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_header.va_base) +
			(p->defpdu_hdr_sz * index));

		pasync_header_h->pa.u.a64.address =
			pasync_ctx->async_header.pa_base.u.a64.address +
			(p->defpdu_hdr_sz * index);

		list_add_tail(&pasync_header_h->link,
				&pasync_ctx->async_header.free_list);
		pasync_header_h++;
		pasync_ctx->async_header.free_entries++;
		pasync_ctx->async_header.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
			       header_busy_list);
		pasync_data_h->cri = -1;
		pasync_data_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_data_h->link);

		/* Current fragment exhausted: step to the next one. */
		if (!num_async_data) {
			num_per_mem = 0;
			idx++;
			pasync_ctx->async_data.va_base =
				mem_descr->mem_array[idx].virtual_address;
			pasync_ctx->async_data.pa_base.u.a64.address =
				mem_descr->mem_array[idx].
				bus_address.u.a64.address;

			num_async_data = ((mem_descr->mem_array[idx].size) /
					phba->params.defpdu_data_sz);
		}
		pasync_data_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_data.va_base) +
			(p->defpdu_data_sz * num_per_mem));

		pasync_data_h->pa.u.a64.address =
		    pasync_ctx->async_data.pa_base.u.a64.address +
		    (p->defpdu_data_sz * num_per_mem);
		num_per_mem++;
		num_async_data--;

		list_add_tail(&pasync_data_h->link,
			      &pasync_ctx->async_data.free_list);
		pasync_data_h++;
		pasync_ctx->async_data.free_entries++;
		pasync_ctx->async_data.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
	}

	pasync_ctx->async_header.host_write_ptr = 0;
	pasync_ctx->async_header.ep_read_ptr = -1;
	pasync_ctx->async_data.host_write_ptr = 0;
	pasync_ctx->async_data.ep_read_ptr = -1;
}
2596
2597static int
2598be_sgl_create_contiguous(void *virtual_address,
2599 u64 physical_address, u32 length,
2600 struct be_dma_mem *sgl)
2601{
2602 WARN_ON(!virtual_address);
2603 WARN_ON(!physical_address);
2604 WARN_ON(!length > 0);
2605 WARN_ON(!sgl);
2606
2607 sgl->va = virtual_address;
457ff3b7 2608 sgl->dma = (unsigned long)physical_address;
6733b39a
JK
2609 sgl->size = length;
2610
2611 return 0;
2612}
2613
/* Reset a contiguous-region descriptor to the empty state (va == NULL). */
static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
{
	memset(sgl, 0, sizeof(*sgl));
}
2618
/**
 * hwi_build_be_sgl_arr - (re)build @sgl from one mem_array fragment.
 * Any previous descriptor content is cleared first.  The return value of
 * be_sgl_create_contiguous() is ignored — it is always 0.
 */
static void
hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
		     struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous(pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}
2630
2631static void
2632hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2633 struct mem_array *pmem, struct be_dma_mem *sgl)
2634{
2635 if (sgl->va)
2636 be_sgl_destroy_contiguous(sgl);
2637
2638 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2639 pmem->bus_address.u.a64.address,
2640 pmem->size, sgl);
2641}
2642
2643static int be_fill_queue(struct be_queue_info *q,
2644 u16 len, u16 entry_size, void *vaddress)
2645{
2646 struct be_dma_mem *mem = &q->dma_mem;
2647
2648 memset(q, 0, sizeof(*q));
2649 q->len = len;
2650 q->entry_size = entry_size;
2651 mem->size = len * entry_size;
2652 mem->va = vaddress;
2653 if (!mem->va)
2654 return -ENOMEM;
2655 memset(mem->va, 0, mem->size);
2656 return 0;
2657}
2658
bfead3b2 2659static int beiscsi_create_eqs(struct beiscsi_hba *phba,
6733b39a
JK
2660 struct hwi_context_memory *phwi_context)
2661{
bfead3b2
JK
2662 unsigned int i, num_eq_pages;
2663 int ret, eq_for_mcc;
6733b39a
JK
2664 struct be_queue_info *eq;
2665 struct be_dma_mem *mem;
6733b39a 2666 void *eq_vaddress;
bfead3b2 2667 dma_addr_t paddr;
6733b39a 2668
bfead3b2
JK
2669 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2670 sizeof(struct be_eq_entry));
6733b39a 2671
bfead3b2
JK
2672 if (phba->msix_enabled)
2673 eq_for_mcc = 1;
2674 else
2675 eq_for_mcc = 0;
2676 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2677 eq = &phwi_context->be_eq[i].q;
2678 mem = &eq->dma_mem;
2679 phwi_context->be_eq[i].phba = phba;
2680 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2681 num_eq_pages * PAGE_SIZE,
2682 &paddr);
2683 if (!eq_vaddress)
2684 goto create_eq_error;
2685
2686 mem->va = eq_vaddress;
2687 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2688 sizeof(struct be_eq_entry), eq_vaddress);
2689 if (ret) {
2690 shost_printk(KERN_ERR, phba->shost,
457ff3b7 2691 "be_fill_queue Failed for EQ\n");
bfead3b2
JK
2692 goto create_eq_error;
2693 }
6733b39a 2694
bfead3b2
JK
2695 mem->dma = paddr;
2696 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2697 phwi_context->cur_eqd);
2698 if (ret) {
2699 shost_printk(KERN_ERR, phba->shost,
2700 "beiscsi_cmd_eq_create"
457ff3b7 2701 "Failedfor EQ\n");
bfead3b2
JK
2702 goto create_eq_error;
2703 }
2704 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
6733b39a 2705 }
6733b39a 2706 return 0;
bfead3b2
JK
2707create_eq_error:
2708 for (i = 0; i < (phba->num_cpus + 1); i++) {
2709 eq = &phwi_context->be_eq[i].q;
2710 mem = &eq->dma_mem;
2711 if (mem->va)
2712 pci_free_consistent(phba->pcidev, num_eq_pages
2713 * PAGE_SIZE,
2714 mem->va, mem->dma);
2715 }
2716 return ret;
6733b39a
JK
2717}
2718
bfead3b2 2719static int beiscsi_create_cqs(struct beiscsi_hba *phba,
6733b39a
JK
2720 struct hwi_context_memory *phwi_context)
2721{
bfead3b2 2722 unsigned int i, num_cq_pages;
6733b39a
JK
2723 int ret;
2724 struct be_queue_info *cq, *eq;
2725 struct be_dma_mem *mem;
bfead3b2 2726 struct be_eq_obj *pbe_eq;
6733b39a 2727 void *cq_vaddress;
bfead3b2 2728 dma_addr_t paddr;
6733b39a 2729
bfead3b2
JK
2730 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2731 sizeof(struct sol_cqe));
6733b39a 2732
bfead3b2
JK
2733 for (i = 0; i < phba->num_cpus; i++) {
2734 cq = &phwi_context->be_cq[i];
2735 eq = &phwi_context->be_eq[i].q;
2736 pbe_eq = &phwi_context->be_eq[i];
2737 pbe_eq->cq = cq;
2738 pbe_eq->phba = phba;
2739 mem = &cq->dma_mem;
2740 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2741 num_cq_pages * PAGE_SIZE,
2742 &paddr);
2743 if (!cq_vaddress)
2744 goto create_cq_error;
7da50879 2745 ret = be_fill_queue(cq, phba->params.num_cq_entries,
bfead3b2
JK
2746 sizeof(struct sol_cqe), cq_vaddress);
2747 if (ret) {
2748 shost_printk(KERN_ERR, phba->shost,
457ff3b7 2749 "be_fill_queue Failed for ISCSI CQ\n");
bfead3b2
JK
2750 goto create_cq_error;
2751 }
2752
2753 mem->dma = paddr;
2754 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2755 false, 0);
2756 if (ret) {
2757 shost_printk(KERN_ERR, phba->shost,
2758 "beiscsi_cmd_eq_create"
457ff3b7 2759 "Failed for ISCSI CQ\n");
bfead3b2
JK
2760 goto create_cq_error;
2761 }
2762 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2763 cq->id, eq->id);
2764 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
6733b39a 2765 }
6733b39a 2766 return 0;
bfead3b2
JK
2767
2768create_cq_error:
2769 for (i = 0; i < phba->num_cpus; i++) {
2770 cq = &phwi_context->be_cq[i];
2771 mem = &cq->dma_mem;
2772 if (mem->va)
2773 pci_free_consistent(phba->pcidev, num_cq_pages
2774 * PAGE_SIZE,
2775 mem->va, mem->dma);
2776 }
2777 return ret;
2778
6733b39a
JK
2779}
2780
/**
 * beiscsi_create_def_hdr - create the default PDU *header* ring.
 * @phba: adapter instance
 * @phwi_context: HWI context (owns be_def_hdrq)
 * @phwi_ctrlr: HWI controller (records the ring id)
 * @def_pdu_ring_sz: ring size in bytes for the firmware command
 *
 * Backs the ring with the HWI_MEM_ASYNC_HEADER_RING region, issues the
 * firmware create command bound to CQ 0, then posts the initial header
 * buffers.  Returns 0 on success or the failing step's status.
 */
static int
beiscsi_create_def_hdr(struct beiscsi_hba *phba,
		       struct hwi_context_memory *phwi_context,
		       struct hwi_controller *phwi_ctrlr,
		       unsigned int def_pdu_ring_sz)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dq = &phwi_context->be_def_hdrq;
	cq = &phwi_context->be_cq[0];
	mem = &dq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_fill_queue Failed for DEF PDU HDR\n");
		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
				  bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_hdr_sz);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
		return ret;
	}
	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
	SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
		 phwi_context->be_def_hdrq.id);
	/* 1 == post to the header ring. */
	hwi_post_async_buffers(phba, 1);
	return 0;
}
2825
/**
 * beiscsi_create_def_data - create the default PDU *data* ring.
 * @phba: adapter instance
 * @phwi_context: HWI context (owns be_def_dataq)
 * @phwi_ctrlr: HWI controller (records the ring id)
 * @def_pdu_ring_sz: ring size in bytes for the firmware command
 *
 * Mirror of beiscsi_create_def_hdr() for the data ring: backed by the
 * HWI_MEM_ASYNC_DATA_RING region, bound to CQ 0, initial data buffers
 * posted at the end.  Returns 0 on success or the failing step's status.
 */
static int
beiscsi_create_def_data(struct beiscsi_hba *phba,
			struct hwi_context_memory *phwi_context,
			struct hwi_controller *phwi_ctrlr,
			unsigned int def_pdu_ring_sz)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dataq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dataq = &phwi_context->be_def_dataq;
	cq = &phwi_context->be_cq[0];
	mem = &dataq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_fill_queue Failed for DEF PDU DATA\n");
		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
				  bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_data_sz);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_cmd_create_default_pdu_queue Failed"
			     " for DEF PDU DATA\n");
		return ret;
	}
	phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
	SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
		 phwi_context->be_def_dataq.id);
	/* 0 == post to the data ring. */
	hwi_post_async_buffers(phba, 0);
	SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
	return 0;
}
2872
2873static int
2874beiscsi_post_pages(struct beiscsi_hba *phba)
2875{
2876 struct be_mem_descriptor *mem_descr;
2877 struct mem_array *pm_arr;
2878 unsigned int page_offset, i;
2879 struct be_dma_mem sgl;
2880 int status;
2881
2882 mem_descr = phba->init_mem;
2883 mem_descr += HWI_MEM_SGE;
2884 pm_arr = mem_descr->mem_array;
2885
2886 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2887 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2888 for (i = 0; i < mem_descr->num_elements; i++) {
2889 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2890 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2891 page_offset,
2892 (pm_arr->size / PAGE_SIZE));
2893 page_offset += pm_arr->size / PAGE_SIZE;
2894 if (status != 0) {
2895 shost_printk(KERN_ERR, phba->shost,
2896 "post sgl failed.\n");
2897 return status;
2898 }
2899 pm_arr++;
2900 }
457ff3b7 2901 SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
6733b39a
JK
2902 return 0;
2903}
2904
bfead3b2
JK
2905static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2906{
2907 struct be_dma_mem *mem = &q->dma_mem;
c8b25598 2908 if (mem->va) {
bfead3b2
JK
2909 pci_free_consistent(phba->pcidev, mem->size,
2910 mem->va, mem->dma);
c8b25598
JK
2911 mem->va = NULL;
2912 }
bfead3b2
JK
2913}
2914
2915static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2916 u16 len, u16 entry_size)
2917{
2918 struct be_dma_mem *mem = &q->dma_mem;
2919
2920 memset(q, 0, sizeof(*q));
2921 q->len = len;
2922 q->entry_size = entry_size;
2923 mem->size = len * entry_size;
2924 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2925 if (!mem->va)
d3ad2bb3 2926 return -ENOMEM;
bfead3b2
JK
2927 memset(mem->va, 0, mem->size);
2928 return 0;
2929}
2930
6733b39a
JK
2931static int
2932beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2933 struct hwi_context_memory *phwi_context,
2934 struct hwi_controller *phwi_ctrlr)
2935{
2936 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2937 u64 pa_addr_lo;
2938 unsigned int idx, num, i;
2939 struct mem_array *pwrb_arr;
2940 void *wrb_vaddr;
2941 struct be_dma_mem sgl;
2942 struct be_mem_descriptor *mem_descr;
2943 int status;
2944
2945 idx = 0;
2946 mem_descr = phba->init_mem;
2947 mem_descr += HWI_MEM_WRB;
2948 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2949 GFP_KERNEL);
2950 if (!pwrb_arr) {
2951 shost_printk(KERN_ERR, phba->shost,
2952 "Memory alloc failed in create wrb ring.\n");
2953 return -ENOMEM;
2954 }
2955 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2956 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2957 num_wrb_rings = mem_descr->mem_array[idx].size /
2958 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2959
2960 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2961 if (num_wrb_rings) {
2962 pwrb_arr[num].virtual_address = wrb_vaddr;
2963 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2964 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2965 sizeof(struct iscsi_wrb);
2966 wrb_vaddr += pwrb_arr[num].size;
2967 pa_addr_lo += pwrb_arr[num].size;
2968 num_wrb_rings--;
2969 } else {
2970 idx++;
2971 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2972 pa_addr_lo = mem_descr->mem_array[idx].\
2973 bus_address.u.a64.address;
2974 num_wrb_rings = mem_descr->mem_array[idx].size /
2975 (phba->params.wrbs_per_cxn *
2976 sizeof(struct iscsi_wrb));
2977 pwrb_arr[num].virtual_address = wrb_vaddr;
2978 pwrb_arr[num].bus_address.u.a64.address\
2979 = pa_addr_lo;
2980 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2981 sizeof(struct iscsi_wrb);
2982 wrb_vaddr += pwrb_arr[num].size;
2983 pa_addr_lo += pwrb_arr[num].size;
2984 num_wrb_rings--;
2985 }
2986 }
2987 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2988 wrb_mem_index = 0;
2989 offset = 0;
2990 size = 0;
2991
2992 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2993 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2994 &phwi_context->be_wrbq[i]);
2995 if (status != 0) {
2996 shost_printk(KERN_ERR, phba->shost,
2997 "wrbq create failed.");
1462b8ff 2998 kfree(pwrb_arr);
6733b39a
JK
2999 return status;
3000 }
7da50879
JK
3001 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
3002 id;
6733b39a
JK
3003 }
3004 kfree(pwrb_arr);
3005 return 0;
3006}
3007
3008static void free_wrb_handles(struct beiscsi_hba *phba)
3009{
3010 unsigned int index;
3011 struct hwi_controller *phwi_ctrlr;
3012 struct hwi_wrb_context *pwrb_context;
3013
3014 phwi_ctrlr = phba->phwi_ctrlr;
3015 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
3016 pwrb_context = &phwi_ctrlr->wrb_context[index];
3017 kfree(pwrb_context->pwrb_handle_base);
3018 kfree(pwrb_context->pwrb_handle_basestd);
3019 }
3020}
3021
bfead3b2
JK
3022static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3023{
3024 struct be_queue_info *q;
3025 struct be_ctrl_info *ctrl = &phba->ctrl;
3026
3027 q = &phba->ctrl.mcc_obj.q;
3028 if (q->created)
3029 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3030 be_queue_free(phba, q);
3031
3032 q = &phba->ctrl.mcc_obj.cq;
3033 if (q->created)
3034 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3035 be_queue_free(phba, q);
3036}
3037
/**
 * hwi_cleanup - tear down every queue created on the adapter
 * @phba: driver instance
 *
 * Destroys queues in reverse order of creation: per-connection WRB
 * queues, the default PDU header and data queues, the posted SGL pool,
 * the completion queues, the event queues, and finally the MCC pair.
 * When MSI-X is enabled one extra EQ exists (used by the MCC), hence
 * the num_cpus + eq_num loop bound.
 */
static void hwi_cleanup(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int i, eq_num;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		q = &phwi_context->be_wrbq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
	}
	free_wrb_handles(phba);

	q = &phwi_context->be_def_hdrq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	q = &phwi_context->be_def_dataq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	/* NULL queue: tells the firmware to release all posted SGL pages */
	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);

	for (i = 0; i < (phba->num_cpus); i++) {
		q = &phwi_context->be_cq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
	}
	if (phba->msix_enabled)
		eq_num = 1;
	else
		eq_num = 0;
	for (i = 0; i < (phba->num_cpus + eq_num); i++) {
		q = &phwi_context->be_eq[i].q;
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
	}
	be_mcc_queues_destroy(phba);
}
6733b39a 3081
/**
 * be_mcc_queues_create - allocate and create the MCC queue pair
 * @phba: driver instance
 * @phwi_context: HWI context holding the event queues
 *
 * Allocates DMA memory for the MCC completion queue and the MCC queue
 * and asks the adapter to create both.  The MCC CQ is bound to the
 * extra EQ (index num_cpus) when MSI-X is enabled, otherwise to EQ 0.
 * On failure the goto chain unwinds exactly what was created so far.
 *
 * Returns 0 on success, -ENOMEM on any failure.
 */
static int be_mcc_queues_create(struct beiscsi_hba *phba,
				struct hwi_context_memory *phwi_context)
{
	struct be_queue_info *q, *cq;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	/* Alloc MCC compl queue */
	cq = &phba->ctrl.mcc_obj.cq;
	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;
	/* Ask BE to create MCC compl queue; */
	if (phba->msix_enabled) {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
					 [phba->num_cpus].q, false, true, 0))
			goto mcc_cq_free;
	} else {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
					  false, true, 0))
			goto mcc_cq_free;
	}

	/* Alloc MCC queue */
	q = &phba->ctrl.mcc_obj.q;
	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (beiscsi_cmd_mccq_create(phba, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(phba, q);
mcc_cq_destroy:
	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(phba, cq);
err:
	return -ENOMEM;
}
3124
3125static int find_num_cpus(void)
3126{
3127 int num_cpus = 0;
3128
3129 num_cpus = num_online_cpus();
3130 if (num_cpus >= MAX_CPUS)
3131 num_cpus = MAX_CPUS - 1;
3132
457ff3b7 3133 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
bfead3b2 3134 return num_cpus;
6733b39a
JK
3135}
3136
/**
 * hwi_init_port - bring up the adapter's queue infrastructure
 * @phba: driver instance
 *
 * Performs the ordered hardware bring-up: firmware init, event queues,
 * MCC queues, firmware version check, completion queues, default PDU
 * header/data rings, SGL page posting, and per-connection WRB rings.
 * Each step depends on the previous ones; on any failure everything
 * created so far is torn down via hwi_cleanup().
 *
 * Returns 0 on success or the failing step's status.
 */
static int hwi_init_port(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	unsigned int def_pdu_ring_sz;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int status;

	def_pdu_ring_sz =
		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* interrupt-coalescing bounds: adaptive EQ delay starts at 64 */
	phwi_context->max_eqd = 0;
	phwi_context->min_eqd = 0;
	phwi_context->cur_eqd = 64;
	be_cmd_fw_initialize(&phba->ctrl);

	status = beiscsi_create_eqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
		goto error;
	}

	status = be_mcc_queues_create(phba, phwi_context);
	if (status != 0)
		goto error;

	/* MCC must exist before this: the check goes over the MCC path */
	status = mgmt_check_supported_fw(ctrl, phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Unsupported fw version\n");
		goto error;
	}

	status = beiscsi_create_cqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
		goto error;
	}

	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
					def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Header not created\n");
		goto error;
	}

	status = beiscsi_create_def_data(phba, phwi_context,
					 phwi_ctrlr, def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Data not created\n");
		goto error;
	}

	status = beiscsi_post_pages(phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
		goto error;
	}

	status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "WRB Rings not created\n");
		goto error;
	}

	SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
	return 0;

error:
	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
	hwi_cleanup(phba);
	return status;
}
3214
6733b39a
JK
3215static int hwi_init_controller(struct beiscsi_hba *phba)
3216{
3217 struct hwi_controller *phwi_ctrlr;
3218
3219 phwi_ctrlr = phba->phwi_ctrlr;
3220 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3221 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3222 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
457ff3b7 3223 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
6733b39a
JK
3224 phwi_ctrlr->phwi_ctxt);
3225 } else {
3226 shost_printk(KERN_ERR, phba->shost,
3227 "HWI_MEM_ADDN_CONTEXT is more than one element."
3228 "Failing to load\n");
3229 return -ENOMEM;
3230 }
3231
3232 iscsi_init_global_templates(phba);
3ec78271
JK
3233 if (beiscsi_init_wrb_handle(phba))
3234 return -ENOMEM;
3235
6733b39a
JK
3236 hwi_init_async_pdu_ctx(phba);
3237 if (hwi_init_port(phba) != 0) {
3238 shost_printk(KERN_ERR, phba->shost,
3239 "hwi_init_controller failed\n");
3240 return -ENOMEM;
3241 }
3242 return 0;
3243}
3244
3245static void beiscsi_free_mem(struct beiscsi_hba *phba)
3246{
3247 struct be_mem_descriptor *mem_descr;
3248 int i, j;
3249
3250 mem_descr = phba->init_mem;
3251 i = 0;
3252 j = 0;
3253 for (i = 0; i < SE_MEM_MAX; i++) {
3254 for (j = mem_descr->num_elements; j > 0; j--) {
3255 pci_free_consistent(phba->pcidev,
3256 mem_descr->mem_array[j - 1].size,
3257 mem_descr->mem_array[j - 1].virtual_address,
457ff3b7
JK
3258 (unsigned long)mem_descr->mem_array[j - 1].
3259 bus_address.u.a64.address);
6733b39a
JK
3260 }
3261 kfree(mem_descr->mem_array);
3262 mem_descr++;
3263 }
3264 kfree(phba->init_mem);
3265 kfree(phba->phwi_ctrlr);
3266}
3267
3268static int beiscsi_init_controller(struct beiscsi_hba *phba)
3269{
3270 int ret = -ENOMEM;
3271
3272 ret = beiscsi_get_memory(phba);
3273 if (ret < 0) {
3274 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
457ff3b7 3275 "Failed in beiscsi_alloc_memory\n");
6733b39a
JK
3276 return ret;
3277 }
3278
3279 ret = hwi_init_controller(phba);
3280 if (ret)
3281 goto free_init;
3282 SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
3283 return 0;
3284
3285free_init:
3286 beiscsi_free_mem(phba);
a49e06d5 3287 return ret;
6733b39a
JK
3288}
3289
/**
 * beiscsi_init_sgl_handle - build the I/O and eh SGL handle pools
 * @phba: driver instance
 *
 * Carves the single HWI_MEM_SGLH chunk into two pools of sgl_handle
 * pointers: the first ios_per_ctrl handles serve normal I/O, the
 * remaining (icds_per_ctrl - ios_per_ctrl) serve error-handling /
 * management tasks.  Each handle is then pointed at its slice of the
 * HWI_MEM_SGE fragment area and given a global sgl_index offset by
 * the firmware's iscsi_icd_start.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or when the
 * SGLH region is not a single contiguous chunk.
 */
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
	struct sgl_handle *psgl_handle;
	struct iscsi_sge *pfrag;
	unsigned int arr_index, i, idx;

	phba->io_sgl_hndl_avbl = 0;
	phba->eh_sgl_hndl_avbl = 0;

	mem_descr_sglh = phba->init_mem;
	mem_descr_sglh += HWI_MEM_SGLH;
	if (1 == mem_descr_sglh->num_elements) {
		/* pointer array for the I/O pool */
		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 phba->params.ios_per_ctrl,
						 GFP_KERNEL);
		if (!phba->io_sgl_hndl_base) {
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
		/* pointer array for the eh/mgmt pool (the remainder) */
		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 (phba->params.icds_per_ctrl -
						  phba->params.ios_per_ctrl),
						 GFP_KERNEL);
		if (!phba->eh_sgl_hndl_base) {
			kfree(phba->io_sgl_hndl_base);
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
	} else {
		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_SGLH is more than one element."
			     "Failing to load\n");
		return -ENOMEM;
	}

	/* distribute the sgl_handle structs over the two pools */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sglh->num_elements) {
		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;

		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
				 sizeof(struct sgl_handle)); i++) {
			if (arr_index < phba->params.ios_per_ctrl) {
				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
				phba->io_sgl_hndl_avbl++;
				arr_index++;
			} else {
				phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl] =
								psgl_handle;
				arr_index++;
				phba->eh_sgl_hndl_avbl++;
			}
			psgl_handle++;
		}
		idx++;
	}
	SE_DEBUG(DBG_LVL_8,
		 "phba->io_sgl_hndl_avbl=%d"
		 "phba->eh_sgl_hndl_avbl=%d\n",
		 phba->io_sgl_hndl_avbl,
		 phba->eh_sgl_hndl_avbl);

	/* attach each handle to its SGE fragment slice and global index */
	mem_descr_sg = phba->init_mem;
	mem_descr_sg += HWI_MEM_SGE;
	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
		 mem_descr_sg->num_elements);
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sg->num_elements) {
		pfrag = mem_descr_sg->mem_array[idx].virtual_address;

		for (i = 0;
		     i < (mem_descr_sg->mem_array[idx].size) /
		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
		     i++) {
			if (arr_index < phba->params.ios_per_ctrl)
				psgl_handle = phba->io_sgl_hndl_base[arr_index];
			else
				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
						phba->params.ios_per_ctrl];
			psgl_handle->pfrag = pfrag;
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
			pfrag += phba->params.num_sge_per_io;
			psgl_handle->sgl_index =
				phba->fw_config.iscsi_icd_start + arr_index++;
		}
		idx++;
	}
	/* both pools start full, with alloc/free cursors at the origin */
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}
3388
3389static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3390{
3391 int i, new_cid;
3392
c2462288 3393 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
6733b39a
JK
3394 GFP_KERNEL);
3395 if (!phba->cid_array) {
3396 shost_printk(KERN_ERR, phba->shost,
3397 "Failed to allocate memory in "
3398 "hba_setup_cid_tbls\n");
3399 return -ENOMEM;
3400 }
c2462288 3401 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
6733b39a
JK
3402 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3403 if (!phba->ep_array) {
3404 shost_printk(KERN_ERR, phba->shost,
3405 "Failed to allocate memory in "
457ff3b7 3406 "hba_setup_cid_tbls\n");
6733b39a
JK
3407 kfree(phba->cid_array);
3408 return -ENOMEM;
3409 }
7da50879 3410 new_cid = phba->fw_config.iscsi_cid_start;
6733b39a
JK
3411 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3412 phba->cid_array[i] = new_cid;
3413 new_cid += 2;
3414 }
3415 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3416 return 0;
3417}
3418
/**
 * hwi_enable_intr - enable host interrupts and arm all event queues
 * @phba: driver instance
 *
 * Sets the host-interrupt bit in the PCI config membar register if it
 * is not already set, then rings each EQ doorbell with rearm set so
 * the adapter starts delivering events.  In MSI-X mode the extra MCC
 * EQ (index num_cpus) is armed as well, hence `<=` in the loop.
 */
static void hwi_enable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	u8 __iomem *addr;
	u32 reg, i;
	u32 enabled;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
	reg = ioread32(addr);

	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (!enabled) {
		/* read-modify-write: only flip the host-interrupt bit */
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
		iowrite32(reg, addr);
	}

	if (!phba->msix_enabled) {
		eq = &phwi_context->be_eq[0].q;
		SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
		/* clear 0 entries, rearm, enable */
		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
	} else {
		for (i = 0; i <= phba->num_cpus; i++) {
			eq = &phwi_context->be_eq[i].q;
			SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
		}
	}
}
3455
3456static void hwi_disable_intr(struct beiscsi_hba *phba)
3457{
3458 struct be_ctrl_info *ctrl = &phba->ctrl;
3459
3460 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3461 u32 reg = ioread32(addr);
3462
3463 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3464 if (enabled) {
3465 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3466 iowrite32(reg, addr);
3467 } else
3468 shost_printk(KERN_WARNING, phba->shost,
457ff3b7 3469 "In hwi_disable_intr, Already Disabled\n");
6733b39a
JK
3470}
3471
c7acc5b8
JK
3472static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3473{
3474 struct be_cmd_resp_get_boot_target *boot_resp;
3475 struct be_cmd_resp_get_session *session_resp;
3476 struct be_mcc_wrb *wrb;
3477 struct be_dma_mem nonemb_cmd;
3478 unsigned int tag, wrb_num;
3479 unsigned short status, extd_status;
3480 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
f457a46f 3481 int ret = -ENOMEM;
c7acc5b8
JK
3482
3483 tag = beiscsi_get_boot_target(phba);
3484 if (!tag) {
3485 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
3486 return -EAGAIN;
3487 } else
3488 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3489 phba->ctrl.mcc_numtag[tag]);
3490
3491 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3492 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3493 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3494 if (status || extd_status) {
3495 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
3496 " status = %d extd_status = %d\n",
3497 status, extd_status);
3498 free_mcc_tag(&phba->ctrl, tag);
3499 return -EBUSY;
3500 }
3501 wrb = queue_get_wrb(mccq, wrb_num);
3502 free_mcc_tag(&phba->ctrl, tag);
3503 boot_resp = embedded_payload(wrb);
3504
3505 if (boot_resp->boot_session_handle < 0) {
f457a46f 3506 shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
c7acc5b8
JK
3507 return -ENXIO;
3508 }
3509
3510 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
3511 sizeof(*session_resp),
3512 &nonemb_cmd.dma);
3513 if (nonemb_cmd.va == NULL) {
3514 SE_DEBUG(DBG_LVL_1,
3515 "Failed to allocate memory for"
3516 "beiscsi_get_session_info\n");
3517 return -ENOMEM;
3518 }
3519
3520 memset(nonemb_cmd.va, 0, sizeof(*session_resp));
3521 tag = beiscsi_get_session_info(phba,
3522 boot_resp->boot_session_handle, &nonemb_cmd);
3523 if (!tag) {
3524 SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
3525 " Failed\n");
3526 goto boot_freemem;
3527 } else
3528 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3529 phba->ctrl.mcc_numtag[tag]);
3530
3531 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3532 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3533 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3534 if (status || extd_status) {
3535 SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed"
3536 " status = %d extd_status = %d\n",
3537 status, extd_status);
3538 free_mcc_tag(&phba->ctrl, tag);
3539 goto boot_freemem;
3540 }
3541 wrb = queue_get_wrb(mccq, wrb_num);
3542 free_mcc_tag(&phba->ctrl, tag);
3543 session_resp = nonemb_cmd.va ;
f457a46f 3544
c7acc5b8
JK
3545 memcpy(&phba->boot_sess, &session_resp->session_info,
3546 sizeof(struct mgmt_session_info));
f457a46f
MC
3547 ret = 0;
3548
c7acc5b8
JK
3549boot_freemem:
3550 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3551 nonemb_cmd.va, nonemb_cmd.dma);
f457a46f
MC
3552 return ret;
3553}
3554
/**
 * beiscsi_boot_release - release callback for a boot sysfs kobject
 * @data: the beiscsi_hba whose host reference was taken at creation
 *
 * Drops the scsi_host reference taken in beiscsi_setup_boot_info().
 */
static void beiscsi_boot_release(void *data)
{
	struct beiscsi_hba *phba = data;

	scsi_host_put(phba->shost);
}
3561
/**
 * beiscsi_setup_boot_info - expose iBFT-style boot info via sysfs
 * @phba: driver instance
 *
 * Queries the firmware for boot session info; when none is available
 * the function deliberately returns 0 so the driver loads without
 * boot sysfs entries.  Otherwise it creates a boot kset with target,
 * initiator and ethernet kobjects, taking one scsi_host reference per
 * kobject (released by beiscsi_boot_release when the kobject dies).
 *
 * On error, iscsi_boot_destroy_kset() tears down any kobjects already
 * created (their release callbacks drop their host references).
 *
 * Returns 0 on success or no-boot-info, -ENOMEM on failure.
 */
static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
{
	struct iscsi_boot_kobj *boot_kobj;

	/* get boot info using mgmt cmd */
	if (beiscsi_get_boot_info(phba))
		/* Try to see if we can carry on without this */
		return 0;

	phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
	if (!phba->boot_kset)
		return -ENOMEM;

	/* get a ref because the show function will ref the phba */
	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
					     beiscsi_show_boot_tgt_info,
					     beiscsi_tgt_get_attr_visibility,
					     beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;

	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
						beiscsi_show_boot_ini_info,
						beiscsi_ini_get_attr_visibility,
						beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;

	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
					       beiscsi_show_boot_eth_info,
					       beiscsi_eth_get_attr_visibility,
					       beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;
	return 0;

put_shost:
	/* drop the reference taken for the kobject that failed to create */
	scsi_host_put(phba->shost);
free_kset:
	iscsi_boot_destroy_kset(phba->boot_kset);
	return -ENOMEM;
}
3610
6733b39a
JK
3611static int beiscsi_init_port(struct beiscsi_hba *phba)
3612{
3613 int ret;
3614
3615 ret = beiscsi_init_controller(phba);
3616 if (ret < 0) {
3617 shost_printk(KERN_ERR, phba->shost,
3618 "beiscsi_dev_probe - Failed in"
457ff3b7 3619 "beiscsi_init_controller\n");
6733b39a
JK
3620 return ret;
3621 }
3622 ret = beiscsi_init_sgl_handle(phba);
3623 if (ret < 0) {
3624 shost_printk(KERN_ERR, phba->shost,
3625 "beiscsi_dev_probe - Failed in"
457ff3b7 3626 "beiscsi_init_sgl_handle\n");
6733b39a
JK
3627 goto do_cleanup_ctrlr;
3628 }
3629
3630 if (hba_setup_cid_tbls(phba)) {
3631 shost_printk(KERN_ERR, phba->shost,
3632 "Failed in hba_setup_cid_tbls\n");
3633 kfree(phba->io_sgl_hndl_base);
3634 kfree(phba->eh_sgl_hndl_base);
3635 goto do_cleanup_ctrlr;
3636 }
3637
3638 return ret;
3639
3640do_cleanup_ctrlr:
3641 hwi_cleanup(phba);
3642 return ret;
3643}
3644
/**
 * hwi_purge_eq - drain any pending entries from every event queue
 * @phba: driver instance
 *
 * Walks each EQ (including the extra MCC EQ in MSI-X mode), consumes
 * all valid entries by clearing their valid bit and advancing the
 * tail, then rings the EQ doorbell with the consumed count so the
 * adapter's credit accounting stays in sync.  Used during teardown.
 */
static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		/* consume every valid entry still sitting in the ring */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		/* ack the consumed entries (clear, no rearm of interrupts) */
		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
	}
}
3677
3678static void beiscsi_clean_port(struct beiscsi_hba *phba)
3679{
03a12310 3680 int mgmt_status;
6733b39a
JK
3681
3682 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3683 if (mgmt_status)
3684 shost_printk(KERN_WARNING, phba->shost,
457ff3b7 3685 "mgmt_epfw_cleanup FAILED\n");
756d29c8 3686
6733b39a 3687 hwi_purge_eq(phba);
756d29c8 3688 hwi_cleanup(phba);
6733b39a
JK
3689 kfree(phba->io_sgl_hndl_base);
3690 kfree(phba->eh_sgl_hndl_base);
3691 kfree(phba->cid_array);
3692 kfree(phba->ep_array);
3693}
3694
/**
 * beiscsi_cleanup_task - release per-task driver resources
 * @task: libiscsi task being cleaned up
 *
 * Frees the task's BHS buffer and, depending on task type, its WRB
 * and SGL handles: SCSI tasks use the io_sgl pool, management tasks
 * the mgmt_sgl pool.  For management tasks the handles are kept while
 * a login is in progress, because the login task's resources are
 * reused across the login PDU exchange (they are released later by
 * beiscsi_offload_connection()).  Called with the session lock held.
 */
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	/* wrb_context is indexed by CID relative to the firmware's base */
	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
			- phba->fw_config.iscsi_cid_start];

	if (io_task->cmd_bhs) {
		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
			      io_task->bhs_pa.u.a64.address);
		io_task->cmd_bhs = NULL;
	}

	if (task->sc) {
		/* SCSI command task: release WRB and I/O SGL handle */
		if (io_task->pwrb_handle) {
			free_wrb_handle(phba, pwrb_context,
					io_task->pwrb_handle);
			io_task->pwrb_handle = NULL;
		}

		if (io_task->psgl_handle) {
			spin_lock(&phba->io_sgl_lock);
			free_io_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->io_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	} else {
		/* mgmt task: keep resources while a login is in flight */
		if (!beiscsi_conn->login_in_progress) {
			if (io_task->pwrb_handle) {
				free_wrb_handle(phba, pwrb_context,
						io_task->pwrb_handle);
				io_task->pwrb_handle = NULL;
			}
			if (io_task->psgl_handle) {
				spin_lock(&phba->mgmt_sgl_lock);
				free_mgmt_sgl_handle(phba,
						     io_task->psgl_handle);
				spin_unlock(&phba->mgmt_sgl_lock);
				io_task->psgl_handle = NULL;
			}
		}
	}
}
3745
/**
 * beiscsi_offload_connection - move a logged-in connection to offload
 * @beiscsi_conn: connection to offload
 * @params: negotiated iSCSI parameters packed in firmware layout
 *
 * Releases the login task's resources (login is over), then builds a
 * TARGET_CONTEXT_UPDATE WRB carrying the negotiated parameters
 * (burst lengths, ERL, digest flags, ImmediateData, InitialR2T,
 * ExpStatSN) and rings the TX doorbell to hand the connection to the
 * firmware for full offload.
 */
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct iscsi_target_context_update_wrb *pwrb = NULL;
	struct be_mem_descriptor *mem_descr;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_task *task = beiscsi_conn->task;
	struct iscsi_session *session = task->conn->session;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	beiscsi_conn->login_in_progress = 0;
	/* cleanup_task requires the session lock */
	spin_lock_bh(&session->lock);
	beiscsi_cleanup_task(task);
	spin_unlock_bh(&session->lock);

	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
				       phba->fw_config.iscsi_cid_start));
	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));

	/* copy each negotiated parameter from its packed dword slot */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_burst_length, pwrb, params->dw[offsetof
		      (struct amap_beiscsi_offload_params,
		      max_burst_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_send_data_segment_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      max_send_data_segment_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      first_burst_length,
		      pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      first_burst_length) / 32]);

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      erl) / 32] & OFFLD_PARAMS_ERL));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
		      pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      exp_statsn) / 32] + 1));
	/* WRB type 0x7 = target context update */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
		      0x7);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
		      pwrb, pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
		      pwrb, pwrb_handle->nxt_wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      session_state, pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
		      pwrb, 1);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
		      pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
		      0);

	/* pad buffer used by the firmware for short-frame padding */
	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_hi, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_lo, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

	/* post one WRB for this CID and ring the TX doorbell */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
}
3838
3839static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3840 int *index, int *age)
3841{
bfead3b2 3842 *index = (int)itt;
6733b39a
JK
3843 if (age)
3844 *age = conn->session->age;
3845}
3846
/**
 * beiscsi_alloc_pdu - allocates pdu and related resources
 * @task: libiscsi task
 * @opcode: opcode of pdu for task
 *
 * This is called with the session lock held. It will allocate
 * the wrb and sgl if needed for the command. And it will prep
 * the pdu's itt. beiscsi_parse_pdu will later translate
 * the pdu itt to the libiscsi task itt.
 *
 * Returns 0 on success or -ENOMEM when any of the pool/handle
 * allocations fails (everything acquired so far is released on
 * the goto-cleanup path).
 */
static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	itt_t itt;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	dma_addr_t paddr;

	/* BHS comes from the per-session DMA pool; GFP_ATOMIC because the
	 * session lock is held here. */
	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
					  GFP_ATOMIC, &paddr);
	if (!io_task->cmd_bhs)
		return -ENOMEM;
	io_task->bhs_pa.u.a64.address = paddr;
	/* remember libiscsi's itt; the hardware itt is built below */
	io_task->libiscsi_itt = (itt_t)task->itt;
	io_task->conn = beiscsi_conn;

	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
	task->hdr_max = sizeof(struct be_cmd_bhs);
	io_task->psgl_handle = NULL;
	io_task->pwrb_handle = NULL;

	if (task->sc) {
		/* SCSI command: take an I/O SGL handle, then a WRB */
		spin_lock(&phba->io_sgl_lock);
		io_task->psgl_handle = alloc_io_sgl_handle(phba);
		spin_unlock(&phba->io_sgl_lock);
		if (!io_task->psgl_handle)
			goto free_hndls;
		io_task->pwrb_handle = alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid -
					phba->fw_config.iscsi_cid_start);
		if (!io_task->pwrb_handle)
			goto free_io_hndls;
	} else {
		/* Management PDU (no scsi_cmnd attached) */
		io_task->scsi_cmnd = NULL;
		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
			if (!beiscsi_conn->login_in_progress) {
				/* First login PDU: allocate handles and
				 * cache them on the connection so later
				 * login PDUs reuse the same pair. */
				spin_lock(&phba->mgmt_sgl_lock);
				io_task->psgl_handle = (struct sgl_handle *)
						alloc_mgmt_sgl_handle(phba);
				spin_unlock(&phba->mgmt_sgl_lock);
				if (!io_task->psgl_handle)
					goto free_hndls;

				beiscsi_conn->login_in_progress = 1;
				beiscsi_conn->plogin_sgl_handle =
							io_task->psgl_handle;
				io_task->pwrb_handle =
					alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid -
					phba->fw_config.iscsi_cid_start);
				if (!io_task->pwrb_handle)
					goto free_io_hndls;
				beiscsi_conn->plogin_wrb_handle =
							io_task->pwrb_handle;

			} else {
				/* Subsequent login PDU: reuse cached pair */
				io_task->psgl_handle =
						beiscsi_conn->plogin_sgl_handle;
				io_task->pwrb_handle =
						beiscsi_conn->plogin_wrb_handle;
			}
			beiscsi_conn->task = task;
		} else {
			/* Other mgmt PDUs get fresh mgmt SGL + WRB handles */
			spin_lock(&phba->mgmt_sgl_lock);
			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
			spin_unlock(&phba->mgmt_sgl_lock);
			if (!io_task->psgl_handle)
				goto free_hndls;
			io_task->pwrb_handle =
					alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid -
					phba->fw_config.iscsi_cid_start);
			if (!io_task->pwrb_handle)
				goto free_mgmt_hndls;

		}
	}
	/* Hardware itt encodes wrb_index (high 16 bits) | sgl_index,
	 * big-endian on the wire; beiscsi_parse_pdu reverses this. */
	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
				 wrb_index << 16) | (unsigned int)
				(io_task->psgl_handle->sgl_index));
	io_task->pwrb_handle->pio_handle = task;

	io_task->cmd_bhs->iscsi_hdr.itt = itt;
	return 0;

	/* Unwind in reverse acquisition order. */
free_io_hndls:
	spin_lock(&phba->io_sgl_lock);
	free_io_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->io_sgl_lock);
	goto free_hndls;
free_mgmt_hndls:
	spin_lock(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->mgmt_sgl_lock);
free_hndls:
	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[
			beiscsi_conn->beiscsi_conn_cid -
			phba->fw_config.iscsi_cid_start];
	if (io_task->pwrb_handle)
		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
	io_task->pwrb_handle = NULL;
	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
		      io_task->bhs_pa.u.a64.address);
	io_task->cmd_bhs = NULL;
	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
	return -ENOMEM;
}
3969
6733b39a
JK
/**
 * beiscsi_iotask - post a SCSI read/write command to the adapter
 * @task: libiscsi task carrying the SCSI command
 * @sg: DMA-mapped scatterlist for the data buffer
 * @num_sg: number of scatterlist entries
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for a write (DMA_TO_DEVICE), zero for a read
 *
 * Fills in the task's work request block (WRB) via the AMAP bit-field
 * accessors, converts it to little-endian, and rings the TX doorbell.
 * Always returns 0.
 */
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		/* Pre-build a Data-Out PDU template (48-byte BHS) the
		 * hardware uses for unsolicited write data. */
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_WR_CMD);
		/* dsp=1: hardware sends immediate data from the SGL */
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	/* Copy the LUN from the command BHS into the Data-Out template */
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16(*(unsigned short *)
				  &io_task->cmd_bhs->iscsi_hdr.lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	/* WRB must be little-endian before the adapter sees it */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Doorbell: cid | wrb index | number of WRBs posted (1) */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
4033
/**
 * beiscsi_mtask - post an iSCSI management PDU to the adapter
 * @task: libiscsi task (login, nop-out, text, tmf or logout)
 *
 * Builds the WRB for the management opcode, converts it to
 * little-endian and rings the TX doorbell. Returns 0 on success
 * or -EINVAL for an unsupported opcode.
 */
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	unsigned int cid;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;
	/* WRB handles are reused, so clear out any stale contents */
	memset(pwrb, 0, sizeof(*pwrb));
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		/* ttt != reserved: this nop answers a target nop-in and
		 * needs no reply (dmsg=1); otherwise it is an initiator
		 * ping that expects a nop-in back. */
		if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      TGT_DM_CMD);
			AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
				      pwrb, 0);
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_RD_CMD);
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		}
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;

	default:
		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      task->data_count);
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	/* WRB must be little-endian before the adapter sees it */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Doorbell: cid | wrb index | number of WRBs posted (1) */
	doorbell |= cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
4114
4115static int beiscsi_task_xmit(struct iscsi_task *task)
4116{
6733b39a
JK
4117 struct beiscsi_io_task *io_task = task->dd_data;
4118 struct scsi_cmnd *sc = task->sc;
6733b39a
JK
4119 struct scatterlist *sg;
4120 int num_sg;
4121 unsigned int writedir = 0, xferlen = 0;
4122
6733b39a
JK
4123 if (!sc)
4124 return beiscsi_mtask(task);
4125
4126 io_task->scsi_cmnd = sc;
4127 num_sg = scsi_dma_map(sc);
4128 if (num_sg < 0) {
4129 SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
4130 return num_sg;
4131 }
6733b39a
JK
4132 xferlen = scsi_bufflen(sc);
4133 sg = scsi_sglist(sc);
4134 if (sc->sc_data_direction == DMA_TO_DEVICE) {
4135 writedir = 1;
457ff3b7 4136 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
6733b39a
JK
4137 task->imm_count);
4138 } else
4139 writedir = 0;
4140 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4141}
4142
ffce3e2e
JK
4143/**
4144 * beiscsi_bsg_request - handle bsg request from ISCSI transport
4145 * @job: job to handle
4146 */
4147static int beiscsi_bsg_request(struct bsg_job *job)
4148{
4149 struct Scsi_Host *shost;
4150 struct beiscsi_hba *phba;
4151 struct iscsi_bsg_request *bsg_req = job->request;
4152 int rc = -EINVAL;
4153 unsigned int tag;
4154 struct be_dma_mem nonemb_cmd;
4155 struct be_cmd_resp_hdr *resp;
4156 struct iscsi_bsg_reply *bsg_reply = job->reply;
4157 unsigned short status, extd_status;
4158
4159 shost = iscsi_job_to_shost(job);
4160 phba = iscsi_host_priv(shost);
4161
4162 switch (bsg_req->msgcode) {
4163 case ISCSI_BSG_HST_VENDOR:
4164 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
4165 job->request_payload.payload_len,
4166 &nonemb_cmd.dma);
4167 if (nonemb_cmd.va == NULL) {
4168 SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for "
4169 "beiscsi_bsg_request\n");
4170 return -EIO;
4171 }
4172 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4173 &nonemb_cmd);
4174 if (!tag) {
4175 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
4176 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4177 nonemb_cmd.va, nonemb_cmd.dma);
4178 return -EAGAIN;
4179 } else
4180 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
4181 phba->ctrl.mcc_numtag[tag]);
4182 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
4183 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
4184 free_mcc_tag(&phba->ctrl, tag);
4185 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
4186 sg_copy_from_buffer(job->reply_payload.sg_list,
4187 job->reply_payload.sg_cnt,
4188 nonemb_cmd.va, (resp->response_length
4189 + sizeof(*resp)));
4190 bsg_reply->reply_payload_rcv_len = resp->response_length;
4191 bsg_reply->result = status;
4192 bsg_job_done(job, bsg_reply->result,
4193 bsg_reply->reply_payload_rcv_len);
4194 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4195 nonemb_cmd.va, nonemb_cmd.dma);
4196 if (status || extd_status) {
4197 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
4198 " status = %d extd_status = %d\n",
4199 status, extd_status);
4200 return -EIO;
4201 }
4202 break;
4203
4204 default:
4205 SE_DEBUG(DBG_LVL_1, "Unsupported bsg command: 0x%x\n",
4206 bsg_req->msgcode);
4207 break;
4208 }
4209
4210 return rc;
4211}
4212
/**
 * beiscsi_quiesce - common teardown for remove and shutdown
 * @phba: adapter instance to quiesce
 *
 * Disables interrupts, frees IRQs/MSI-X vectors, stops the work queue
 * and iopoll instances, releases port memory, clears the crash-dump
 * semaphore bit in the MPU endpoint register, and finally unmaps the
 * PCI BARs and frees the mailbox DMA memory. Ordering matters: no new
 * completions may arrive once resources start being freed.
 */
static void beiscsi_quiesce(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	unsigned int i, msix_vec;
	u8 *real_offset = 0;
	u32 value = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	hwi_disable_intr(phba);
	if (phba->msix_enabled) {
		/* one vector per CPU plus one extra (<=, not <) */
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
			kfree(phba->msi_name[i]);
		}
	} else
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}

	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
	/* Clear the "driver loaded" bit in the MPU semaphore register so a
	 * future probe does not enter crash-dump mode (see
	 * beiscsi_dev_probe). */
	real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;

	value = readl((void *)real_offset);

	if (value & 0x00010000) {
		value &= 0xfffeffff;
		writel(value, (void *)real_offset);
	}
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
}
4258
4259static void beiscsi_remove(struct pci_dev *pcidev)
4260{
4261
4262 struct beiscsi_hba *phba = NULL;
4263
4264 phba = pci_get_drvdata(pcidev);
4265 if (!phba) {
4266 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4267 return;
4268 }
4269
4270 beiscsi_quiesce(phba);
9d045163 4271 iscsi_boot_destroy_kset(phba->boot_kset);
6733b39a
JK
4272 iscsi_host_remove(phba->shost);
4273 pci_dev_put(phba->pcidev);
4274 iscsi_host_free(phba->shost);
8dce69ff 4275 pci_disable_device(pcidev);
6733b39a
JK
4276}
4277
25602c97
JK
4278static void beiscsi_shutdown(struct pci_dev *pcidev)
4279{
4280
4281 struct beiscsi_hba *phba = NULL;
4282
4283 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4284 if (!phba) {
4285 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
4286 return;
4287 }
4288
4289 beiscsi_quiesce(phba);
8dce69ff 4290 pci_disable_device(pcidev);
25602c97
JK
4291}
4292
bfead3b2
JK
4293static void beiscsi_msix_enable(struct beiscsi_hba *phba)
4294{
4295 int i, status;
4296
4297 for (i = 0; i <= phba->num_cpus; i++)
4298 phba->msix_entries[i].entry = i;
4299
4300 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
4301 (phba->num_cpus + 1));
4302 if (!status)
4303 phba->msix_enabled = true;
4304
4305 return;
4306}
4307
6733b39a
JK
/**
 * beiscsi_dev_probe - PCI probe callback: bring up one adapter
 * @pcidev: PCI device being probed
 * @id: matched entry of beiscsi_pci_id_table
 *
 * Enables the PCI device, allocates the host structure, selects the
 * ASIC generation, sets up MSI-X, initializes the firmware control
 * path, handles crash-dump recovery via the MPU semaphore register,
 * initializes the port, MCC tag pool, work queue, iopoll and IRQs,
 * and finally enables interrupts and boot info. Errors unwind through
 * the labelled cleanup chain in reverse order of setup.
 */
static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
				const struct pci_device_id *id)
{
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	int ret, num_cpus, i;
	u8 *real_offset = 0;
	u32 value = 0;

	ret = beiscsi_enable_pci(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
			" Failed to enable pci device\n");
		return ret;
	}

	phba = beiscsi_hba_alloc(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
			" Failed in beiscsi_hba_alloc\n");
		goto disable_pci;
	}

	/* ASIC generation is keyed off the PCI device id */
	switch (pcidev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
	case OC_DEVICE_ID2:
		phba->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID3:
		phba->generation = BE_GEN3;
		break;
	default:
		phba->generation = 0;
	}

	if (enable_msix)
		num_cpus = find_num_cpus();
	else
		num_cpus = 1;
	phba->num_cpus = num_cpus;
	SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);

	if (enable_msix) {
		beiscsi_msix_enable(phba);
		/* fall back to a single event queue on legacy interrupts */
		if (!phba->msix_enabled)
			phba->num_cpus = 1;
	}
	ret = be_ctrl_init(phba, pcidev);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
				"Failed in be_ctrl_init\n");
		goto hba_free;
	}

	/* First adapter probed: check the MPU semaphore register. A set
	 * "driver loaded" bit means the previous driver instance never
	 * cleaned up (crash), so reset the function before reusing it;
	 * otherwise claim the bit for this load. */
	if (!num_hba) {
		real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
		value = readl((void *)real_offset);
		if (value & 0x00010000) {
			gcrashmode++;
			shost_printk(KERN_ERR, phba->shost,
				"Loading Driver in crashdump mode\n");
			ret = beiscsi_cmd_reset_function(phba);
			if (ret) {
				shost_printk(KERN_ERR, phba->shost,
					"Reset Failed. Aborting Crashdump\n");
				goto hba_free;
			}
			ret = be_chk_reset_complete(phba);
			if (ret) {
				shost_printk(KERN_ERR, phba->shost,
					"Failed to get out of reset."
					"Aborting Crashdump\n");
				goto hba_free;
			}
		} else {
			value |= 0x00010000;
			writel(value, (void *)real_offset);
			num_hba++;
		}
	}

	spin_lock_init(&phba->io_sgl_lock);
	spin_lock_init(&phba->mgmt_sgl_lock);
	spin_lock_init(&phba->isr_lock);
	ret = mgmt_get_fw_config(&phba->ctrl, phba);
	if (ret != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Error getting fw config\n");
		goto free_port;
	}
	phba->shost->max_id = phba->fw_config.iscsi_cid_count;
	beiscsi_get_params(phba);
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed in beiscsi_init_port\n");
		goto free_port;
	}

	/* MCC tags are 1-based; index 0 is never handed out */
	for (i = 0; i < MAX_MCC_CMD ; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_numtag[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
	}

	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;

	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
		 phba->shost->host_no);
	phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
	if (!phba->wq) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
				"Failed to allocate work queue\n");
		goto free_twq;
	}

	INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (blk_iopoll_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
					be_iopoll);
			blk_iopoll_enable(&pbe_eq->iopoll);
		}
	}
	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed to beiscsi_init_irqs\n");
		goto free_blkenbld;
	}
	hwi_enable_intr(phba);

	if (beiscsi_setup_boot_info(phba))
		/*
		 * log error but continue, because we may not be using
		 * iscsi boot.
		 */
		shost_printk(KERN_ERR, phba->shost, "Could not set up "
			     "iSCSI boot info.");

	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

	/* Error unwinding, reverse order of the setup above. */
free_blkenbld:
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}
free_twq:
	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
free_port:
	/* Release the MPU semaphore bit claimed earlier */
	real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;

	value = readl((void *)real_offset);

	if (value & 0x00010000) {
		value &= 0xfffeffff;
		writel(value, (void *)real_offset);
	}

	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
hba_free:
	if (phba->msix_enabled)
		pci_disable_msix(phba->pcidev);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
disable_pci:
	pci_disable_device(pcidev);
	return ret;
}
4496
/*
 * libiscsi transport template for be2iscsi, registered with the iSCSI
 * transport class in beiscsi_module_init(). Session/connection
 * management, parameter access, the data path (xmit/alloc_pdu/itt
 * translation), endpoint handling and the bsg vendor-command channel
 * all hook in here; generic libiscsi helpers are used where the driver
 * needs no hardware-specific behavior.
 */
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	/* session / connection lifecycle */
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = be2iscsi_attr_is_visible,
	/* parameter get/set */
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	/* data path */
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	/* endpoint (offloaded TCP connection) handling */
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.bsg_request = beiscsi_bsg_request,
};
4527
/* PCI driver glue; id table is declared near the top of this file. */
static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.shutdown = beiscsi_shutdown,
	.id_table = beiscsi_pci_id_table
};
4535
bfead3b2 4536
6733b39a
JK
4537static int __init beiscsi_module_init(void)
4538{
4539 int ret;
4540
4541 beiscsi_scsi_transport =
4542 iscsi_register_transport(&beiscsi_iscsi_transport);
4543 if (!beiscsi_scsi_transport) {
4544 SE_DEBUG(DBG_LVL_1,
4545 "beiscsi_module_init - Unable to register beiscsi"
4546 "transport.\n");
f55a24f2 4547 return -ENOMEM;
6733b39a 4548 }
457ff3b7 4549 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
6733b39a
JK
4550 &beiscsi_iscsi_transport);
4551
4552 ret = pci_register_driver(&beiscsi_pci_driver);
4553 if (ret) {
4554 SE_DEBUG(DBG_LVL_1,
4555 "beiscsi_module_init - Unable to register"
4556 "beiscsi pci driver.\n");
4557 goto unregister_iscsi_transport;
4558 }
4559 return 0;
4560
4561unregister_iscsi_transport:
4562 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4563 return ret;
4564}
4565
/**
 * beiscsi_module_exit - module exit point
 *
 * Unregisters the PCI driver first (detaching all adapters) and only
 * then removes the iSCSI transport registration — the reverse of
 * beiscsi_module_init().
 */
static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);