]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/scsi/be2iscsi/be_main.c
[SCSI] be2iscsi: Code cleanup, removing the goto statement
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / be2iscsi / be_main.c
CommitLineData
6733b39a 1/**
255fa9a3 2 * Copyright (C) 2005 - 2011 Emulex
6733b39a
JK
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
255fa9a3 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
6733b39a
JK
11 *
12 * Contact Information:
255fa9a3 13 * linux-drivers@emulex.com
6733b39a 14 *
255fa9a3
JK
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
6733b39a 18 */
255fa9a3 19
6733b39a
JK
20#include <linux/reboot.h>
21#include <linux/delay.h>
5a0e3ad6 22#include <linux/slab.h>
6733b39a
JK
23#include <linux/interrupt.h>
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/string.h>
27#include <linux/kernel.h>
28#include <linux/semaphore.h>
c7acc5b8 29#include <linux/iscsi_boot_sysfs.h>
acf3368f 30#include <linux/module.h>
6733b39a
JK
31
32#include <scsi/libiscsi.h>
33#include <scsi/scsi_transport_iscsi.h>
34#include <scsi/scsi_transport.h>
35#include <scsi/scsi_cmnd.h>
36#include <scsi/scsi_device.h>
37#include <scsi/scsi_host.h>
38#include <scsi/scsi.h>
39#include "be_main.h"
40#include "be_iscsi.h"
41#include "be_mgmt.h"
42
43static unsigned int be_iopoll_budget = 10;
44static unsigned int be_max_phys_size = 64;
bfead3b2 45static unsigned int enable_msix = 1;
e9b91193
JK
46static unsigned int gcrashmode = 0;
47static unsigned int num_hba = 0;
6733b39a
JK
48
49MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
50MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
51MODULE_AUTHOR("ServerEngines Corporation");
52MODULE_LICENSE("GPL");
53module_param(be_iopoll_budget, int, 0);
54module_param(enable_msix, int, 0);
55module_param(be_max_phys_size, uint, S_IRUGO);
56MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
57 "contiguous memory that can be allocated."
58 "Range is 16 - 128");
59
60static int beiscsi_slave_configure(struct scsi_device *sdev)
61{
62 blk_queue_max_segment_size(sdev->request_queue, 65536);
63 return 0;
64}
65
4183122d
JK
66static int beiscsi_eh_abort(struct scsi_cmnd *sc)
67{
68 struct iscsi_cls_session *cls_session;
69 struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
70 struct beiscsi_io_task *aborted_io_task;
71 struct iscsi_conn *conn;
72 struct beiscsi_conn *beiscsi_conn;
73 struct beiscsi_hba *phba;
74 struct iscsi_session *session;
75 struct invalidate_command_table *inv_tbl;
3cbb7a74 76 struct be_dma_mem nonemb_cmd;
4183122d
JK
77 unsigned int cid, tag, num_invalidate;
78
79 cls_session = starget_to_session(scsi_target(sc->device));
80 session = cls_session->dd_data;
81
82 spin_lock_bh(&session->lock);
83 if (!aborted_task || !aborted_task->sc) {
84 /* we raced */
85 spin_unlock_bh(&session->lock);
86 return SUCCESS;
87 }
88
89 aborted_io_task = aborted_task->dd_data;
90 if (!aborted_io_task->scsi_cmnd) {
91 /* raced or invalid command */
92 spin_unlock_bh(&session->lock);
93 return SUCCESS;
94 }
95 spin_unlock_bh(&session->lock);
96 conn = aborted_task->conn;
97 beiscsi_conn = conn->dd_data;
98 phba = beiscsi_conn->phba;
99
100 /* invalidate iocb */
101 cid = beiscsi_conn->beiscsi_conn_cid;
102 inv_tbl = phba->inv_tbl;
103 memset(inv_tbl, 0x0, sizeof(*inv_tbl));
104 inv_tbl->cid = cid;
105 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
106 num_invalidate = 1;
3cbb7a74
JK
107 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
108 sizeof(struct invalidate_commands_params_in),
109 &nonemb_cmd.dma);
110 if (nonemb_cmd.va == NULL) {
111 SE_DEBUG(DBG_LVL_1,
112 "Failed to allocate memory for"
113 "mgmt_invalidate_icds\n");
114 return FAILED;
115 }
116 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
117
118 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
119 cid, &nonemb_cmd);
4183122d
JK
120 if (!tag) {
121 shost_printk(KERN_WARNING, phba->shost,
122 "mgmt_invalidate_icds could not be"
123 " submitted\n");
3cbb7a74
JK
124 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
125 nonemb_cmd.va, nonemb_cmd.dma);
126
4183122d
JK
127 return FAILED;
128 } else {
129 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
130 phba->ctrl.mcc_numtag[tag]);
131 free_mcc_tag(&phba->ctrl, tag);
132 }
3cbb7a74
JK
133 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
134 nonemb_cmd.va, nonemb_cmd.dma);
4183122d
JK
135 return iscsi_eh_abort(sc);
136}
137
138static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
139{
140 struct iscsi_task *abrt_task;
141 struct beiscsi_io_task *abrt_io_task;
142 struct iscsi_conn *conn;
143 struct beiscsi_conn *beiscsi_conn;
144 struct beiscsi_hba *phba;
145 struct iscsi_session *session;
146 struct iscsi_cls_session *cls_session;
147 struct invalidate_command_table *inv_tbl;
3cbb7a74 148 struct be_dma_mem nonemb_cmd;
4183122d 149 unsigned int cid, tag, i, num_invalidate;
4183122d
JK
150
151 /* invalidate iocbs */
152 cls_session = starget_to_session(scsi_target(sc->device));
153 session = cls_session->dd_data;
154 spin_lock_bh(&session->lock);
db7f7709
JK
155 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
156 spin_unlock_bh(&session->lock);
157 return FAILED;
158 }
4183122d
JK
159 conn = session->leadconn;
160 beiscsi_conn = conn->dd_data;
161 phba = beiscsi_conn->phba;
162 cid = beiscsi_conn->beiscsi_conn_cid;
163 inv_tbl = phba->inv_tbl;
164 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
165 num_invalidate = 0;
166 for (i = 0; i < conn->session->cmds_max; i++) {
167 abrt_task = conn->session->cmds[i];
168 abrt_io_task = abrt_task->dd_data;
169 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
170 continue;
171
172 if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
173 continue;
174
175 inv_tbl->cid = cid;
176 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
177 num_invalidate++;
178 inv_tbl++;
179 }
180 spin_unlock_bh(&session->lock);
181 inv_tbl = phba->inv_tbl;
182
3cbb7a74
JK
183 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
184 sizeof(struct invalidate_commands_params_in),
185 &nonemb_cmd.dma);
186 if (nonemb_cmd.va == NULL) {
187 SE_DEBUG(DBG_LVL_1,
188 "Failed to allocate memory for"
189 "mgmt_invalidate_icds\n");
190 return FAILED;
191 }
192 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
193 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
194 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
195 cid, &nonemb_cmd);
4183122d
JK
196 if (!tag) {
197 shost_printk(KERN_WARNING, phba->shost,
198 "mgmt_invalidate_icds could not be"
199 " submitted\n");
3cbb7a74
JK
200 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
201 nonemb_cmd.va, nonemb_cmd.dma);
4183122d
JK
202 return FAILED;
203 } else {
204 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
205 phba->ctrl.mcc_numtag[tag]);
206 free_mcc_tag(&phba->ctrl, tag);
207 }
3cbb7a74
JK
208 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
209 nonemb_cmd.va, nonemb_cmd.dma);
4183122d 210 return iscsi_eh_device_reset(sc);
4183122d
JK
211}
212
c7acc5b8
JK
213static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
214{
215 struct beiscsi_hba *phba = data;
f457a46f
MC
216 struct mgmt_session_info *boot_sess = &phba->boot_sess;
217 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
c7acc5b8
JK
218 char *str = buf;
219 int rc;
220
221 switch (type) {
222 case ISCSI_BOOT_TGT_NAME:
223 rc = sprintf(buf, "%.*s\n",
f457a46f
MC
224 (int)strlen(boot_sess->target_name),
225 (char *)&boot_sess->target_name);
c7acc5b8
JK
226 break;
227 case ISCSI_BOOT_TGT_IP_ADDR:
f457a46f 228 if (boot_conn->dest_ipaddr.ip_type == 0x1)
c7acc5b8 229 rc = sprintf(buf, "%pI4\n",
f457a46f 230 (char *)&boot_conn->dest_ipaddr.ip_address);
c7acc5b8
JK
231 else
232 rc = sprintf(str, "%pI6\n",
f457a46f 233 (char *)&boot_conn->dest_ipaddr.ip_address);
c7acc5b8
JK
234 break;
235 case ISCSI_BOOT_TGT_PORT:
f457a46f 236 rc = sprintf(str, "%d\n", boot_conn->dest_port);
c7acc5b8
JK
237 break;
238
239 case ISCSI_BOOT_TGT_CHAP_NAME:
240 rc = sprintf(str, "%.*s\n",
f457a46f
MC
241 boot_conn->negotiated_login_options.auth_data.chap.
242 target_chap_name_length,
243 (char *)&boot_conn->negotiated_login_options.
244 auth_data.chap.target_chap_name);
c7acc5b8
JK
245 break;
246 case ISCSI_BOOT_TGT_CHAP_SECRET:
247 rc = sprintf(str, "%.*s\n",
f457a46f
MC
248 boot_conn->negotiated_login_options.auth_data.chap.
249 target_secret_length,
250 (char *)&boot_conn->negotiated_login_options.
251 auth_data.chap.target_secret);
c7acc5b8
JK
252 break;
253 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
254 rc = sprintf(str, "%.*s\n",
f457a46f
MC
255 boot_conn->negotiated_login_options.auth_data.chap.
256 intr_chap_name_length,
257 (char *)&boot_conn->negotiated_login_options.
258 auth_data.chap.intr_chap_name);
c7acc5b8
JK
259 break;
260 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
f457a46f
MC
261 rc = sprintf(str, "%.*s\n",
262 boot_conn->negotiated_login_options.auth_data.chap.
263 intr_secret_length,
264 (char *)&boot_conn->negotiated_login_options.
265 auth_data.chap.intr_secret);
c7acc5b8
JK
266 break;
267 case ISCSI_BOOT_TGT_FLAGS:
f457a46f 268 rc = sprintf(str, "2\n");
c7acc5b8
JK
269 break;
270 case ISCSI_BOOT_TGT_NIC_ASSOC:
f457a46f 271 rc = sprintf(str, "0\n");
c7acc5b8
JK
272 break;
273 default:
274 rc = -ENOSYS;
275 break;
276 }
277 return rc;
278}
279
280static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
281{
282 struct beiscsi_hba *phba = data;
283 char *str = buf;
284 int rc;
285
286 switch (type) {
287 case ISCSI_BOOT_INI_INITIATOR_NAME:
288 rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
289 break;
290 default:
291 rc = -ENOSYS;
292 break;
293 }
294 return rc;
295}
296
297static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
298{
299 struct beiscsi_hba *phba = data;
300 char *str = buf;
301 int rc;
302
303 switch (type) {
304 case ISCSI_BOOT_ETH_FLAGS:
f457a46f 305 rc = sprintf(str, "2\n");
c7acc5b8
JK
306 break;
307 case ISCSI_BOOT_ETH_INDEX:
f457a46f 308 rc = sprintf(str, "0\n");
c7acc5b8
JK
309 break;
310 case ISCSI_BOOT_ETH_MAC:
311 rc = beiscsi_get_macaddr(buf, phba);
312 if (rc < 0) {
313 SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
314 return rc;
315 }
316 break;
317 default:
318 rc = -ENOSYS;
319 break;
320 }
321 return rc;
322}
323
324
587a1f16 325static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
c7acc5b8 326{
587a1f16 327 umode_t rc;
c7acc5b8
JK
328
329 switch (type) {
330 case ISCSI_BOOT_TGT_NAME:
331 case ISCSI_BOOT_TGT_IP_ADDR:
332 case ISCSI_BOOT_TGT_PORT:
333 case ISCSI_BOOT_TGT_CHAP_NAME:
334 case ISCSI_BOOT_TGT_CHAP_SECRET:
335 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
336 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
337 case ISCSI_BOOT_TGT_NIC_ASSOC:
338 case ISCSI_BOOT_TGT_FLAGS:
339 rc = S_IRUGO;
340 break;
341 default:
342 rc = 0;
343 break;
344 }
345 return rc;
346}
347
587a1f16 348static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
c7acc5b8 349{
587a1f16 350 umode_t rc;
c7acc5b8
JK
351
352 switch (type) {
353 case ISCSI_BOOT_INI_INITIATOR_NAME:
354 rc = S_IRUGO;
355 break;
356 default:
357 rc = 0;
358 break;
359 }
360 return rc;
361}
362
363
587a1f16 364static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
c7acc5b8 365{
587a1f16 366 umode_t rc;
c7acc5b8
JK
367
368 switch (type) {
369 case ISCSI_BOOT_ETH_FLAGS:
370 case ISCSI_BOOT_ETH_MAC:
371 case ISCSI_BOOT_ETH_INDEX:
372 rc = S_IRUGO;
373 break;
374 default:
375 rc = 0;
376 break;
377 }
378 return rc;
379}
380
bfead3b2
JK
381/*------------------- PCI Driver operations and data ----------------- */
382static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
383 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
f98c96b0 384 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
bfead3b2
JK
385 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
386 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
387 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
bfead3b2
JK
388 { 0 }
389};
390MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
391
6733b39a
JK
392static struct scsi_host_template beiscsi_sht = {
393 .module = THIS_MODULE,
394 .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
395 .proc_name = DRV_NAME,
396 .queuecommand = iscsi_queuecommand,
6733b39a
JK
397 .change_queue_depth = iscsi_change_queue_depth,
398 .slave_configure = beiscsi_slave_configure,
399 .target_alloc = iscsi_target_alloc,
4183122d
JK
400 .eh_abort_handler = beiscsi_eh_abort,
401 .eh_device_reset_handler = beiscsi_eh_device_reset,
309ce156 402 .eh_target_reset_handler = iscsi_eh_session_reset,
6733b39a
JK
403 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
404 .can_queue = BE2_IO_DEPTH,
405 .this_id = -1,
406 .max_sectors = BEISCSI_MAX_SECTORS,
407 .cmd_per_lun = BEISCSI_CMD_PER_LUN,
408 .use_clustering = ENABLE_CLUSTERING,
409};
6733b39a 410
bfead3b2 411static struct scsi_transport_template *beiscsi_scsi_transport;
6733b39a
JK
412
413static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
414{
415 struct beiscsi_hba *phba;
416 struct Scsi_Host *shost;
417
418 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
419 if (!shost) {
420 dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
457ff3b7 421 "iscsi_host_alloc failed\n");
6733b39a
JK
422 return NULL;
423 }
424 shost->dma_boundary = pcidev->dma_mask;
425 shost->max_id = BE2_MAX_SESSIONS;
426 shost->max_channel = 0;
427 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
428 shost->max_lun = BEISCSI_NUM_MAX_LUN;
429 shost->transportt = beiscsi_scsi_transport;
6733b39a
JK
430 phba = iscsi_host_priv(shost);
431 memset(phba, 0, sizeof(*phba));
432 phba->shost = shost;
433 phba->pcidev = pci_dev_get(pcidev);
2807afb7 434 pci_set_drvdata(pcidev, phba);
6733b39a
JK
435
436 if (iscsi_host_add(shost, &phba->pcidev->dev))
437 goto free_devices;
c7acc5b8 438
6733b39a
JK
439 return phba;
440
441free_devices:
442 pci_dev_put(phba->pcidev);
443 iscsi_host_free(phba->shost);
444 return NULL;
445}
446
447static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
448{
449 if (phba->csr_va) {
450 iounmap(phba->csr_va);
451 phba->csr_va = NULL;
452 }
453 if (phba->db_va) {
454 iounmap(phba->db_va);
455 phba->db_va = NULL;
456 }
457 if (phba->pci_va) {
458 iounmap(phba->pci_va);
459 phba->pci_va = NULL;
460 }
461}
462
463static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
464 struct pci_dev *pcidev)
465{
466 u8 __iomem *addr;
f98c96b0 467 int pcicfg_reg;
6733b39a
JK
468
469 addr = ioremap_nocache(pci_resource_start(pcidev, 2),
470 pci_resource_len(pcidev, 2));
471 if (addr == NULL)
472 return -ENOMEM;
473 phba->ctrl.csr = addr;
474 phba->csr_va = addr;
475 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
476
477 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
478 if (addr == NULL)
479 goto pci_map_err;
480 phba->ctrl.db = addr;
481 phba->db_va = addr;
482 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
483
f98c96b0
JK
484 if (phba->generation == BE_GEN2)
485 pcicfg_reg = 1;
486 else
487 pcicfg_reg = 0;
488
489 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
490 pci_resource_len(pcidev, pcicfg_reg));
491
6733b39a
JK
492 if (addr == NULL)
493 goto pci_map_err;
494 phba->ctrl.pcicfg = addr;
495 phba->pci_va = addr;
f98c96b0 496 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
6733b39a
JK
497 return 0;
498
499pci_map_err:
500 beiscsi_unmap_pci_function(phba);
501 return -ENOMEM;
502}
503
504static int beiscsi_enable_pci(struct pci_dev *pcidev)
505{
506 int ret;
507
508 ret = pci_enable_device(pcidev);
509 if (ret) {
510 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
511 "failed. Returning -ENODEV\n");
512 return ret;
513 }
514
bfead3b2 515 pci_set_master(pcidev);
6733b39a
JK
516 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
517 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
518 if (ret) {
519 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
520 pci_disable_device(pcidev);
521 return ret;
522 }
523 }
524 return 0;
525}
526
527static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
528{
529 struct be_ctrl_info *ctrl = &phba->ctrl;
530 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
531 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
532 int status = 0;
533
534 ctrl->pdev = pdev;
535 status = beiscsi_map_pci_bars(phba, pdev);
536 if (status)
537 return status;
6733b39a
JK
538 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
539 mbox_mem_alloc->va = pci_alloc_consistent(pdev,
540 mbox_mem_alloc->size,
541 &mbox_mem_alloc->dma);
542 if (!mbox_mem_alloc->va) {
543 beiscsi_unmap_pci_function(phba);
544 status = -ENOMEM;
545 return status;
546 }
547
548 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
549 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
550 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
551 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
552 spin_lock_init(&ctrl->mbox_lock);
bfead3b2
JK
553 spin_lock_init(&phba->ctrl.mcc_lock);
554 spin_lock_init(&phba->ctrl.mcc_cq_lock);
555
6733b39a
JK
556 return status;
557}
558
559static void beiscsi_get_params(struct beiscsi_hba *phba)
560{
7da50879
JK
561 phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
562 - (phba->fw_config.iscsi_cid_count
563 + BE2_TMFS
564 + BE2_NOPOUT_REQ));
565 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
ed58ea2a 566 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
6eab04a8 567 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
6733b39a
JK
568 phba->params.num_sge_per_io = BE2_SGE;
569 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
570 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
571 phba->params.eq_timer = 64;
572 phba->params.num_eq_entries =
7da50879
JK
573 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
574 + BE2_TMFS) / 512) + 1) * 512;
6733b39a
JK
575 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
576 ? 1024 : phba->params.num_eq_entries;
457ff3b7 577 SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
7da50879 578 phba->params.num_eq_entries);
6733b39a 579 phba->params.num_cq_entries =
7da50879
JK
580 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
581 + BE2_TMFS) / 512) + 1) * 512;
6733b39a
JK
582 phba->params.wrbs_per_cxn = 256;
583}
584
585static void hwi_ring_eq_db(struct beiscsi_hba *phba,
586 unsigned int id, unsigned int clr_interrupt,
587 unsigned int num_processed,
588 unsigned char rearm, unsigned char event)
589{
590 u32 val = 0;
591 val |= id & DB_EQ_RING_ID_MASK;
592 if (rearm)
593 val |= 1 << DB_EQ_REARM_SHIFT;
594 if (clr_interrupt)
595 val |= 1 << DB_EQ_CLR_SHIFT;
596 if (event)
597 val |= 1 << DB_EQ_EVNT_SHIFT;
598 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
599 iowrite32(val, phba->db_va + DB_EQ_OFFSET);
600}
601
bfead3b2
JK
602/**
603 * be_isr_mcc - The isr routine of the driver.
604 * @irq: Not used
605 * @dev_id: Pointer to host adapter structure
606 */
607static irqreturn_t be_isr_mcc(int irq, void *dev_id)
608{
609 struct beiscsi_hba *phba;
610 struct be_eq_entry *eqe = NULL;
611 struct be_queue_info *eq;
612 struct be_queue_info *mcc;
613 unsigned int num_eq_processed;
614 struct be_eq_obj *pbe_eq;
615 unsigned long flags;
616
617 pbe_eq = dev_id;
618 eq = &pbe_eq->q;
619 phba = pbe_eq->phba;
620 mcc = &phba->ctrl.mcc_obj.cq;
621 eqe = queue_tail_node(eq);
622 if (!eqe)
623 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
624
625 num_eq_processed = 0;
626
627 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
628 & EQE_VALID_MASK) {
629 if (((eqe->dw[offsetof(struct amap_eq_entry,
630 resource_id) / 32] &
631 EQE_RESID_MASK) >> 16) == mcc->id) {
632 spin_lock_irqsave(&phba->isr_lock, flags);
633 phba->todo_mcc_cq = 1;
634 spin_unlock_irqrestore(&phba->isr_lock, flags);
635 }
636 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
637 queue_tail_inc(eq);
638 eqe = queue_tail_node(eq);
639 num_eq_processed++;
640 }
641 if (phba->todo_mcc_cq)
642 queue_work(phba->wq, &phba->work_cqs);
643 if (num_eq_processed)
644 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
645
646 return IRQ_HANDLED;
647}
648
649/**
650 * be_isr_msix - The isr routine of the driver.
651 * @irq: Not used
652 * @dev_id: Pointer to host adapter structure
653 */
654static irqreturn_t be_isr_msix(int irq, void *dev_id)
655{
656 struct beiscsi_hba *phba;
657 struct be_eq_entry *eqe = NULL;
658 struct be_queue_info *eq;
659 struct be_queue_info *cq;
660 unsigned int num_eq_processed;
661 struct be_eq_obj *pbe_eq;
662 unsigned long flags;
663
664 pbe_eq = dev_id;
665 eq = &pbe_eq->q;
666 cq = pbe_eq->cq;
667 eqe = queue_tail_node(eq);
668 if (!eqe)
669 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
670
671 phba = pbe_eq->phba;
672 num_eq_processed = 0;
673 if (blk_iopoll_enabled) {
674 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
675 & EQE_VALID_MASK) {
676 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
677 blk_iopoll_sched(&pbe_eq->iopoll);
678
679 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
680 queue_tail_inc(eq);
681 eqe = queue_tail_node(eq);
682 num_eq_processed++;
683 }
684 if (num_eq_processed)
685 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
686
687 return IRQ_HANDLED;
688 } else {
689 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
690 & EQE_VALID_MASK) {
691 spin_lock_irqsave(&phba->isr_lock, flags);
692 phba->todo_cq = 1;
693 spin_unlock_irqrestore(&phba->isr_lock, flags);
694 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
695 queue_tail_inc(eq);
696 eqe = queue_tail_node(eq);
697 num_eq_processed++;
698 }
699 if (phba->todo_cq)
700 queue_work(phba->wq, &phba->work_cqs);
701
702 if (num_eq_processed)
703 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
704
705 return IRQ_HANDLED;
706 }
707}
708
6733b39a
JK
709/**
710 * be_isr - The isr routine of the driver.
711 * @irq: Not used
712 * @dev_id: Pointer to host adapter structure
713 */
714static irqreturn_t be_isr(int irq, void *dev_id)
715{
716 struct beiscsi_hba *phba;
717 struct hwi_controller *phwi_ctrlr;
718 struct hwi_context_memory *phwi_context;
719 struct be_eq_entry *eqe = NULL;
720 struct be_queue_info *eq;
721 struct be_queue_info *cq;
bfead3b2 722 struct be_queue_info *mcc;
6733b39a 723 unsigned long flags, index;
bfead3b2 724 unsigned int num_mcceq_processed, num_ioeq_processed;
6733b39a 725 struct be_ctrl_info *ctrl;
bfead3b2 726 struct be_eq_obj *pbe_eq;
6733b39a
JK
727 int isr;
728
729 phba = dev_id;
6eab04a8 730 ctrl = &phba->ctrl;
bfead3b2
JK
731 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
732 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
733 if (!isr)
734 return IRQ_NONE;
6733b39a
JK
735
736 phwi_ctrlr = phba->phwi_ctrlr;
737 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
738 pbe_eq = &phwi_context->be_eq[0];
739
740 eq = &phwi_context->be_eq[0].q;
741 mcc = &phba->ctrl.mcc_obj.cq;
6733b39a
JK
742 index = 0;
743 eqe = queue_tail_node(eq);
744 if (!eqe)
745 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
746
bfead3b2
JK
747 num_ioeq_processed = 0;
748 num_mcceq_processed = 0;
6733b39a
JK
749 if (blk_iopoll_enabled) {
750 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
751 & EQE_VALID_MASK) {
bfead3b2
JK
752 if (((eqe->dw[offsetof(struct amap_eq_entry,
753 resource_id) / 32] &
754 EQE_RESID_MASK) >> 16) == mcc->id) {
755 spin_lock_irqsave(&phba->isr_lock, flags);
756 phba->todo_mcc_cq = 1;
757 spin_unlock_irqrestore(&phba->isr_lock, flags);
758 num_mcceq_processed++;
759 } else {
760 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
761 blk_iopoll_sched(&pbe_eq->iopoll);
762 num_ioeq_processed++;
763 }
6733b39a
JK
764 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
765 queue_tail_inc(eq);
766 eqe = queue_tail_node(eq);
6733b39a 767 }
bfead3b2
JK
768 if (num_ioeq_processed || num_mcceq_processed) {
769 if (phba->todo_mcc_cq)
770 queue_work(phba->wq, &phba->work_cqs);
771
756d29c8 772 if ((num_mcceq_processed) && (!num_ioeq_processed))
bfead3b2
JK
773 hwi_ring_eq_db(phba, eq->id, 0,
774 (num_ioeq_processed +
775 num_mcceq_processed) , 1, 1);
776 else
777 hwi_ring_eq_db(phba, eq->id, 0,
778 (num_ioeq_processed +
779 num_mcceq_processed), 0, 1);
780
6733b39a
JK
781 return IRQ_HANDLED;
782 } else
783 return IRQ_NONE;
784 } else {
bfead3b2 785 cq = &phwi_context->be_cq[0];
6733b39a
JK
786 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
787 & EQE_VALID_MASK) {
788
789 if (((eqe->dw[offsetof(struct amap_eq_entry,
790 resource_id) / 32] &
791 EQE_RESID_MASK) >> 16) != cq->id) {
792 spin_lock_irqsave(&phba->isr_lock, flags);
793 phba->todo_mcc_cq = 1;
794 spin_unlock_irqrestore(&phba->isr_lock, flags);
795 } else {
796 spin_lock_irqsave(&phba->isr_lock, flags);
797 phba->todo_cq = 1;
798 spin_unlock_irqrestore(&phba->isr_lock, flags);
799 }
800 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
801 queue_tail_inc(eq);
802 eqe = queue_tail_node(eq);
bfead3b2 803 num_ioeq_processed++;
6733b39a
JK
804 }
805 if (phba->todo_cq || phba->todo_mcc_cq)
806 queue_work(phba->wq, &phba->work_cqs);
807
bfead3b2
JK
808 if (num_ioeq_processed) {
809 hwi_ring_eq_db(phba, eq->id, 0,
810 num_ioeq_processed, 1, 1);
6733b39a
JK
811 return IRQ_HANDLED;
812 } else
813 return IRQ_NONE;
814 }
815}
816
817static int beiscsi_init_irqs(struct beiscsi_hba *phba)
818{
819 struct pci_dev *pcidev = phba->pcidev;
bfead3b2
JK
820 struct hwi_controller *phwi_ctrlr;
821 struct hwi_context_memory *phwi_context;
4f5af07e 822 int ret, msix_vec, i, j;
6733b39a 823
bfead3b2
JK
824 phwi_ctrlr = phba->phwi_ctrlr;
825 phwi_context = phwi_ctrlr->phwi_ctxt;
826
827 if (phba->msix_enabled) {
828 for (i = 0; i < phba->num_cpus; i++) {
8fcfb210
JK
829 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
830 GFP_KERNEL);
831 if (!phba->msi_name[i]) {
832 ret = -ENOMEM;
833 goto free_msix_irqs;
834 }
835
836 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
837 phba->shost->host_no, i);
bfead3b2 838 msix_vec = phba->msix_entries[i].vector;
8fcfb210
JK
839 ret = request_irq(msix_vec, be_isr_msix, 0,
840 phba->msi_name[i],
bfead3b2 841 &phwi_context->be_eq[i]);
4f5af07e
JK
842 if (ret) {
843 shost_printk(KERN_ERR, phba->shost,
844 "beiscsi_init_irqs-Failed to"
845 "register msix for i = %d\n", i);
8fcfb210 846 kfree(phba->msi_name[i]);
4f5af07e
JK
847 goto free_msix_irqs;
848 }
bfead3b2 849 }
8fcfb210
JK
850 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
851 if (!phba->msi_name[i]) {
852 ret = -ENOMEM;
853 goto free_msix_irqs;
854 }
855 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
856 phba->shost->host_no);
bfead3b2 857 msix_vec = phba->msix_entries[i].vector;
8fcfb210 858 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
bfead3b2 859 &phwi_context->be_eq[i]);
4f5af07e
JK
860 if (ret) {
861 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
862 "Failed to register beiscsi_msix_mcc\n");
8fcfb210 863 kfree(phba->msi_name[i]);
4f5af07e
JK
864 goto free_msix_irqs;
865 }
866
bfead3b2
JK
867 } else {
868 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
869 "beiscsi", phba);
870 if (ret) {
871 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
872 "Failed to register irq\\n");
873 return ret;
874 }
6733b39a
JK
875 }
876 return 0;
4f5af07e 877free_msix_irqs:
8fcfb210
JK
878 for (j = i - 1; j >= 0; j--) {
879 kfree(phba->msi_name[j]);
880 msix_vec = phba->msix_entries[j].vector;
4f5af07e 881 free_irq(msix_vec, &phwi_context->be_eq[j]);
8fcfb210 882 }
4f5af07e 883 return ret;
6733b39a
JK
884}
885
886static void hwi_ring_cq_db(struct beiscsi_hba *phba,
887 unsigned int id, unsigned int num_processed,
888 unsigned char rearm, unsigned char event)
889{
890 u32 val = 0;
891 val |= id & DB_CQ_RING_ID_MASK;
892 if (rearm)
893 val |= 1 << DB_CQ_REARM_SHIFT;
894 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
895 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
896}
897
6733b39a
JK
898static unsigned int
899beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
900 struct beiscsi_hba *phba,
901 unsigned short cid,
902 struct pdu_base *ppdu,
903 unsigned long pdu_len,
904 void *pbuffer, unsigned long buf_len)
905{
906 struct iscsi_conn *conn = beiscsi_conn->conn;
907 struct iscsi_session *session = conn->session;
bfead3b2
JK
908 struct iscsi_task *task;
909 struct beiscsi_io_task *io_task;
910 struct iscsi_hdr *login_hdr;
6733b39a
JK
911
912 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
913 PDUBASE_OPCODE_MASK) {
914 case ISCSI_OP_NOOP_IN:
915 pbuffer = NULL;
916 buf_len = 0;
917 break;
918 case ISCSI_OP_ASYNC_EVENT:
919 break;
920 case ISCSI_OP_REJECT:
921 WARN_ON(!pbuffer);
922 WARN_ON(!(buf_len == 48));
923 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
924 break;
925 case ISCSI_OP_LOGIN_RSP:
7bd6e25c 926 case ISCSI_OP_TEXT_RSP:
bfead3b2
JK
927 task = conn->login_task;
928 io_task = task->dd_data;
929 login_hdr = (struct iscsi_hdr *)ppdu;
930 login_hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
931 break;
932 default:
933 shost_printk(KERN_WARNING, phba->shost,
457ff3b7 934 "Unrecognized opcode 0x%x in async msg\n",
6733b39a
JK
935 (ppdu->
936 dw[offsetof(struct amap_pdu_base, opcode) / 32]
937 & PDUBASE_OPCODE_MASK));
938 return 1;
939 }
940
941 spin_lock_bh(&session->lock);
942 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
943 spin_unlock_bh(&session->lock);
944 return 0;
945}
946
947static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
948{
949 struct sgl_handle *psgl_handle;
950
951 if (phba->io_sgl_hndl_avbl) {
952 SE_DEBUG(DBG_LVL_8,
457ff3b7 953 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
6733b39a
JK
954 phba->io_sgl_alloc_index);
955 psgl_handle = phba->io_sgl_hndl_base[phba->
956 io_sgl_alloc_index];
957 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
958 phba->io_sgl_hndl_avbl--;
bfead3b2
JK
959 if (phba->io_sgl_alloc_index == (phba->params.
960 ios_per_ctrl - 1))
6733b39a
JK
961 phba->io_sgl_alloc_index = 0;
962 else
963 phba->io_sgl_alloc_index++;
964 } else
965 psgl_handle = NULL;
966 return psgl_handle;
967}
968
969static void
970free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
971{
457ff3b7 972 SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d\n",
6733b39a
JK
973 phba->io_sgl_free_index);
974 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
975 /*
976 * this can happen if clean_task is called on a task that
977 * failed in xmit_task or alloc_pdu.
978 */
979 SE_DEBUG(DBG_LVL_8,
980 "Double Free in IO SGL io_sgl_free_index=%d,"
457ff3b7 981 "value there=%p\n", phba->io_sgl_free_index,
6733b39a
JK
982 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
983 return;
984 }
985 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
986 phba->io_sgl_hndl_avbl++;
987 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
988 phba->io_sgl_free_index = 0;
989 else
990 phba->io_sgl_free_index++;
991}
992
/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * This happens under session_lock until submission to chip
 *
 * Return: the allocated wrb_handle, or NULL if fewer than two handles
 * remain in the per-connection pool (one is kept in reserve so that
 * nxt_wrb_index can always point at a valid next handle).
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cid];
	/* Require >= 2 so the handle after alloc_index is also valid. */
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		/* Advance alloc_index circularly over wrbs_per_cxn slots. */
		if (pwrb_context->alloc_index ==
						(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;
		/* Chain to the next free handle's wrb_index for the hw. */
		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
						pwrb_context->alloc_index];
		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}
1024
/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	/* Return the handle at free_index and advance it with wrap-around. */
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;

	SE_DEBUG(DBG_LVL_8,
		 "FREE WRB: pwrb_handle=%p free_index=0x%x"
		 "wrb_handles_available=%d\n",
		 pwrb_handle, pwrb_context->free_index,
		 pwrb_context->wrb_handles_available);
}
1050
/*
 * Allocate an SGL handle from the management/error-handling pool
 * (eh_sgl_hndl_base), which occupies the icds_per_ctrl - ios_per_ctrl
 * slots not used by the I/O pool.  Returns NULL when the pool is empty.
 */
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
			 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
		phba->eh_sgl_hndl_avbl--;
		/* Circular advance over the mgmt portion of the ICD range. */
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}
1071
/*
 * Return a management/error-handling SGL handle to the eh pool.
 * A non-NULL slot at eh_sgl_free_index indicates a double free
 * (same scenario as free_io_sgl_handle) and is ignored after logging.
 */
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{

	SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
			     phba->eh_sgl_free_index);
	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		SE_DEBUG(DBG_LVL_8,
			 "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
			 phba->eh_sgl_free_index);
		return;
	}
	/* Park the handle and advance the free index with wrap-around. */
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
}
1096
/*
 * Complete a solicited SCSI command: decode status/response/flags from
 * the solicited CQE, propagate residuals and sense data to the scsi_cmnd,
 * then unmap DMA and hand the task back to libiscsi.
 */
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
	    (struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	/* Pull the iSCSI sequence numbers and status fields out of the CQE. */
	exp_cmdsn = (psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	/* MaxCmdSN = ExpCmdSN + command window - 1. */
	max_cmdsn = ((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
						& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
						& SOL_STS_MASK) >> 8);
	/* 0x80 sets the always-one bit of the iSCSI flags byte. */
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;

	/* Task already aborted/has no scsi command: just unmap and bail. */
	if (!task->sc) {
		if (io_task->scsi_cmnd)
			scsi_dma_unmap(io_task->scsi_cmnd);

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
				32] & SOL_RES_CNT_MASK);

		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			/* Short transfer below the midlayer's threshold. */
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		/* sense_info starts with a big-endian 16-bit length. */
		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	/* Account received data for READs (residual count field). */
	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
							& SOL_RES_CNT_MASK)
			conn->rxdata_octets += (psol->
			    dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			    & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
1172
/*
 * Build an iSCSI logout response PDU from the solicited CQE and deliver
 * it to libiscsi via __iscsi_complete_pdu.
 */
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	/* 0x80 sets the always-one bit of the iSCSI flags byte. */
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	/*
	 * NOTE(review): exp_cmdsn uses cpu_to_be32 while max_cmdsn uses
	 * be32_to_cpu; equivalent byte-swap on common arches, but the mixed
	 * directions look accidental - confirm intended endianness.
	 */
	hdr->max_cmdsn = be32_to_cpu((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	/* Complete against the itt libiscsi knows, not the hw itt. */
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1204
/*
 * Build an iSCSI task-management-function response PDU from the solicited
 * CQE and deliver it to libiscsi.
 */
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	/* 0x80 sets the always-one bit of the iSCSI flags byte. */
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	/* Same endian-conversion mix as be_complete_logout - see note there. */
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	/* Complete against the itt libiscsi knows, not the hw itt. */
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1228
/*
 * Completion path for driver-message CQEs: locate the wrb_handle for this
 * CQE's cid/wrb_index and release its management SGL handle and WRB back
 * to their pools.  No PDU is delivered to libiscsi here.
 */
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	/* cid in the CQE is hw-relative; rebase by iscsi_cid_start. */
	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	/* mgmt_sgl_lock guards the eh SGL pool; session lock guards WRBs. */
	spin_lock_bh(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock_bh(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}
1259
/*
 * Build an iSCSI NOP-In response PDU from the solicited CQE and deliver
 * it to libiscsi.
 */
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	/* 0x80 sets the always-one bit of the iSCSI flags byte. */
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	/* Same endian-conversion mix as be_complete_logout - see note there. */
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	/* Complete against the itt libiscsi knows, not the hw itt. */
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1281
/*
 * Dispatch a solicited command completion: locate the WRB for this
 * CQE's cid/wrb_index, read the WRB type, and route to the matching
 * completion helper under the session lock.
 */
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	/* cid in the CQE is hw-relative; rebase by iscsi_cid_start. */
	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
				(struct amap_sol_cqe, cid) / 32]
				& SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;
	pwrb = pwrb_handle->pwrb;
	/* WRB type lives in the top nibble of its dword. */
	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
				 WRB_TYPE_MASK) >> 28;

	spin_lock_bh(&session->lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		/* NOP-Out can be posted on the I/O path; tell them apart. */
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, psol);
		else
			be_complete_io(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGOUT:
		/* Logout and TMF share the same WRB type. */
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, psol);
		else
			be_complete_tmf(beiscsi_conn, task, psol);

		break;

	case HWH_TYPE_LOGIN:
		/* Login completions arrive on the unsolicited path instead. */
		SE_DEBUG(DBG_LVL_1,
			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
			 "- Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, psol);
		break;

	default:
		/*
		 * NOTE(review): the wrb_index printed here is extracted with
		 * the amap_iscsi_wrb 'type' offset applied to a sol_cqe -
		 * looks like it should use amap_sol_cqe's wrb_index field;
		 * confirm before relying on this log output.
		 */
		shost_printk(KERN_WARNING, phba->shost,
				"In hwi_complete_cmd, unknown type = %d"
				"wrb_index 0x%x CID 0x%x\n", type,
				((psol->dw[offsetof(struct amap_iscsi_wrb,
				type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
				((psol->dw[offsetof(struct amap_sol_cqe,
				cid) / 32] & SOL_CID_MASK) >> 6));
		break;
	}

	spin_unlock_bh(&session->lock);
}
1349
1350static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1351 *pasync_ctx, unsigned int is_header,
1352 unsigned int host_write_ptr)
1353{
1354 if (is_header)
1355 return &pasync_ctx->async_entry[host_write_ptr].
1356 header_busy_list;
1357 else
1358 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1359}
1360
/*
 * Resolve a default-PDU CQE to the async_pdu_handle it refers to: rebuild
 * the buffer's bus address from the CQE, pick the header or data busy list
 * by CQE code, and scan that list for the handle with a matching address.
 * On success the handle's cri/is_header/buffer_len are filled in and
 * *pcq_index is set; returns NULL on an unexpected CQE code.
 */
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned char is_header = 0;

	/* db_addr points past the PDU; subtract dpl to get the buffer base. */
	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	/*
	 * NOTE(review): this assigns a64.address to itself through a cast -
	 * a no-op that presumably once forced a union read; harmless.
	 */
	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));
		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			"Unexpected code=%d\n",
			 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	/* The slot the hw completed must have a posted buffer on it. */
	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
			break;
	}

	WARN_ON(!pasync_handle);

	/* Store the connection's rebased (driver-relative) cid. */
	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
					     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}
1424
/*
 * Walk the endpoint read pointer of the header or data default-PDU ring
 * forward to cq_index, marking each buffer passed over as consumed and
 * counting how many slots became writable again.  A zero count means the
 * same index was reported twice (duplicate notification) - warn.
 */
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;

	num_entries = pasync_ctx->num_entries;
	/* Pick the per-ring read pointer and writable counter to update. */
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
	}

	while ((*pep_read_ptr) != cq_index) {
		/* Advance circularly toward the completed index. */
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		/* Only the first slot is required to be occupied. */
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}
1473
/*
 * Release all buffers queued on a connection's unsolicited-PDU wait queue:
 * the first entry goes back to the header free list, every subsequent one
 * to the data free list, then the wait queue state is reset.
 */
static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
				       unsigned int cri)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;
	unsigned int i = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
		list_del(&pasync_handle->link);

		if (i == 0) {
			/* First queued handle is always the PDU header. */
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_header.free_list);
			pasync_ctx->async_header.free_entries++;
			i++;
		} else {
			/* Remaining handles carry PDU data segments. */
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_data.free_list);
			pasync_ctx->async_data.free_entries++;
			i++;
		}
	}

	/* Reset the wait queue so the next PDU starts clean. */
	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
	return 0;
}
1509
1510static struct phys_addr *
1511hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1512 unsigned int is_header, unsigned int host_write_ptr)
1513{
1514 struct phys_addr *pasync_sge = NULL;
1515
1516 if (is_header)
1517 pasync_sge = pasync_ctx->async_header.ring_base;
1518 else
1519 pasync_sge = pasync_ctx->async_data.ring_base;
1520
1521 return pasync_sge + host_write_ptr;
1522}
1523
/*
 * Repost free default-PDU buffers (header or data ring) to the adapter.
 * Moves up to min(writables, free_entries) handles - rounded down to a
 * multiple of 8 - from the free list onto the busy lists, writes their
 * addresses into the ring SGEs, and rings the RXULP0 doorbell.
 */
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	num_entries = pasync_ctx->num_entries;

	/* Snapshot the state of whichever ring we are replenishing. */
	if (is_header) {
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	/* Post only in batches of 8 buffers. */
	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			/*
			 * NOTE(review): hi is loaded with address_lo and lo
			 * with address_hi - presumably matching the hw SGE
			 * layout, but confirm against the ring descriptor
			 * definition.
			 */
			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		/* Commit the new write pointer and adjust the counters. */
		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
					host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		/* Tell the adapter how many buffers were posted. */
		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}
1604
/*
 * Drop a corrupted unsolicited data PDU (e.g. digest error): resolve the
 * CQE to its data buffer handle, catch up the ring's writable accounting,
 * free everything queued for that connection, and repost buffers.
 */
static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	/* This path is only ever taken for data-ring CQEs. */
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);

	hwi_free_async_msg(phba, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
1627
/*
 * Assemble and forward a complete unsolicited PDU: the first handle on the
 * wait queue is the header; all following data segments are compacted into
 * the first data buffer, then the whole PDU is passed to
 * beiscsi_process_async_pdu.  On success the buffers are freed.
 */
static unsigned int
hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
		  struct beiscsi_hba *phba,
		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
{
	struct list_head *plist;
	struct async_pdu_handle *pasync_handle;
	void *phdr = NULL;
	unsigned int hdr_len = 0, buf_len = 0;
	unsigned int status, index = 0, offset = 0;
	void *pfirst_buffer = NULL;
	unsigned int num_buf = 0;

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry(pasync_handle, plist, link) {
		if (index == 0) {
			/* Entry 0 is the PDU header. */
			phdr = pasync_handle->pbuffer;
			hdr_len = pasync_handle->buffer_len;
		} else {
			buf_len = pasync_handle->buffer_len;
			if (!num_buf) {
				pfirst_buffer = pasync_handle->pbuffer;
				num_buf++;
			}
			/*
			 * Compact data segments contiguously into the first
			 * data buffer (first memcpy copies onto itself).
			 */
			memcpy(pfirst_buffer + offset,
			       pasync_handle->pbuffer, buf_len);
			offset += buf_len;
		}
		index++;
	}

	/* Hand the reassembled PDU up with the driver-relative cid. */
	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
					   (beiscsi_conn->beiscsi_conn_cid -
					    phba->fw_config.iscsi_cid_start),
					    phdr, hdr_len, pfirst_buffer,
					    offset);

	if (status == 0)
		hwi_free_async_msg(phba, cri);
	return 0;
}
1670
/*
 * Accumulate one unsolicited buffer (header or data) onto the per-cri
 * wait queue.  A header records how many payload bytes are expected; data
 * buffers add to bytes_received.  Once all expected bytes are present the
 * PDU is forwarded via hwi_fwd_async_msg.
 */
static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		/* Two headers without an intervening completion is fatal. */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
			(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		/* Reassemble the 24-bit data length from the BHS. */
		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
					bytes_needed;

			/* Header-only PDU: forward immediately. */
			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		/* Data without a header is silently queued-for-nothing. */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}
1735
/*
 * Handle one default-PDU ring CQE: resolve it to its buffer handle,
 * reconcile the ring's writable accounting if needed, gather the buffer
 * toward a complete PDU, and repost free buffers to the adapter.
 */
static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_hba *phba,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);

	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);
	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
1756
756d29c8
JK
/*
 * Drain the MCC (management command) completion queue: async entries are
 * routed to the link-state handler, command completions to the MCC ISR
 * processor.  The CQ doorbell is rung every 32 entries (without rearm)
 * and once at the end (with rearm).
 */
static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {

		/* Credit the hw periodically so the CQ does not overflow. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
					num_processed, 0, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(mcc_compl->flags))
				/* Interpret compl as a async link evt */
				beiscsi_async_link_state_process(phba,
				   (struct be_async_event_link_state *) mcc_compl);
			else
				SE_DEBUG(DBG_LVL_1,
					" Unsupported Async Event, flags"
					" = 0x%08x\n", mcc_compl->flags);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}

		/* Invalidate the entry and move to the next CQE. */
		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	/* Final doorbell with rearm so further completions raise events. */
	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);

}
bfead3b2
JK
1799
/*
 * Poll the event-queue object's completion queue and dispatch each valid
 * CQE by its code: solicited completions, driver messages, unsolicited
 * header/data PDUs, and the various error notifications (some of which
 * tear down the connection via iscsi_conn_failure).  The CQ doorbell is
 * rung every 32 entries without rearm and once at the end with rearm.
 *
 * Return: total number of CQEs processed.
 */
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int num_processed = 0;
	unsigned int tot_nump = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		/* Map the CQE's hw cid to the endpoint/connection pair. */
		ep = phba->ep_array[(u32) ((sol->
				   dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				   SOL_CID_MASK) >> 6) -
				   phba->fw_config.iscsi_cid_start];

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		/* Credit the hw periodically so the CQ does not overflow. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, cq->id,
					num_processed, 0, 0);
			tot_nump += num_processed;
			num_processed = 0;
		}

		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
			32] & CQE_CODE_MASK) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case UNSOL_DATA_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			/* Expected noise during cmd/cxn invalidation. */
			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn"
				 "invalidate\n");
			break;
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			/* Per-command errors: log only. */
			SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & SOL_CID_MASK));
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Digest error on def pdu ring, dropping..\n");
			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
						     (struct i_t_dpdu_cqe *) sol);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_HDR_DIGEST_ERR:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			/* Connection-fatal errors: fail the iSCSI conn. */
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
				 "received/sent on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
				 "received on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			break;
		}

		/* Invalidate the CQE and advance to the next one. */
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
	}

	if (num_processed > 0) {
		tot_nump += num_processed;
		/* Final doorbell with rearm. */
		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
	}
	return tot_nump;
}
1940
/**
 * beiscsi_process_all_cqs - deferred (workqueue) CQ processing
 * @work: embedded work_struct inside struct beiscsi_hba (work_cqs)
 *
 * Runs in process context.  Drains the MCC completion queue and/or the
 * iSCSI completion queue, depending on which "todo" flags the interrupt
 * handler set before scheduling this work item.
 */
void beiscsi_process_all_cqs(struct work_struct *work)
{
	unsigned long flags;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba =
	    container_of(work, struct beiscsi_hba, work_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* With MSI-X the last EQ (index num_cpus) is dedicated to MCC;
	 * otherwise everything shares EQ 0. */
	if (phba->msix_enabled)
		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
	else
		pbe_eq = &phwi_context->be_eq[0];

	if (phba->todo_mcc_cq) {
		/* Clear the flag under isr_lock so a concurrent ISR
		 * re-arming it is not lost; process outside the lock. */
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_mcc_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_mcc_isr(phba);
	}

	if (phba->todo_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_cq(pbe_eq);
	}
}
1971
1972static int be_iopoll(struct blk_iopoll *iop, int budget)
1973{
1974 static unsigned int ret;
1975 struct beiscsi_hba *phba;
bfead3b2 1976 struct be_eq_obj *pbe_eq;
6733b39a 1977
bfead3b2
JK
1978 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1979 ret = beiscsi_process_cq(pbe_eq);
6733b39a 1980 if (ret < budget) {
bfead3b2 1981 phba = pbe_eq->phba;
6733b39a 1982 blk_iopoll_complete(iop);
bfead3b2
JK
1983 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1984 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
6733b39a
JK
1985 }
1986 return ret;
1987}
1988
/**
 * hwi_write_sgl - program WRB inline SGEs and the SGL fragment page
 * @pwrb:    work request block being built for this I/O
 * @sg:      head of the DMA-mapped scatterlist
 * @num_sg:  number of mapped scatterlist entries
 * @io_task: per-task driver state (BHS buffer, SGL handle)
 *
 * The WRB itself can describe at most two SGEs inline (sge0/sge1); the
 * full scatterlist is additionally written into the task's SGL page so
 * the hardware can follow it for larger transfers.
 */
static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
	      unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	/* Point the WRB at the iSCSI basic header segment. */
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	l_sg = sg;
	/* Fill at most the two inline SGEs the WRB provides. */
	for (index = 0; (index < num_sg) && (index < 2); index++,
							 sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
						((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
							((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
							sg_len);
			sge_len = sg_len;
		} else {
			/* sge1's r2t_offset is the length already covered
			 * by sge0. */
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
							pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
						((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
							((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
							sg_len);
		}
	}
	/* First SGL entry describes the BHS itself. */
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);

	/* Mark which inline SGE (if any) is the last one. */
	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								0);
	}
	sg = l_sg;
	/* Skip the BHS entry plus one reserved slot, then write the whole
	 * scatterlist into the SGL page. */
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
						(addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
						(addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	/* Flag the final SGE. */
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
2076
/**
 * hwi_write_buffer - program a WRB for a non-I/O PDU (mgmt/nop/etc.)
 * @pwrb: work request block being built
 * @task: libiscsi task; task->data is an optional flat payload buffer
 *
 * Unlike hwi_write_sgl() there is no scatterlist here: at most one flat
 * buffer (task->data) is DMA-mapped and placed in sge0.
 */
static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
	struct iscsi_sge *psgl;
	unsigned long long addr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct beiscsi_conn *beiscsi_conn = io_task->conn;
	struct beiscsi_hba *phba = beiscsi_conn->phba;

	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
				io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
				io_task->bhs_pa.u.a32.address_hi);

	if (task->data) {
		if (task->data_count) {
			/* dsp=1: data segment present; map to-device. */
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
			addr = (u64) pci_map_single(phba->pcidev,
						    task->data,
						    task->data_count, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
			addr = 0;
		}
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
						((u32)(addr & 0xFFFFFFFF)));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
						((u32)(addr >> 32)));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
						task->data_count);

		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
		addr = 0;
	}

	/* SGL page: entry 0 describes the BHS. */
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);
	if (task->data) {
		/* Entry 1 is an explicit all-zero separator entry. */
		psgl++;
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);

		psgl++;
		/* NOTE(review): this inner task->data test is redundant —
		 * we are already inside "if (task->data)" above. */
		if (task->data) {
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
						((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
						((u32)(addr >> 32)));
		}
		/* 0x106: fixed data-entry length — presumably a hardware
		 * requirement for non-I/O PDUs; confirm against SLI spec. */
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
	}
	/* Whichever entry psgl points at now is the last SGE. */
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
2142
2143static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2144{
bfead3b2 2145 unsigned int num_cq_pages, num_async_pdu_buf_pages;
6733b39a
JK
2146 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2147 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2148
2149 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2150 sizeof(struct sol_cqe));
6733b39a
JK
2151 num_async_pdu_buf_pages =
2152 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2153 phba->params.defpdu_hdr_sz);
2154 num_async_pdu_buf_sgl_pages =
2155 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2156 sizeof(struct phys_addr));
2157 num_async_pdu_data_pages =
2158 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2159 phba->params.defpdu_data_sz);
2160 num_async_pdu_data_sgl_pages =
2161 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2162 sizeof(struct phys_addr));
2163
2164 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2165
2166 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2167 BE_ISCSI_PDU_HEADER_SIZE;
2168 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2169 sizeof(struct hwi_context_memory);
2170
6733b39a
JK
2171
2172 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2173 * (phba->params.wrbs_per_cxn)
2174 * phba->params.cxns_per_ctrl;
2175 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2176 (phba->params.wrbs_per_cxn);
2177 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2178 phba->params.cxns_per_ctrl);
2179
2180 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2181 phba->params.icds_per_ctrl;
2182 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2183 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2184
2185 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
2186 num_async_pdu_buf_pages * PAGE_SIZE;
2187 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
2188 num_async_pdu_data_pages * PAGE_SIZE;
2189 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
2190 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
2191 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
2192 num_async_pdu_data_sgl_pages * PAGE_SIZE;
2193 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
2194 phba->params.asyncpdus_per_ctrl *
2195 sizeof(struct async_pdu_handle);
2196 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
2197 phba->params.asyncpdus_per_ctrl *
2198 sizeof(struct async_pdu_handle);
2199 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
2200 sizeof(struct hwi_async_pdu_context) +
2201 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
2202}
2203
2204static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2205{
2206 struct be_mem_descriptor *mem_descr;
2207 dma_addr_t bus_add;
2208 struct mem_array *mem_arr, *mem_arr_orig;
2209 unsigned int i, j, alloc_size, curr_alloc_size;
2210
3ec78271 2211 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
6733b39a
JK
2212 if (!phba->phwi_ctrlr)
2213 return -ENOMEM;
2214
2215 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2216 GFP_KERNEL);
2217 if (!phba->init_mem) {
2218 kfree(phba->phwi_ctrlr);
2219 return -ENOMEM;
2220 }
2221
2222 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2223 GFP_KERNEL);
2224 if (!mem_arr_orig) {
2225 kfree(phba->init_mem);
2226 kfree(phba->phwi_ctrlr);
2227 return -ENOMEM;
2228 }
2229
2230 mem_descr = phba->init_mem;
2231 for (i = 0; i < SE_MEM_MAX; i++) {
2232 j = 0;
2233 mem_arr = mem_arr_orig;
2234 alloc_size = phba->mem_req[i];
2235 memset(mem_arr, 0, sizeof(struct mem_array) *
2236 BEISCSI_MAX_FRAGS_INIT);
2237 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2238 do {
2239 mem_arr->virtual_address = pci_alloc_consistent(
2240 phba->pcidev,
2241 curr_alloc_size,
2242 &bus_add);
2243 if (!mem_arr->virtual_address) {
2244 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2245 goto free_mem;
2246 if (curr_alloc_size -
2247 rounddown_pow_of_two(curr_alloc_size))
2248 curr_alloc_size = rounddown_pow_of_two
2249 (curr_alloc_size);
2250 else
2251 curr_alloc_size = curr_alloc_size / 2;
2252 } else {
2253 mem_arr->bus_address.u.
2254 a64.address = (__u64) bus_add;
2255 mem_arr->size = curr_alloc_size;
2256 alloc_size -= curr_alloc_size;
2257 curr_alloc_size = min(be_max_phys_size *
2258 1024, alloc_size);
2259 j++;
2260 mem_arr++;
2261 }
2262 } while (alloc_size);
2263 mem_descr->num_elements = j;
2264 mem_descr->size_in_bytes = phba->mem_req[i];
2265 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2266 GFP_KERNEL);
2267 if (!mem_descr->mem_array)
2268 goto free_mem;
2269
2270 memcpy(mem_descr->mem_array, mem_arr_orig,
2271 sizeof(struct mem_array) * j);
2272 mem_descr++;
2273 }
2274 kfree(mem_arr_orig);
2275 return 0;
2276free_mem:
2277 mem_descr->num_elements = j;
2278 while ((i) || (j)) {
2279 for (j = mem_descr->num_elements; j > 0; j--) {
2280 pci_free_consistent(phba->pcidev,
2281 mem_descr->mem_array[j - 1].size,
2282 mem_descr->mem_array[j - 1].
2283 virtual_address,
457ff3b7
JK
2284 (unsigned long)mem_descr->
2285 mem_array[j - 1].
6733b39a
JK
2286 bus_address.u.a64.address);
2287 }
2288 if (i) {
2289 i--;
2290 kfree(mem_descr->mem_array);
2291 mem_descr--;
2292 }
2293 }
2294 kfree(mem_arr_orig);
2295 kfree(phba->init_mem);
2296 kfree(phba->phwi_ctrlr);
2297 return -ENOMEM;
2298}
2299
/**
 * beiscsi_get_memory - size and allocate all driver memory regions
 * @phba: adapter instance
 *
 * Computes the per-region requirements, then carves out the backing
 * memory.  Returns 0 on success or a negative errno from the allocator.
 */
static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	int status;

	beiscsi_find_mem_req(phba);
	status = beiscsi_alloc_mem(phba);

	return status;
}
2305
/**
 * iscsi_init_global_templates - build the shared PDU header templates
 * @phba: adapter instance
 *
 * The ISCSI_MEM_GLOBAL_HEADER region holds two template headers back to
 * back: a SCSI data-out header followed by a nop-out header.  The
 * hardware uses these templates when it generates such PDUs itself.
 */
static void iscsi_init_global_templates(struct beiscsi_hba *phba)
{
	struct pdu_data_out *pdata_out;
	struct pdu_nop_out *pnop_out;
	struct be_mem_descriptor *mem_descr;

	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
	/* Template 0: data-out header. */
	pdata_out =
	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);

	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
		      IIOC_SCSI_DATA);

	/* Template 1: nop-out header, immediately after template 0. */
	pnop_out =
	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);

	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
	/* TTT 0xFFFFFFFF = reserved "no target transfer tag" value. */
	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
}
2330
3ec78271 2331static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
6733b39a
JK
2332{
2333 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
3ec78271 2334 struct wrb_handle *pwrb_handle = NULL;
6733b39a
JK
2335 struct hwi_controller *phwi_ctrlr;
2336 struct hwi_wrb_context *pwrb_context;
3ec78271
JK
2337 struct iscsi_wrb *pwrb = NULL;
2338 unsigned int num_cxn_wrbh = 0;
2339 unsigned int num_cxn_wrb = 0, j, idx = 0, index;
6733b39a
JK
2340
2341 mem_descr_wrbh = phba->init_mem;
2342 mem_descr_wrbh += HWI_MEM_WRBH;
2343
2344 mem_descr_wrb = phba->init_mem;
2345 mem_descr_wrb += HWI_MEM_WRB;
6733b39a
JK
2346 phwi_ctrlr = phba->phwi_ctrlr;
2347
2348 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2349 pwrb_context = &phwi_ctrlr->wrb_context[index];
6733b39a
JK
2350 pwrb_context->pwrb_handle_base =
2351 kzalloc(sizeof(struct wrb_handle *) *
2352 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271
JK
2353 if (!pwrb_context->pwrb_handle_base) {
2354 shost_printk(KERN_ERR, phba->shost,
2355 "Mem Alloc Failed. Failing to load\n");
2356 goto init_wrb_hndl_failed;
2357 }
6733b39a
JK
2358 pwrb_context->pwrb_handle_basestd =
2359 kzalloc(sizeof(struct wrb_handle *) *
2360 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271
JK
2361 if (!pwrb_context->pwrb_handle_basestd) {
2362 shost_printk(KERN_ERR, phba->shost,
2363 "Mem Alloc Failed. Failing to load\n");
2364 goto init_wrb_hndl_failed;
2365 }
2366 if (!num_cxn_wrbh) {
2367 pwrb_handle =
2368 mem_descr_wrbh->mem_array[idx].virtual_address;
2369 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2370 ((sizeof(struct wrb_handle)) *
2371 phba->params.wrbs_per_cxn));
2372 idx++;
2373 }
2374 pwrb_context->alloc_index = 0;
2375 pwrb_context->wrb_handles_available = 0;
2376 pwrb_context->free_index = 0;
2377
6733b39a 2378 if (num_cxn_wrbh) {
6733b39a
JK
2379 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2380 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2381 pwrb_context->pwrb_handle_basestd[j] =
2382 pwrb_handle;
2383 pwrb_context->wrb_handles_available++;
bfead3b2 2384 pwrb_handle->wrb_index = j;
6733b39a
JK
2385 pwrb_handle++;
2386 }
6733b39a
JK
2387 num_cxn_wrbh--;
2388 }
2389 }
2390 idx = 0;
ed58ea2a 2391 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
6733b39a 2392 pwrb_context = &phwi_ctrlr->wrb_context[index];
3ec78271 2393 if (!num_cxn_wrb) {
6733b39a 2394 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
7c56533c 2395 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
3ec78271
JK
2396 ((sizeof(struct iscsi_wrb) *
2397 phba->params.wrbs_per_cxn));
2398 idx++;
2399 }
2400
2401 if (num_cxn_wrb) {
6733b39a
JK
2402 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2403 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2404 pwrb_handle->pwrb = pwrb;
2405 pwrb++;
2406 }
2407 num_cxn_wrb--;
2408 }
2409 }
3ec78271
JK
2410 return 0;
2411init_wrb_hndl_failed:
2412 for (j = index; j > 0; j--) {
2413 pwrb_context = &phwi_ctrlr->wrb_context[j];
2414 kfree(pwrb_context->pwrb_handle_base);
2415 kfree(pwrb_context->pwrb_handle_basestd);
2416 }
2417 return -ENOMEM;
6733b39a
JK
2418}
2419
/**
 * hwi_init_async_pdu_ctx - initialize the async (default) PDU context
 * @phba: adapter instance
 *
 * Wires together the memory regions allocated for unsolicited PDUs:
 * the context structure itself, the header and data buffer pools, their
 * rings, and one async_pdu_handle per buffer.  All handles start on the
 * respective free lists; per-connection wait/busy lists are initialized
 * empty.
 */
static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index, idx, num_per_mem, num_async_data;
	struct be_mem_descriptor *mem_descr;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
				mem_descr->mem_array[0].virtual_address;
	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
	memset(pasync_ctx, 0, sizeof(*pasync_ctx));

	pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
	pasync_ctx->buffer_size = p->defpdu_hdr_sz;

	/* Header buffer pool (single contiguous fragment). */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_header.va_base =
			mem_descr->mem_array[0].virtual_address;

	pasync_ctx->async_header.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Header ring (array of phys_addr entries posted to hardware). */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	pasync_ctx->async_header.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Header handle pool. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_header.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);


	/* Data ring. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_data.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Data handle pool. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
	if (!mem_descr->mem_array[0].virtual_address)
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_data.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

	pasync_header_h =
		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
	pasync_data_h =
		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;

	/* Data buffer pool: unlike headers this may span several memory
	 * fragments, walked with idx/num_async_data/num_per_mem below. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	idx = 0;
	pasync_ctx->async_data.va_base =
			mem_descr->mem_array[idx].virtual_address;
	pasync_ctx->async_data.pa_base.u.a64.address =
			mem_descr->mem_array[idx].bus_address.u.a64.address;

	num_async_data = ((mem_descr->mem_array[idx].size) /
			phba->params.defpdu_data_sz);
	num_per_mem = 0;

	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
		/* Header handle: fixed-size slot index into the header
		 * buffer pool. */
		pasync_header_h->cri = -1;
		pasync_header_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_header_h->link);
		pasync_header_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_header.va_base) +
			(p->defpdu_hdr_sz * index));

		pasync_header_h->pa.u.a64.address =
			pasync_ctx->async_header.pa_base.u.a64.address +
			(p->defpdu_hdr_sz * index);

		list_add_tail(&pasync_header_h->link,
			      &pasync_ctx->async_header.free_list);
		pasync_header_h++;
		pasync_ctx->async_header.free_entries++;
		pasync_ctx->async_header.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
			       header_busy_list);
		pasync_data_h->cri = -1;
		pasync_data_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_data_h->link);

		/* Current data fragment exhausted: advance to the next. */
		if (!num_async_data) {
			num_per_mem = 0;
			idx++;
			pasync_ctx->async_data.va_base =
				mem_descr->mem_array[idx].virtual_address;
			pasync_ctx->async_data.pa_base.u.a64.address =
				mem_descr->mem_array[idx].
				bus_address.u.a64.address;

			num_async_data = ((mem_descr->mem_array[idx].size) /
					phba->params.defpdu_data_sz);
		}
		pasync_data_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_data.va_base) +
			(p->defpdu_data_sz * num_per_mem));

		pasync_data_h->pa.u.a64.address =
		    pasync_ctx->async_data.pa_base.u.a64.address +
		    (p->defpdu_data_sz * num_per_mem);
		num_per_mem++;
		num_async_data--;

		list_add_tail(&pasync_data_h->link,
			      &pasync_ctx->async_data.free_list);
		pasync_data_h++;
		pasync_ctx->async_data.free_entries++;
		pasync_ctx->async_data.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
	}

	pasync_ctx->async_header.host_write_ptr = 0;
	pasync_ctx->async_header.ep_read_ptr = -1;
	pasync_ctx->async_data.host_write_ptr = 0;
	pasync_ctx->async_data.ep_read_ptr = -1;
}
2596
2597static int
2598be_sgl_create_contiguous(void *virtual_address,
2599 u64 physical_address, u32 length,
2600 struct be_dma_mem *sgl)
2601{
2602 WARN_ON(!virtual_address);
2603 WARN_ON(!physical_address);
2604 WARN_ON(!length > 0);
2605 WARN_ON(!sgl);
2606
2607 sgl->va = virtual_address;
457ff3b7 2608 sgl->dma = (unsigned long)physical_address;
6733b39a
JK
2609 sgl->size = length;
2610
2611 return 0;
2612}
2613
/* Reset a contiguous-region descriptor; the memory it described is NOT
 * freed here — ownership stays with the caller. */
static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
{
	memset(sgl, 0, sizeof(*sgl));
}
2618
2619static void
2620hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2621 struct mem_array *pmem, struct be_dma_mem *sgl)
2622{
2623 if (sgl->va)
2624 be_sgl_destroy_contiguous(sgl);
2625
2626 be_sgl_create_contiguous(pmem->virtual_address,
2627 pmem->bus_address.u.a64.address,
2628 pmem->size, sgl);
2629}
2630
/* Describe @pmem in @sgl, discarding any previous contents.
 * NOTE(review): currently identical to hwi_build_be_sgl_arr() apart from
 * the (unsigned char *) cast, which is a no-op for a void * argument —
 * presumably kept as a separate function for a planned offset variant. */
static void
hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
			   struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}
2642
2643static int be_fill_queue(struct be_queue_info *q,
2644 u16 len, u16 entry_size, void *vaddress)
2645{
2646 struct be_dma_mem *mem = &q->dma_mem;
2647
2648 memset(q, 0, sizeof(*q));
2649 q->len = len;
2650 q->entry_size = entry_size;
2651 mem->size = len * entry_size;
2652 mem->va = vaddress;
2653 if (!mem->va)
2654 return -ENOMEM;
2655 memset(mem->va, 0, mem->size);
2656 return 0;
2657}
2658
bfead3b2 2659static int beiscsi_create_eqs(struct beiscsi_hba *phba,
6733b39a
JK
2660 struct hwi_context_memory *phwi_context)
2661{
bfead3b2
JK
2662 unsigned int i, num_eq_pages;
2663 int ret, eq_for_mcc;
6733b39a
JK
2664 struct be_queue_info *eq;
2665 struct be_dma_mem *mem;
6733b39a 2666 void *eq_vaddress;
bfead3b2 2667 dma_addr_t paddr;
6733b39a 2668
bfead3b2
JK
2669 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2670 sizeof(struct be_eq_entry));
6733b39a 2671
bfead3b2
JK
2672 if (phba->msix_enabled)
2673 eq_for_mcc = 1;
2674 else
2675 eq_for_mcc = 0;
2676 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2677 eq = &phwi_context->be_eq[i].q;
2678 mem = &eq->dma_mem;
2679 phwi_context->be_eq[i].phba = phba;
2680 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2681 num_eq_pages * PAGE_SIZE,
2682 &paddr);
2683 if (!eq_vaddress)
2684 goto create_eq_error;
2685
2686 mem->va = eq_vaddress;
2687 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2688 sizeof(struct be_eq_entry), eq_vaddress);
2689 if (ret) {
2690 shost_printk(KERN_ERR, phba->shost,
457ff3b7 2691 "be_fill_queue Failed for EQ\n");
bfead3b2
JK
2692 goto create_eq_error;
2693 }
6733b39a 2694
bfead3b2
JK
2695 mem->dma = paddr;
2696 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2697 phwi_context->cur_eqd);
2698 if (ret) {
2699 shost_printk(KERN_ERR, phba->shost,
2700 "beiscsi_cmd_eq_create"
457ff3b7 2701 "Failedfor EQ\n");
bfead3b2
JK
2702 goto create_eq_error;
2703 }
2704 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
6733b39a 2705 }
6733b39a 2706 return 0;
bfead3b2
JK
2707create_eq_error:
2708 for (i = 0; i < (phba->num_cpus + 1); i++) {
2709 eq = &phwi_context->be_eq[i].q;
2710 mem = &eq->dma_mem;
2711 if (mem->va)
2712 pci_free_consistent(phba->pcidev, num_eq_pages
2713 * PAGE_SIZE,
2714 mem->va, mem->dma);
2715 }
2716 return ret;
6733b39a
JK
2717}
2718
bfead3b2 2719static int beiscsi_create_cqs(struct beiscsi_hba *phba,
6733b39a
JK
2720 struct hwi_context_memory *phwi_context)
2721{
bfead3b2 2722 unsigned int i, num_cq_pages;
6733b39a
JK
2723 int ret;
2724 struct be_queue_info *cq, *eq;
2725 struct be_dma_mem *mem;
bfead3b2 2726 struct be_eq_obj *pbe_eq;
6733b39a 2727 void *cq_vaddress;
bfead3b2 2728 dma_addr_t paddr;
6733b39a 2729
bfead3b2
JK
2730 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2731 sizeof(struct sol_cqe));
6733b39a 2732
bfead3b2
JK
2733 for (i = 0; i < phba->num_cpus; i++) {
2734 cq = &phwi_context->be_cq[i];
2735 eq = &phwi_context->be_eq[i].q;
2736 pbe_eq = &phwi_context->be_eq[i];
2737 pbe_eq->cq = cq;
2738 pbe_eq->phba = phba;
2739 mem = &cq->dma_mem;
2740 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2741 num_cq_pages * PAGE_SIZE,
2742 &paddr);
2743 if (!cq_vaddress)
2744 goto create_cq_error;
7da50879 2745 ret = be_fill_queue(cq, phba->params.num_cq_entries,
bfead3b2
JK
2746 sizeof(struct sol_cqe), cq_vaddress);
2747 if (ret) {
2748 shost_printk(KERN_ERR, phba->shost,
457ff3b7 2749 "be_fill_queue Failed for ISCSI CQ\n");
bfead3b2
JK
2750 goto create_cq_error;
2751 }
2752
2753 mem->dma = paddr;
2754 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2755 false, 0);
2756 if (ret) {
2757 shost_printk(KERN_ERR, phba->shost,
2758 "beiscsi_cmd_eq_create"
457ff3b7 2759 "Failed for ISCSI CQ\n");
bfead3b2
JK
2760 goto create_cq_error;
2761 }
2762 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2763 cq->id, eq->id);
2764 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
6733b39a 2765 }
6733b39a 2766 return 0;
bfead3b2
JK
2767
2768create_cq_error:
2769 for (i = 0; i < phba->num_cpus; i++) {
2770 cq = &phwi_context->be_cq[i];
2771 mem = &cq->dma_mem;
2772 if (mem->va)
2773 pci_free_consistent(phba->pcidev, num_cq_pages
2774 * PAGE_SIZE,
2775 mem->va, mem->dma);
2776 }
2777 return ret;
2778
6733b39a
JK
2779}
2780
/**
 * beiscsi_create_def_hdr - create the default PDU header queue
 * @phba:            adapter instance
 * @phwi_context:    context holding be_def_hdrq
 * @phwi_ctrlr:      controller state recording the queue id
 * @def_pdu_ring_sz: ring size in bytes for the firmware command
 *
 * Builds the queue over the HWI_MEM_ASYNC_HEADER_RING region, asks the
 * firmware to create it, then posts the initial header buffers.
 * Returns 0 on success or the failing step's errno.
 */
static int
beiscsi_create_def_hdr(struct beiscsi_hba *phba,
		       struct hwi_context_memory *phwi_context,
		       struct hwi_controller *phwi_ctrlr,
		       unsigned int def_pdu_ring_sz)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dq = &phwi_context->be_def_hdrq;
	cq = &phwi_context->be_cq[0];
	mem = &dq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_fill_queue Failed for DEF PDU HDR\n");
		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
				  bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_hdr_sz);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
		return ret;
	}
	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
	SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
		 phwi_context->be_def_hdrq.id);
	/* 1 = header ring (vs. 0 = data ring). */
	hwi_post_async_buffers(phba, 1);
	return 0;
}
2825
/**
 * beiscsi_create_def_data - create the default PDU data queue
 * @phba:            adapter instance
 * @phwi_context:    context holding be_def_dataq
 * @phwi_ctrlr:      controller state recording the queue id
 * @def_pdu_ring_sz: ring size in bytes for the firmware command
 *
 * Mirror of beiscsi_create_def_hdr() for the data ring: builds the queue
 * over HWI_MEM_ASYNC_DATA_RING, creates it in firmware, then posts the
 * initial data buffers.  Returns 0 on success or the failing errno.
 */
static int
beiscsi_create_def_data(struct beiscsi_hba *phba,
			struct hwi_context_memory *phwi_context,
			struct hwi_controller *phwi_ctrlr,
			unsigned int def_pdu_ring_sz)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dataq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dataq = &phwi_context->be_def_dataq;
	cq = &phwi_context->be_cq[0];
	mem = &dataq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_fill_queue Failed for DEF PDU DATA\n");
		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
				  bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_data_sz);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost,
			     "be_cmd_create_default_pdu_queue Failed"
			     " for DEF PDU DATA\n");
		return ret;
	}
	phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
	SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
		 phwi_context->be_def_dataq.id);
	/* 0 = data ring (vs. 1 = header ring). */
	hwi_post_async_buffers(phba, 0);
	SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
	return 0;
}
2872
2873static int
2874beiscsi_post_pages(struct beiscsi_hba *phba)
2875{
2876 struct be_mem_descriptor *mem_descr;
2877 struct mem_array *pm_arr;
2878 unsigned int page_offset, i;
2879 struct be_dma_mem sgl;
2880 int status;
2881
2882 mem_descr = phba->init_mem;
2883 mem_descr += HWI_MEM_SGE;
2884 pm_arr = mem_descr->mem_array;
2885
2886 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2887 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2888 for (i = 0; i < mem_descr->num_elements; i++) {
2889 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2890 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2891 page_offset,
2892 (pm_arr->size / PAGE_SIZE));
2893 page_offset += pm_arr->size / PAGE_SIZE;
2894 if (status != 0) {
2895 shost_printk(KERN_ERR, phba->shost,
2896 "post sgl failed.\n");
2897 return status;
2898 }
2899 pm_arr++;
2900 }
457ff3b7 2901 SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
6733b39a
JK
2902 return 0;
2903}
2904
bfead3b2
JK
2905static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2906{
2907 struct be_dma_mem *mem = &q->dma_mem;
c8b25598 2908 if (mem->va) {
bfead3b2
JK
2909 pci_free_consistent(phba->pcidev, mem->size,
2910 mem->va, mem->dma);
c8b25598
JK
2911 mem->va = NULL;
2912 }
bfead3b2
JK
2913}
2914
2915static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2916 u16 len, u16 entry_size)
2917{
2918 struct be_dma_mem *mem = &q->dma_mem;
2919
2920 memset(q, 0, sizeof(*q));
2921 q->len = len;
2922 q->entry_size = entry_size;
2923 mem->size = len * entry_size;
2924 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2925 if (!mem->va)
d3ad2bb3 2926 return -ENOMEM;
bfead3b2
JK
2927 memset(mem->va, 0, mem->size);
2928 return 0;
2929}
2930
6733b39a
JK
2931static int
2932beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2933 struct hwi_context_memory *phwi_context,
2934 struct hwi_controller *phwi_ctrlr)
2935{
2936 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2937 u64 pa_addr_lo;
2938 unsigned int idx, num, i;
2939 struct mem_array *pwrb_arr;
2940 void *wrb_vaddr;
2941 struct be_dma_mem sgl;
2942 struct be_mem_descriptor *mem_descr;
2943 int status;
2944
2945 idx = 0;
2946 mem_descr = phba->init_mem;
2947 mem_descr += HWI_MEM_WRB;
2948 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2949 GFP_KERNEL);
2950 if (!pwrb_arr) {
2951 shost_printk(KERN_ERR, phba->shost,
2952 "Memory alloc failed in create wrb ring.\n");
2953 return -ENOMEM;
2954 }
2955 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2956 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2957 num_wrb_rings = mem_descr->mem_array[idx].size /
2958 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2959
2960 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2961 if (num_wrb_rings) {
2962 pwrb_arr[num].virtual_address = wrb_vaddr;
2963 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2964 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2965 sizeof(struct iscsi_wrb);
2966 wrb_vaddr += pwrb_arr[num].size;
2967 pa_addr_lo += pwrb_arr[num].size;
2968 num_wrb_rings--;
2969 } else {
2970 idx++;
2971 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2972 pa_addr_lo = mem_descr->mem_array[idx].\
2973 bus_address.u.a64.address;
2974 num_wrb_rings = mem_descr->mem_array[idx].size /
2975 (phba->params.wrbs_per_cxn *
2976 sizeof(struct iscsi_wrb));
2977 pwrb_arr[num].virtual_address = wrb_vaddr;
2978 pwrb_arr[num].bus_address.u.a64.address\
2979 = pa_addr_lo;
2980 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2981 sizeof(struct iscsi_wrb);
2982 wrb_vaddr += pwrb_arr[num].size;
2983 pa_addr_lo += pwrb_arr[num].size;
2984 num_wrb_rings--;
2985 }
2986 }
2987 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2988 wrb_mem_index = 0;
2989 offset = 0;
2990 size = 0;
2991
2992 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2993 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2994 &phwi_context->be_wrbq[i]);
2995 if (status != 0) {
2996 shost_printk(KERN_ERR, phba->shost,
2997 "wrbq create failed.");
1462b8ff 2998 kfree(pwrb_arr);
6733b39a
JK
2999 return status;
3000 }
7da50879
JK
3001 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
3002 id;
6733b39a
JK
3003 }
3004 kfree(pwrb_arr);
3005 return 0;
3006}
3007
3008static void free_wrb_handles(struct beiscsi_hba *phba)
3009{
3010 unsigned int index;
3011 struct hwi_controller *phwi_ctrlr;
3012 struct hwi_wrb_context *pwrb_context;
3013
3014 phwi_ctrlr = phba->phwi_ctrlr;
3015 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
3016 pwrb_context = &phwi_ctrlr->wrb_context[index];
3017 kfree(pwrb_context->pwrb_handle_base);
3018 kfree(pwrb_context->pwrb_handle_basestd);
3019 }
3020}
3021
bfead3b2
JK
3022static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3023{
3024 struct be_queue_info *q;
3025 struct be_ctrl_info *ctrl = &phba->ctrl;
3026
3027 q = &phba->ctrl.mcc_obj.q;
3028 if (q->created)
3029 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3030 be_queue_free(phba, q);
3031
3032 q = &phba->ctrl.mcc_obj.cq;
3033 if (q->created)
3034 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3035 be_queue_free(phba, q);
3036}
3037
6733b39a
JK
3038static void hwi_cleanup(struct beiscsi_hba *phba)
3039{
3040 struct be_queue_info *q;
3041 struct be_ctrl_info *ctrl = &phba->ctrl;
3042 struct hwi_controller *phwi_ctrlr;
3043 struct hwi_context_memory *phwi_context;
bfead3b2 3044 int i, eq_num;
6733b39a
JK
3045
3046 phwi_ctrlr = phba->phwi_ctrlr;
3047 phwi_context = phwi_ctrlr->phwi_ctxt;
3048 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3049 q = &phwi_context->be_wrbq[i];
3050 if (q->created)
3051 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3052 }
6733b39a
JK
3053 free_wrb_handles(phba);
3054
3055 q = &phwi_context->be_def_hdrq;
3056 if (q->created)
3057 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3058
3059 q = &phwi_context->be_def_dataq;
3060 if (q->created)
3061 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3062
3063 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3064
bfead3b2
JK
3065 for (i = 0; i < (phba->num_cpus); i++) {
3066 q = &phwi_context->be_cq[i];
3067 if (q->created)
3068 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3069 }
3070 if (phba->msix_enabled)
3071 eq_num = 1;
3072 else
3073 eq_num = 0;
3074 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
3075 q = &phwi_context->be_eq[i].q;
3076 if (q->created)
3077 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3078 }
3079 be_mcc_queues_destroy(phba);
3080}
6733b39a 3081
bfead3b2
JK
3082static int be_mcc_queues_create(struct beiscsi_hba *phba,
3083 struct hwi_context_memory *phwi_context)
3084{
3085 struct be_queue_info *q, *cq;
3086 struct be_ctrl_info *ctrl = &phba->ctrl;
3087
3088 /* Alloc MCC compl queue */
3089 cq = &phba->ctrl.mcc_obj.cq;
3090 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3091 sizeof(struct be_mcc_compl)))
3092 goto err;
3093 /* Ask BE to create MCC compl queue; */
3094 if (phba->msix_enabled) {
3095 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3096 [phba->num_cpus].q, false, true, 0))
3097 goto mcc_cq_free;
3098 } else {
3099 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3100 false, true, 0))
3101 goto mcc_cq_free;
3102 }
3103
3104 /* Alloc MCC queue */
3105 q = &phba->ctrl.mcc_obj.q;
3106 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3107 goto mcc_cq_destroy;
3108
3109 /* Ask BE to create MCC queue */
35e66019 3110 if (beiscsi_cmd_mccq_create(phba, q, cq))
bfead3b2
JK
3111 goto mcc_q_free;
3112
3113 return 0;
3114
3115mcc_q_free:
3116 be_queue_free(phba, q);
3117mcc_cq_destroy:
3118 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3119mcc_cq_free:
3120 be_queue_free(phba, cq);
3121err:
d3ad2bb3 3122 return -ENOMEM;
bfead3b2
JK
3123}
3124
3125static int find_num_cpus(void)
3126{
3127 int num_cpus = 0;
3128
3129 num_cpus = num_online_cpus();
3130 if (num_cpus >= MAX_CPUS)
3131 num_cpus = MAX_CPUS - 1;
3132
457ff3b7 3133 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
bfead3b2 3134 return num_cpus;
6733b39a
JK
3135}
3136
3137static int hwi_init_port(struct beiscsi_hba *phba)
3138{
3139 struct hwi_controller *phwi_ctrlr;
3140 struct hwi_context_memory *phwi_context;
3141 unsigned int def_pdu_ring_sz;
3142 struct be_ctrl_info *ctrl = &phba->ctrl;
3143 int status;
3144
3145 def_pdu_ring_sz =
3146 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
3147 phwi_ctrlr = phba->phwi_ctrlr;
6733b39a 3148 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
3149 phwi_context->max_eqd = 0;
3150 phwi_context->min_eqd = 0;
3151 phwi_context->cur_eqd = 64;
6733b39a 3152 be_cmd_fw_initialize(&phba->ctrl);
bfead3b2
JK
3153
3154 status = beiscsi_create_eqs(phba, phwi_context);
6733b39a 3155 if (status != 0) {
457ff3b7 3156 shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
6733b39a
JK
3157 goto error;
3158 }
3159
bfead3b2
JK
3160 status = be_mcc_queues_create(phba, phwi_context);
3161 if (status != 0)
3162 goto error;
3163
3164 status = mgmt_check_supported_fw(ctrl, phba);
6733b39a
JK
3165 if (status != 0) {
3166 shost_printk(KERN_ERR, phba->shost,
457ff3b7 3167 "Unsupported fw version\n");
6733b39a
JK
3168 goto error;
3169 }
3170
bfead3b2 3171 status = beiscsi_create_cqs(phba, phwi_context);
6733b39a
JK
3172 if (status != 0) {
3173 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
3174 goto error;
3175 }
3176
3177 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
3178 def_pdu_ring_sz);
3179 if (status != 0) {
3180 shost_printk(KERN_ERR, phba->shost,
3181 "Default Header not created\n");
3182 goto error;
3183 }
3184
3185 status = beiscsi_create_def_data(phba, phwi_context,
3186 phwi_ctrlr, def_pdu_ring_sz);
3187 if (status != 0) {
3188 shost_printk(KERN_ERR, phba->shost,
3189 "Default Data not created\n");
3190 goto error;
3191 }
3192
3193 status = beiscsi_post_pages(phba);
3194 if (status != 0) {
3195 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
3196 goto error;
3197 }
3198
3199 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3200 if (status != 0) {
3201 shost_printk(KERN_ERR, phba->shost,
3202 "WRB Rings not created\n");
3203 goto error;
3204 }
3205
3206 SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
3207 return 0;
3208
3209error:
3210 shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
3211 hwi_cleanup(phba);
3212 return -ENOMEM;
3213}
3214
6733b39a
JK
3215static int hwi_init_controller(struct beiscsi_hba *phba)
3216{
3217 struct hwi_controller *phwi_ctrlr;
3218
3219 phwi_ctrlr = phba->phwi_ctrlr;
3220 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3221 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3222 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
457ff3b7 3223 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
6733b39a
JK
3224 phwi_ctrlr->phwi_ctxt);
3225 } else {
3226 shost_printk(KERN_ERR, phba->shost,
3227 "HWI_MEM_ADDN_CONTEXT is more than one element."
3228 "Failing to load\n");
3229 return -ENOMEM;
3230 }
3231
3232 iscsi_init_global_templates(phba);
3ec78271
JK
3233 if (beiscsi_init_wrb_handle(phba))
3234 return -ENOMEM;
3235
6733b39a
JK
3236 hwi_init_async_pdu_ctx(phba);
3237 if (hwi_init_port(phba) != 0) {
3238 shost_printk(KERN_ERR, phba->shost,
3239 "hwi_init_controller failed\n");
3240 return -ENOMEM;
3241 }
3242 return 0;
3243}
3244
3245static void beiscsi_free_mem(struct beiscsi_hba *phba)
3246{
3247 struct be_mem_descriptor *mem_descr;
3248 int i, j;
3249
3250 mem_descr = phba->init_mem;
3251 i = 0;
3252 j = 0;
3253 for (i = 0; i < SE_MEM_MAX; i++) {
3254 for (j = mem_descr->num_elements; j > 0; j--) {
3255 pci_free_consistent(phba->pcidev,
3256 mem_descr->mem_array[j - 1].size,
3257 mem_descr->mem_array[j - 1].virtual_address,
457ff3b7
JK
3258 (unsigned long)mem_descr->mem_array[j - 1].
3259 bus_address.u.a64.address);
6733b39a
JK
3260 }
3261 kfree(mem_descr->mem_array);
3262 mem_descr++;
3263 }
3264 kfree(phba->init_mem);
3265 kfree(phba->phwi_ctrlr);
3266}
3267
3268static int beiscsi_init_controller(struct beiscsi_hba *phba)
3269{
3270 int ret = -ENOMEM;
3271
3272 ret = beiscsi_get_memory(phba);
3273 if (ret < 0) {
3274 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
457ff3b7 3275 "Failed in beiscsi_alloc_memory\n");
6733b39a
JK
3276 return ret;
3277 }
3278
3279 ret = hwi_init_controller(phba);
3280 if (ret)
3281 goto free_init;
3282 SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
3283 return 0;
3284
3285free_init:
3286 beiscsi_free_mem(phba);
3287 return -ENOMEM;
3288}
3289
/**
 * beiscsi_init_sgl_handle - build the I/O and EH SGL handle pools
 * @phba: adapter instance
 *
 * Splits the pre-allocated sgl_handle structs (HWI_MEM_SGLH) into two
 * pointer arrays: the first ios_per_ctrl handles for normal I/O, the
 * remaining (icds_per_ctrl - ios_per_ctrl) for error-handling/mgmt
 * tasks.  Then walks the SGE fragment memory (HWI_MEM_SGE) and attaches
 * one fragment run of num_sge_per_io SGEs to each handle, assigning
 * sgl_index values starting at the firmware's first ICD.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or if
 * HWI_MEM_SGLH unexpectedly has more than one element.
 */
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
	struct sgl_handle *psgl_handle;
	struct iscsi_sge *pfrag;
	unsigned int arr_index, i, idx;

	phba->io_sgl_hndl_avbl = 0;
	phba->eh_sgl_hndl_avbl = 0;

	mem_descr_sglh = phba->init_mem;
	mem_descr_sglh += HWI_MEM_SGLH;
	if (1 == mem_descr_sglh->num_elements) {
		/* pointer array for the I/O handle pool */
		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 phba->params.ios_per_ctrl,
						 GFP_KERNEL);
		if (!phba->io_sgl_hndl_base) {
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
		/* pointer array for the EH/mgmt handle pool */
		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 (phba->params.icds_per_ctrl -
						  phba->params.ios_per_ctrl),
						 GFP_KERNEL);
		if (!phba->eh_sgl_hndl_base) {
			kfree(phba->io_sgl_hndl_base);
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
	} else {
		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_SGLH is more than one element."
			     "Failing to load\n");
		return -ENOMEM;
	}

	/* Pass 1: distribute the raw sgl_handle structs across both pools;
	 * the first ios_per_ctrl go to I/O, the rest to EH.
	 */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sglh->num_elements) {
		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;

		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
				 sizeof(struct sgl_handle)); i++) {
			if (arr_index < phba->params.ios_per_ctrl) {
				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
				phba->io_sgl_hndl_avbl++;
				arr_index++;
			} else {
				phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl] =
					psgl_handle;
				arr_index++;
				phba->eh_sgl_hndl_avbl++;
			}
			psgl_handle++;
		}
		idx++;
	}
	SE_DEBUG(DBG_LVL_8,
		 "phba->io_sgl_hndl_avbl=%d"
		 "phba->eh_sgl_hndl_avbl=%d\n",
		 phba->io_sgl_hndl_avbl,
		 phba->eh_sgl_hndl_avbl);
	mem_descr_sg = phba->init_mem;
	mem_descr_sg += HWI_MEM_SGE;
	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
		 mem_descr_sg->num_elements);

	/* Pass 2: give each handle its SGE fragment run (zeroed address)
	 * and a firmware-relative sgl_index.
	 */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sg->num_elements) {
		pfrag = mem_descr_sg->mem_array[idx].virtual_address;

		for (i = 0;
		     i < (mem_descr_sg->mem_array[idx].size) /
		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
		     i++) {
			if (arr_index < phba->params.ios_per_ctrl)
				psgl_handle = phba->io_sgl_hndl_base[arr_index];
			else
				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
						phba->params.ios_per_ctrl];
			psgl_handle->pfrag = pfrag;
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
			pfrag += phba->params.num_sge_per_io;
			psgl_handle->sgl_index =
				phba->fw_config.iscsi_icd_start + arr_index++;
		}
		idx++;
	}
	/* both pools start fully free */
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}
3388
3389static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3390{
3391 int i, new_cid;
3392
c2462288 3393 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
6733b39a
JK
3394 GFP_KERNEL);
3395 if (!phba->cid_array) {
3396 shost_printk(KERN_ERR, phba->shost,
3397 "Failed to allocate memory in "
3398 "hba_setup_cid_tbls\n");
3399 return -ENOMEM;
3400 }
c2462288 3401 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
6733b39a
JK
3402 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3403 if (!phba->ep_array) {
3404 shost_printk(KERN_ERR, phba->shost,
3405 "Failed to allocate memory in "
457ff3b7 3406 "hba_setup_cid_tbls\n");
6733b39a
JK
3407 kfree(phba->cid_array);
3408 return -ENOMEM;
3409 }
7da50879 3410 new_cid = phba->fw_config.iscsi_cid_start;
6733b39a
JK
3411 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3412 phba->cid_array[i] = new_cid;
3413 new_cid += 2;
3414 }
3415 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3416 return 0;
3417}
3418
238f6b72 3419static void hwi_enable_intr(struct beiscsi_hba *phba)
6733b39a
JK
3420{
3421 struct be_ctrl_info *ctrl = &phba->ctrl;
3422 struct hwi_controller *phwi_ctrlr;
3423 struct hwi_context_memory *phwi_context;
3424 struct be_queue_info *eq;
3425 u8 __iomem *addr;
bfead3b2 3426 u32 reg, i;
6733b39a
JK
3427 u32 enabled;
3428
3429 phwi_ctrlr = phba->phwi_ctrlr;
3430 phwi_context = phwi_ctrlr->phwi_ctxt;
3431
6733b39a
JK
3432 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3433 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3434 reg = ioread32(addr);
6733b39a
JK
3435
3436 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3437 if (!enabled) {
3438 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
457ff3b7 3439 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
6733b39a 3440 iowrite32(reg, addr);
665d6d94
JK
3441 }
3442
3443 if (!phba->msix_enabled) {
3444 eq = &phwi_context->be_eq[0].q;
3445 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3446 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3447 } else {
3448 for (i = 0; i <= phba->num_cpus; i++) {
3449 eq = &phwi_context->be_eq[i].q;
457ff3b7 3450 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
bfead3b2
JK
3451 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3452 }
c03af1ae 3453 }
6733b39a
JK
3454}
3455
3456static void hwi_disable_intr(struct beiscsi_hba *phba)
3457{
3458 struct be_ctrl_info *ctrl = &phba->ctrl;
3459
3460 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3461 u32 reg = ioread32(addr);
3462
3463 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3464 if (enabled) {
3465 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3466 iowrite32(reg, addr);
3467 } else
3468 shost_printk(KERN_WARNING, phba->shost,
457ff3b7 3469 "In hwi_disable_intr, Already Disabled\n");
6733b39a
JK
3470}
3471
c7acc5b8
JK
3472static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3473{
3474 struct be_cmd_resp_get_boot_target *boot_resp;
3475 struct be_cmd_resp_get_session *session_resp;
3476 struct be_mcc_wrb *wrb;
3477 struct be_dma_mem nonemb_cmd;
3478 unsigned int tag, wrb_num;
3479 unsigned short status, extd_status;
3480 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
f457a46f 3481 int ret = -ENOMEM;
c7acc5b8
JK
3482
3483 tag = beiscsi_get_boot_target(phba);
3484 if (!tag) {
3485 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
3486 return -EAGAIN;
3487 } else
3488 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3489 phba->ctrl.mcc_numtag[tag]);
3490
3491 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3492 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3493 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3494 if (status || extd_status) {
3495 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
3496 " status = %d extd_status = %d\n",
3497 status, extd_status);
3498 free_mcc_tag(&phba->ctrl, tag);
3499 return -EBUSY;
3500 }
3501 wrb = queue_get_wrb(mccq, wrb_num);
3502 free_mcc_tag(&phba->ctrl, tag);
3503 boot_resp = embedded_payload(wrb);
3504
3505 if (boot_resp->boot_session_handle < 0) {
f457a46f 3506 shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
c7acc5b8
JK
3507 return -ENXIO;
3508 }
3509
3510 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
3511 sizeof(*session_resp),
3512 &nonemb_cmd.dma);
3513 if (nonemb_cmd.va == NULL) {
3514 SE_DEBUG(DBG_LVL_1,
3515 "Failed to allocate memory for"
3516 "beiscsi_get_session_info\n");
3517 return -ENOMEM;
3518 }
3519
3520 memset(nonemb_cmd.va, 0, sizeof(*session_resp));
3521 tag = beiscsi_get_session_info(phba,
3522 boot_resp->boot_session_handle, &nonemb_cmd);
3523 if (!tag) {
3524 SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
3525 " Failed\n");
3526 goto boot_freemem;
3527 } else
3528 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3529 phba->ctrl.mcc_numtag[tag]);
3530
3531 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3532 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3533 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3534 if (status || extd_status) {
3535 SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed"
3536 " status = %d extd_status = %d\n",
3537 status, extd_status);
3538 free_mcc_tag(&phba->ctrl, tag);
3539 goto boot_freemem;
3540 }
3541 wrb = queue_get_wrb(mccq, wrb_num);
3542 free_mcc_tag(&phba->ctrl, tag);
3543 session_resp = nonemb_cmd.va ;
f457a46f 3544
c7acc5b8
JK
3545 memcpy(&phba->boot_sess, &session_resp->session_info,
3546 sizeof(struct mgmt_session_info));
f457a46f
MC
3547 ret = 0;
3548
c7acc5b8
JK
3549boot_freemem:
3550 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3551 nonemb_cmd.va, nonemb_cmd.dma);
f457a46f
MC
3552 return ret;
3553}
3554
3555static void beiscsi_boot_release(void *data)
3556{
3557 struct beiscsi_hba *phba = data;
3558
3559 scsi_host_put(phba->shost);
3560}
3561
3562static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
3563{
3564 struct iscsi_boot_kobj *boot_kobj;
3565
3566 /* get boot info using mgmt cmd */
3567 if (beiscsi_get_boot_info(phba))
3568 /* Try to see if we can carry on without this */
3569 return 0;
3570
3571 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
3572 if (!phba->boot_kset)
3573 return -ENOMEM;
3574
3575 /* get a ref because the show function will ref the phba */
3576 if (!scsi_host_get(phba->shost))
3577 goto free_kset;
3578 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
3579 beiscsi_show_boot_tgt_info,
3580 beiscsi_tgt_get_attr_visibility,
3581 beiscsi_boot_release);
3582 if (!boot_kobj)
3583 goto put_shost;
3584
3585 if (!scsi_host_get(phba->shost))
3586 goto free_kset;
3587 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
3588 beiscsi_show_boot_ini_info,
3589 beiscsi_ini_get_attr_visibility,
3590 beiscsi_boot_release);
3591 if (!boot_kobj)
3592 goto put_shost;
3593
3594 if (!scsi_host_get(phba->shost))
3595 goto free_kset;
3596 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
3597 beiscsi_show_boot_eth_info,
3598 beiscsi_eth_get_attr_visibility,
3599 beiscsi_boot_release);
3600 if (!boot_kobj)
3601 goto put_shost;
3602 return 0;
3603
3604put_shost:
3605 scsi_host_put(phba->shost);
3606free_kset:
3607 iscsi_boot_destroy_kset(phba->boot_kset);
c7acc5b8
JK
3608 return -ENOMEM;
3609}
3610
6733b39a
JK
3611static int beiscsi_init_port(struct beiscsi_hba *phba)
3612{
3613 int ret;
3614
3615 ret = beiscsi_init_controller(phba);
3616 if (ret < 0) {
3617 shost_printk(KERN_ERR, phba->shost,
3618 "beiscsi_dev_probe - Failed in"
457ff3b7 3619 "beiscsi_init_controller\n");
6733b39a
JK
3620 return ret;
3621 }
3622 ret = beiscsi_init_sgl_handle(phba);
3623 if (ret < 0) {
3624 shost_printk(KERN_ERR, phba->shost,
3625 "beiscsi_dev_probe - Failed in"
457ff3b7 3626 "beiscsi_init_sgl_handle\n");
6733b39a
JK
3627 goto do_cleanup_ctrlr;
3628 }
3629
3630 if (hba_setup_cid_tbls(phba)) {
3631 shost_printk(KERN_ERR, phba->shost,
3632 "Failed in hba_setup_cid_tbls\n");
3633 kfree(phba->io_sgl_hndl_base);
3634 kfree(phba->eh_sgl_hndl_base);
3635 goto do_cleanup_ctrlr;
3636 }
3637
3638 return ret;
3639
3640do_cleanup_ctrlr:
3641 hwi_cleanup(phba);
3642 return ret;
3643}
3644
3645static void hwi_purge_eq(struct beiscsi_hba *phba)
3646{
3647 struct hwi_controller *phwi_ctrlr;
3648 struct hwi_context_memory *phwi_context;
3649 struct be_queue_info *eq;
3650 struct be_eq_entry *eqe = NULL;
bfead3b2 3651 int i, eq_msix;
756d29c8 3652 unsigned int num_processed;
6733b39a
JK
3653
3654 phwi_ctrlr = phba->phwi_ctrlr;
3655 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
3656 if (phba->msix_enabled)
3657 eq_msix = 1;
3658 else
3659 eq_msix = 0;
6733b39a 3660
bfead3b2
JK
3661 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3662 eq = &phwi_context->be_eq[i].q;
6733b39a 3663 eqe = queue_tail_node(eq);
756d29c8 3664 num_processed = 0;
bfead3b2
JK
3665 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3666 & EQE_VALID_MASK) {
3667 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3668 queue_tail_inc(eq);
3669 eqe = queue_tail_node(eq);
756d29c8 3670 num_processed++;
bfead3b2 3671 }
756d29c8
JK
3672
3673 if (num_processed)
3674 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
6733b39a
JK
3675 }
3676}
3677
3678static void beiscsi_clean_port(struct beiscsi_hba *phba)
3679{
03a12310 3680 int mgmt_status;
6733b39a
JK
3681
3682 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3683 if (mgmt_status)
3684 shost_printk(KERN_WARNING, phba->shost,
457ff3b7 3685 "mgmt_epfw_cleanup FAILED\n");
756d29c8 3686
6733b39a 3687 hwi_purge_eq(phba);
756d29c8 3688 hwi_cleanup(phba);
6733b39a
JK
3689 kfree(phba->io_sgl_hndl_base);
3690 kfree(phba->eh_sgl_hndl_base);
3691 kfree(phba->cid_array);
3692 kfree(phba->ep_array);
3693}
3694
1282ab76
MC
/**
 * beiscsi_cleanup_task - release per-task driver resources
 * @task: libiscsi task being torn down
 *
 * Returns the task's BHS buffer to the session's PCI pool and frees the
 * WRB and SGL handles held by the task.  SCSI (data) tasks return their
 * handle to the I/O SGL pool; mgmt tasks use the mgmt SGL pool and keep
 * their handles while a login is still in progress.
 */
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	/* wrb_context is indexed by CID relative to the FW's first CID */
	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
			- phba->fw_config.iscsi_cid_start];

	if (io_task->cmd_bhs) {
		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
			      io_task->bhs_pa.u.a64.address);
		io_task->cmd_bhs = NULL;
	}

	if (task->sc) {
		/* data-path task: free the WRB and I/O SGL handles */
		if (io_task->pwrb_handle) {
			free_wrb_handle(phba, pwrb_context,
					io_task->pwrb_handle);
			io_task->pwrb_handle = NULL;
		}

		if (io_task->psgl_handle) {
			spin_lock(&phba->io_sgl_lock);
			free_io_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->io_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	} else {
		/* mgmt task: handles are kept while login is in progress */
		if (!beiscsi_conn->login_in_progress) {
			if (io_task->pwrb_handle) {
				free_wrb_handle(phba, pwrb_context,
						io_task->pwrb_handle);
				io_task->pwrb_handle = NULL;
			}
			if (io_task->psgl_handle) {
				spin_lock(&phba->mgmt_sgl_lock);
				free_mgmt_sgl_handle(phba,
						io_task->psgl_handle);
				spin_unlock(&phba->mgmt_sgl_lock);
				io_task->psgl_handle = NULL;
			}
		}
	}
}
3745
6733b39a
JK
/**
 * beiscsi_offload_connection - push negotiated parameters to the chip
 * @beiscsi_conn: connection being moved to offloaded (full-feature) phase
 * @params: negotiated iSCSI parameters in firmware bit layout
 *
 * Frees the login task's resources, then builds a target-context-update
 * WRB carrying the negotiated parameters and posts it through the
 * TXULP0 doorbell.
 */
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct iscsi_target_context_update_wrb *pwrb = NULL;
	struct be_mem_descriptor *mem_descr;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_task *task = beiscsi_conn->task;
	struct iscsi_session *session = task->conn->session;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	/* login is done: release the login task's handles under the
	 * session lock before reusing the connection for I/O
	 */
	beiscsi_conn->login_in_progress = 0;
	spin_lock_bh(&session->lock);
	beiscsi_cleanup_task(task);
	spin_unlock_bh(&session->lock);

	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
				       phba->fw_config.iscsi_cid_start));
	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));

	/* copy the negotiated parameters out of the firmware-layout
	 * params block into the context-update WRB fields
	 */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_burst_length, pwrb, params->dw[offsetof
		      (struct amap_beiscsi_offload_params,
		      max_burst_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_send_data_segment_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      max_send_data_segment_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      first_burst_length,
		      pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      first_burst_length) / 32]);

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      erl) / 32] & OFFLD_PARAMS_ERL));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
		      pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      exp_statsn) / 32] + 1));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
		      0x7);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
		      pwrb, pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
		      pwrb, pwrb_handle->nxt_wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      session_state, pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
		      pwrb, 1);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
		      pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
		      0);

	/* point the context at the shared pad buffer */
	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      pad_buffer_addr_hi, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      pad_buffer_addr_lo, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

	/* ring the doorbell: CID + WRB index + one WRB posted */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
}
3838
/* Translate a PDU itt back to the libiscsi task index (and session age).
 * The driver stores the libiscsi itt directly, so the cast is the whole
 * translation.
 */
static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
			      int *index, int *age)
{
	*index = (int)itt;
	if (age)
		*age = conn->session->age;
}
3846
3847/**
3848 * beiscsi_alloc_pdu - allocates pdu and related resources
3849 * @task: libiscsi task
3850 * @opcode: opcode of pdu for task
3851 *
3852 * This is called with the session lock held. It will allocate
3853 * the wrb and sgl if needed for the command. And it will prep
3854 * the pdu's itt. beiscsi_parse_pdu will later translate
3855 * the pdu itt to the libiscsi task itt.
3856 */
3857static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3858{
3859 struct beiscsi_io_task *io_task = task->dd_data;
3860 struct iscsi_conn *conn = task->conn;
3861 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3862 struct beiscsi_hba *phba = beiscsi_conn->phba;
3863 struct hwi_wrb_context *pwrb_context;
3864 struct hwi_controller *phwi_ctrlr;
3865 itt_t itt;
2afc95bf
JK
3866 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3867 dma_addr_t paddr;
6733b39a 3868
2afc95bf 3869 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
bc7accec 3870 GFP_ATOMIC, &paddr);
2afc95bf
JK
3871 if (!io_task->cmd_bhs)
3872 return -ENOMEM;
2afc95bf 3873 io_task->bhs_pa.u.a64.address = paddr;
bfead3b2 3874 io_task->libiscsi_itt = (itt_t)task->itt;
6733b39a
JK
3875 io_task->conn = beiscsi_conn;
3876
3877 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3878 task->hdr_max = sizeof(struct be_cmd_bhs);
d2cecf0d 3879 io_task->psgl_handle = NULL;
3ec78271 3880 io_task->pwrb_handle = NULL;
6733b39a
JK
3881
3882 if (task->sc) {
3883 spin_lock(&phba->io_sgl_lock);
3884 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3885 spin_unlock(&phba->io_sgl_lock);
2afc95bf
JK
3886 if (!io_task->psgl_handle)
3887 goto free_hndls;
d2cecf0d
JK
3888 io_task->pwrb_handle = alloc_wrb_handle(phba,
3889 beiscsi_conn->beiscsi_conn_cid -
3890 phba->fw_config.iscsi_cid_start);
3891 if (!io_task->pwrb_handle)
3892 goto free_io_hndls;
6733b39a
JK
3893 } else {
3894 io_task->scsi_cmnd = NULL;
d7aea67b 3895 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
6733b39a
JK
3896 if (!beiscsi_conn->login_in_progress) {
3897 spin_lock(&phba->mgmt_sgl_lock);
3898 io_task->psgl_handle = (struct sgl_handle *)
3899 alloc_mgmt_sgl_handle(phba);
3900 spin_unlock(&phba->mgmt_sgl_lock);
2afc95bf
JK
3901 if (!io_task->psgl_handle)
3902 goto free_hndls;
3903
6733b39a
JK
3904 beiscsi_conn->login_in_progress = 1;
3905 beiscsi_conn->plogin_sgl_handle =
3906 io_task->psgl_handle;
d2cecf0d
JK
3907 io_task->pwrb_handle =
3908 alloc_wrb_handle(phba,
3909 beiscsi_conn->beiscsi_conn_cid -
3910 phba->fw_config.iscsi_cid_start);
3911 if (!io_task->pwrb_handle)
3912 goto free_io_hndls;
3913 beiscsi_conn->plogin_wrb_handle =
3914 io_task->pwrb_handle;
3915
6733b39a
JK
3916 } else {
3917 io_task->psgl_handle =
3918 beiscsi_conn->plogin_sgl_handle;
d2cecf0d
JK
3919 io_task->pwrb_handle =
3920 beiscsi_conn->plogin_wrb_handle;
6733b39a 3921 }
1282ab76 3922 beiscsi_conn->task = task;
6733b39a
JK
3923 } else {
3924 spin_lock(&phba->mgmt_sgl_lock);
3925 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3926 spin_unlock(&phba->mgmt_sgl_lock);
2afc95bf
JK
3927 if (!io_task->psgl_handle)
3928 goto free_hndls;
d2cecf0d
JK
3929 io_task->pwrb_handle =
3930 alloc_wrb_handle(phba,
3931 beiscsi_conn->beiscsi_conn_cid -
3932 phba->fw_config.iscsi_cid_start);
3933 if (!io_task->pwrb_handle)
3934 goto free_mgmt_hndls;
3935
6733b39a
JK
3936 }
3937 }
bfead3b2
JK
3938 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3939 wrb_index << 16) | (unsigned int)
3940 (io_task->psgl_handle->sgl_index));
32951dd8 3941 io_task->pwrb_handle->pio_handle = task;
bfead3b2 3942
6733b39a
JK
3943 io_task->cmd_bhs->iscsi_hdr.itt = itt;
3944 return 0;
2afc95bf 3945
d2cecf0d
JK
3946free_io_hndls:
3947 spin_lock(&phba->io_sgl_lock);
3948 free_io_sgl_handle(phba, io_task->psgl_handle);
3949 spin_unlock(&phba->io_sgl_lock);
3950 goto free_hndls;
3951free_mgmt_hndls:
3952 spin_lock(&phba->mgmt_sgl_lock);
3953 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3954 spin_unlock(&phba->mgmt_sgl_lock);
2afc95bf
JK
3955free_hndls:
3956 phwi_ctrlr = phba->phwi_ctrlr;
7da50879
JK
3957 pwrb_context = &phwi_ctrlr->wrb_context[
3958 beiscsi_conn->beiscsi_conn_cid -
3959 phba->fw_config.iscsi_cid_start];
d2cecf0d
JK
3960 if (io_task->pwrb_handle)
3961 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
2afc95bf
JK
3962 io_task->pwrb_handle = NULL;
3963 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3964 io_task->bhs_pa.u.a64.address);
1282ab76 3965 io_task->cmd_bhs = NULL;
457ff3b7 3966 SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
2afc95bf 3967 return -ENOMEM;
6733b39a
JK
3968}
3969
/**
 * beiscsi_iotask - build and post a WRB for a SCSI command
 * @task: libiscsi task (resources already set up by beiscsi_alloc_pdu)
 * @sg: DMA-mapped scatterlist of the data buffer
 * @num_sg: number of scatterlist entries
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for DMA_TO_DEVICE (write), zero for read
 *
 * Fills the pre-allocated WRB with the command parameters and SGL,
 * converts it to the adapter's endianness and rings the TX doorbell.
 * Always returns 0.
 */
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		/* prepare the template Data-Out header the adapter will use
		 * for solicited/unsolicited data PDUs of this command */
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_WR_CMD);
		/* dsp=1: data segment present for the write */
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	/* copy the 8-byte LUN from the command BHS into the Data-Out template */
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));

	/* NOTE(review): only the first 16 bits of the 8-byte LUN are written
	 * into the WRB lun field — presumably assumes single-level LUN
	 * addressing; confirm against the adapter spec */
	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16(*(unsigned short *)
				  &io_task->cmd_bhs->iscsi_hdr.lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	/* swizzle the whole WRB before handing it to the adapter */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* doorbell: connection id, wrb index, and a post count of 1 */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
4033
/**
 * beiscsi_mtask - build and post a WRB for a management PDU
 * @task: libiscsi task (login, nop-out, text, TMF or logout)
 *
 * Zeroes the pre-allocated WRB, programs the common fields, then sets
 * the opcode-specific type/dmsg bits and copies the PDU via
 * hwi_write_buffer() before ringing the TX doorbell.
 *
 * Returns 0 on success, -EINVAL for an unsupported opcode.
 */
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	unsigned int cid;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		/* login always starts the cmdsn_itt sequence at 1 */
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		/* a valid ttt means this nop-out answers a target nop-in */
		if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      TGT_DM_CMD);
			AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
				      pwrb, 0);
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_RD_CMD);
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		}
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;

	default:
		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      task->data_count);
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	/* swizzle the whole WRB before handing it to the adapter */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* doorbell: connection id, wrb index, and a post count of 1 */
	doorbell |= cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
4114
4115static int beiscsi_task_xmit(struct iscsi_task *task)
4116{
6733b39a
JK
4117 struct beiscsi_io_task *io_task = task->dd_data;
4118 struct scsi_cmnd *sc = task->sc;
6733b39a
JK
4119 struct scatterlist *sg;
4120 int num_sg;
4121 unsigned int writedir = 0, xferlen = 0;
4122
6733b39a
JK
4123 if (!sc)
4124 return beiscsi_mtask(task);
4125
4126 io_task->scsi_cmnd = sc;
4127 num_sg = scsi_dma_map(sc);
4128 if (num_sg < 0) {
4129 SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
4130 return num_sg;
4131 }
6733b39a
JK
4132 xferlen = scsi_bufflen(sc);
4133 sg = scsi_sglist(sc);
4134 if (sc->sc_data_direction == DMA_TO_DEVICE) {
4135 writedir = 1;
457ff3b7 4136 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
6733b39a
JK
4137 task->imm_count);
4138 } else
4139 writedir = 0;
4140 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4141}
4142
/**
 * beiscsi_quiesce - common teardown for remove and shutdown
 * @phba: driver instance being torn down
 *
 * Order matters here: interrupts are disabled and IRQs freed before the
 * workqueue and iopoll contexts are destroyed, and the port/memory are
 * released before the PCI mappings and mailbox DMA memory go away.
 */
static void beiscsi_quiesce(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	unsigned int i, msix_vec;
	u8 *real_offset = 0;
	u32 value = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	hwi_disable_intr(phba);
	if (phba->msix_enabled) {
		/* one vector per EQ: num_cpus for I/O plus one for MCC */
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
			kfree(phba->msi_name[i]);
		}
	} else
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}

	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
	/* clear bit 16 of the EP semaphore register if set — the probe path
	 * sets it to mark a live (non-crashdump) driver instance */
	real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;

	value = readl((void *)real_offset);

	if (value & 0x00010000) {
		value &= 0xfffeffff;
		writel(value, (void *)real_offset);
	}
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
}
4188
4189static void beiscsi_remove(struct pci_dev *pcidev)
4190{
4191
4192 struct beiscsi_hba *phba = NULL;
4193
4194 phba = pci_get_drvdata(pcidev);
4195 if (!phba) {
4196 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4197 return;
4198 }
4199
4200 beiscsi_quiesce(phba);
9d045163 4201 iscsi_boot_destroy_kset(phba->boot_kset);
6733b39a
JK
4202 iscsi_host_remove(phba->shost);
4203 pci_dev_put(phba->pcidev);
4204 iscsi_host_free(phba->shost);
8dce69ff 4205 pci_disable_device(pcidev);
6733b39a
JK
4206}
4207
25602c97
JK
4208static void beiscsi_shutdown(struct pci_dev *pcidev)
4209{
4210
4211 struct beiscsi_hba *phba = NULL;
4212
4213 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4214 if (!phba) {
4215 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
4216 return;
4217 }
4218
4219 beiscsi_quiesce(phba);
8dce69ff 4220 pci_disable_device(pcidev);
25602c97
JK
4221}
4222
bfead3b2
JK
4223static void beiscsi_msix_enable(struct beiscsi_hba *phba)
4224{
4225 int i, status;
4226
4227 for (i = 0; i <= phba->num_cpus; i++)
4228 phba->msix_entries[i].entry = i;
4229
4230 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
4231 (phba->num_cpus + 1));
4232 if (!status)
4233 phba->msix_enabled = true;
4234
4235 return;
4236}
4237
6733b39a
JK
4238static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4239 const struct pci_device_id *id)
4240{
4241 struct beiscsi_hba *phba = NULL;
bfead3b2
JK
4242 struct hwi_controller *phwi_ctrlr;
4243 struct hwi_context_memory *phwi_context;
4244 struct be_eq_obj *pbe_eq;
238f6b72 4245 int ret, num_cpus, i;
e9b91193
JK
4246 u8 *real_offset = 0;
4247 u32 value = 0;
6733b39a
JK
4248
4249 ret = beiscsi_enable_pci(pcidev);
4250 if (ret < 0) {
82284c09
DC
4251 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
4252 " Failed to enable pci device\n");
6733b39a
JK
4253 return ret;
4254 }
4255
4256 phba = beiscsi_hba_alloc(pcidev);
4257 if (!phba) {
4258 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
457ff3b7 4259 " Failed in beiscsi_hba_alloc\n");
6733b39a
JK
4260 goto disable_pci;
4261 }
4262
f98c96b0
JK
4263 switch (pcidev->device) {
4264 case BE_DEVICE_ID1:
4265 case OC_DEVICE_ID1:
4266 case OC_DEVICE_ID2:
4267 phba->generation = BE_GEN2;
4268 break;
4269 case BE_DEVICE_ID2:
4270 case OC_DEVICE_ID3:
4271 phba->generation = BE_GEN3;
4272 break;
4273 default:
4274 phba->generation = 0;
4275 }
4276
bfead3b2
JK
4277 if (enable_msix)
4278 num_cpus = find_num_cpus();
4279 else
4280 num_cpus = 1;
4281 phba->num_cpus = num_cpus;
457ff3b7 4282 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
bfead3b2 4283
b547f2d6 4284 if (enable_msix) {
bfead3b2 4285 beiscsi_msix_enable(phba);
b547f2d6
JK
4286 if (!phba->msix_enabled)
4287 phba->num_cpus = 1;
4288 }
6733b39a
JK
4289 ret = be_ctrl_init(phba, pcidev);
4290 if (ret) {
4291 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4292 "Failed in be_ctrl_init\n");
4293 goto hba_free;
4294 }
4295
e9b91193
JK
4296 if (!num_hba) {
4297 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4298 value = readl((void *)real_offset);
4299 if (value & 0x00010000) {
4300 gcrashmode++;
4301 shost_printk(KERN_ERR, phba->shost,
4302 "Loading Driver in crashdump mode\n");
e5285860 4303 ret = beiscsi_cmd_reset_function(phba);
e9b91193
JK
4304 if (ret) {
4305 shost_printk(KERN_ERR, phba->shost,
4306 "Reset Failed. Aborting Crashdump\n");
4307 goto hba_free;
4308 }
4309 ret = be_chk_reset_complete(phba);
4310 if (ret) {
4311 shost_printk(KERN_ERR, phba->shost,
4312 "Failed to get out of reset."
4313 "Aborting Crashdump\n");
4314 goto hba_free;
4315 }
4316 } else {
4317 value |= 0x00010000;
4318 writel(value, (void *)real_offset);
4319 num_hba++;
4320 }
4321 }
4322
6733b39a
JK
4323 spin_lock_init(&phba->io_sgl_lock);
4324 spin_lock_init(&phba->mgmt_sgl_lock);
4325 spin_lock_init(&phba->isr_lock);
7da50879
JK
4326 ret = mgmt_get_fw_config(&phba->ctrl, phba);
4327 if (ret != 0) {
4328 shost_printk(KERN_ERR, phba->shost,
4329 "Error getting fw config\n");
4330 goto free_port;
4331 }
4332 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
6733b39a 4333 beiscsi_get_params(phba);
aa874f07 4334 phba->shost->can_queue = phba->params.ios_per_ctrl;
6733b39a
JK
4335 ret = beiscsi_init_port(phba);
4336 if (ret < 0) {
4337 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4338 "Failed in beiscsi_init_port\n");
4339 goto free_port;
4340 }
4341
756d29c8
JK
4342 for (i = 0; i < MAX_MCC_CMD ; i++) {
4343 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
4344 phba->ctrl.mcc_tag[i] = i + 1;
4345 phba->ctrl.mcc_numtag[i + 1] = 0;
4346 phba->ctrl.mcc_tag_available++;
4347 }
4348
4349 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
4350
6733b39a
JK
4351 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
4352 phba->shost->host_no);
278274d5 4353 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
6733b39a
JK
4354 if (!phba->wq) {
4355 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4356 "Failed to allocate work queue\n");
4357 goto free_twq;
4358 }
4359
4360 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
4361
bfead3b2
JK
4362 phwi_ctrlr = phba->phwi_ctrlr;
4363 phwi_context = phwi_ctrlr->phwi_ctxt;
6733b39a 4364 if (blk_iopoll_enabled) {
bfead3b2
JK
4365 for (i = 0; i < phba->num_cpus; i++) {
4366 pbe_eq = &phwi_context->be_eq[i];
4367 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
4368 be_iopoll);
4369 blk_iopoll_enable(&pbe_eq->iopoll);
4370 }
6733b39a 4371 }
6733b39a
JK
4372 ret = beiscsi_init_irqs(phba);
4373 if (ret < 0) {
4374 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
4375 "Failed to beiscsi_init_irqs\n");
4376 goto free_blkenbld;
4377 }
238f6b72 4378 hwi_enable_intr(phba);
f457a46f
MC
4379
4380 if (beiscsi_setup_boot_info(phba))
4381 /*
4382 * log error but continue, because we may not be using
4383 * iscsi boot.
4384 */
4385 shost_printk(KERN_ERR, phba->shost, "Could not set up "
4386 "iSCSI boot info.");
4387
457ff3b7 4388 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
6733b39a
JK
4389 return 0;
4390
6733b39a
JK
4391free_blkenbld:
4392 destroy_workqueue(phba->wq);
4393 if (blk_iopoll_enabled)
bfead3b2
JK
4394 for (i = 0; i < phba->num_cpus; i++) {
4395 pbe_eq = &phwi_context->be_eq[i];
4396 blk_iopoll_disable(&pbe_eq->iopoll);
4397 }
6733b39a
JK
4398free_twq:
4399 beiscsi_clean_port(phba);
4400 beiscsi_free_mem(phba);
4401free_port:
e9b91193
JK
4402 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
4403
4404 value = readl((void *)real_offset);
4405
4406 if (value & 0x00010000) {
4407 value &= 0xfffeffff;
4408 writel(value, (void *)real_offset);
4409 }
4410
6733b39a
JK
4411 pci_free_consistent(phba->pcidev,
4412 phba->ctrl.mbox_mem_alloced.size,
4413 phba->ctrl.mbox_mem_alloced.va,
4414 phba->ctrl.mbox_mem_alloced.dma);
4415 beiscsi_unmap_pci_function(phba);
4416hba_free:
238f6b72
JK
4417 if (phba->msix_enabled)
4418 pci_disable_msix(phba->pcidev);
6733b39a
JK
4419 iscsi_host_remove(phba->shost);
4420 pci_dev_put(phba->pcidev);
4421 iscsi_host_free(phba->shost);
4422disable_pci:
4423 pci_disable_device(pcidev);
4424 return ret;
4425}
4426
/*
 * iSCSI transport template registered with the iSCSI transport class in
 * beiscsi_module_init(). Session/connection management and PDU
 * allocation are handled by this driver; generic operations (teardown,
 * stop, send_pdu, stats params) are delegated to libiscsi helpers.
 */
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = be2iscsi_attr_is_visible,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
4456
/* PCI driver hooks; matching devices are listed in beiscsi_pci_id_table */
static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.shutdown = beiscsi_shutdown,
	.id_table = beiscsi_pci_id_table
};
4464
bfead3b2 4465
6733b39a
JK
4466static int __init beiscsi_module_init(void)
4467{
4468 int ret;
4469
4470 beiscsi_scsi_transport =
4471 iscsi_register_transport(&beiscsi_iscsi_transport);
4472 if (!beiscsi_scsi_transport) {
4473 SE_DEBUG(DBG_LVL_1,
4474 "beiscsi_module_init - Unable to register beiscsi"
4475 "transport.\n");
f55a24f2 4476 return -ENOMEM;
6733b39a 4477 }
457ff3b7 4478 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
6733b39a
JK
4479 &beiscsi_iscsi_transport);
4480
4481 ret = pci_register_driver(&beiscsi_pci_driver);
4482 if (ret) {
4483 SE_DEBUG(DBG_LVL_1,
4484 "beiscsi_module_init - Unable to register"
4485 "beiscsi pci driver.\n");
4486 goto unregister_iscsi_transport;
4487 }
4488 return 0;
4489
4490unregister_iscsi_transport:
4491 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4492 return ret;
4493}
4494
/* module exit: unregister in reverse order of beiscsi_module_init() */
static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);