6733b39a 1/**
255fa9a3 2 * Copyright (C) 2005 - 2011 Emulex
6733b39a
JK
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
255fa9a3 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
6733b39a
JK
11 *
12 * Contact Information:
255fa9a3 13 * linux-drivers@emulex.com
6733b39a 14 *
255fa9a3
JK
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
6733b39a 18 */
255fa9a3 19
6733b39a
JK
20#include <linux/reboot.h>
21#include <linux/delay.h>
5a0e3ad6 22#include <linux/slab.h>
6733b39a
JK
23#include <linux/interrupt.h>
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/string.h>
27#include <linux/kernel.h>
28#include <linux/semaphore.h>
c7acc5b8 29#include <linux/iscsi_boot_sysfs.h>
acf3368f 30#include <linux/module.h>
ffce3e2e 31#include <linux/bsg-lib.h>
6733b39a
JK
32
33#include <scsi/libiscsi.h>
ffce3e2e
JK
34#include <scsi/scsi_bsg_iscsi.h>
35#include <scsi/scsi_netlink.h>
6733b39a
JK
36#include <scsi/scsi_transport_iscsi.h>
37#include <scsi/scsi_transport.h>
38#include <scsi/scsi_cmnd.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_host.h>
41#include <scsi/scsi.h>
42#include "be_main.h"
43#include "be_iscsi.h"
44#include "be_mgmt.h"
0a513dd8 45#include "be_cmds.h"
6733b39a
JK
46
47static unsigned int be_iopoll_budget = 10;
48static unsigned int be_max_phys_size = 64;
bfead3b2 49static unsigned int enable_msix = 1;
6733b39a
JK
50
51MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
52MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
76d15dbd 53MODULE_VERSION(BUILD_STR);
2f635883 54MODULE_AUTHOR("Emulex Corporation");
6733b39a
JK
55MODULE_LICENSE("GPL");
56module_param(be_iopoll_budget, int, 0);
57module_param(enable_msix, int, 0);
58module_param(be_max_phys_size, uint, S_IRUGO);
99bc5d55
JSJ
59MODULE_PARM_DESC(be_max_phys_size,
60 "Maximum Size (In Kilobytes) of physically contiguous "
61 "memory that can be allocated. Range is 16 - 128");
62
63#define beiscsi_disp_param(_name)\
64ssize_t \
65beiscsi_##_name##_disp(struct device *dev,\
66 struct device_attribute *attrib, char *buf) \
67{ \
68 struct Scsi_Host *shost = class_to_shost(dev);\
69 struct beiscsi_hba *phba = iscsi_host_priv(shost); \
70 uint32_t param_val = 0; \
71 param_val = phba->attr_##_name;\
72 return snprintf(buf, PAGE_SIZE, "%d\n",\
73 phba->attr_##_name);\
74}
75
76#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
77int \
78beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
79{\
80 if (val >= _minval && val <= _maxval) {\
81 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
82 "BA_%d : beiscsi_"#_name" updated "\
83 "from 0x%x ==> 0x%x\n",\
84 phba->attr_##_name, val); \
85 phba->attr_##_name = val;\
86 return 0;\
87 } \
88 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
89 "BA_%d beiscsi_"#_name" attribute "\
90 "cannot be updated to 0x%x, "\
91 "range allowed is ["#_minval" - "#_maxval"]\n", val);\
92 return -EINVAL;\
93}
94
95#define beiscsi_store_param(_name) \
96ssize_t \
97beiscsi_##_name##_store(struct device *dev,\
98 struct device_attribute *attr, const char *buf,\
99 size_t count) \
100{ \
101 struct Scsi_Host *shost = class_to_shost(dev);\
102 struct beiscsi_hba *phba = iscsi_host_priv(shost);\
103 uint32_t param_val = 0;\
104 if (!isdigit(buf[0]))\
105 return -EINVAL;\
106 if (sscanf(buf, "%i", &param_val) != 1)\
107 return -EINVAL;\
108 if (beiscsi_##_name##_change(phba, param_val) == 0) \
109 return strlen(buf);\
110 else \
111 return -EINVAL;\
112}
113
114#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
115int \
116beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
117{ \
118 if (val >= _minval && val <= _maxval) {\
119 phba->attr_##_name = val;\
120 return 0;\
121 } \
122 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
123 "BA_%d beiscsi_"#_name" attribute " \
124 "cannot be updated to 0x%x, "\
125 "range allowed is ["#_minval" - "#_maxval"]\n", val);\
126 phba->attr_##_name = _defval;\
127 return -EINVAL;\
128}
129
130#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
131static uint beiscsi_##_name = _defval;\
132module_param(beiscsi_##_name, uint, S_IRUGO);\
133MODULE_PARM_DESC(beiscsi_##_name, _descp);\
134beiscsi_disp_param(_name)\
135beiscsi_change_param(_name, _minval, _maxval, _defval)\
136beiscsi_store_param(_name)\
137beiscsi_init_param(_name, _minval, _maxval, _defval)\
138DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
139 beiscsi_##_name##_disp, beiscsi_##_name##_store)
140
141/*
142 * When new log level added update the
143 * the MAX allowed value for log_enable
144 */
145BEISCSI_RW_ATTR(log_enable, 0x00,
146 0xFF, 0x00, "Enable logging Bit Mask\n"
147 "\t\t\t\tInitialization Events : 0x01\n"
148 "\t\t\t\tMailbox Events : 0x02\n"
149 "\t\t\t\tMiscellaneous Events : 0x04\n"
150 "\t\t\t\tError Handling : 0x08\n"
151 "\t\t\t\tIO Path Events : 0x10\n"
152 "\t\t\t\tConfiguration Path : 0x20\n");
153
5cac7596 154DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
99bc5d55
JSJ
155struct device_attribute *beiscsi_attrs[] = {
156 &dev_attr_beiscsi_log_enable,
5cac7596 157 &dev_attr_beiscsi_drvr_ver,
99bc5d55
JSJ
158 NULL,
159};
6733b39a 160
6763daae
JSJ
161static char const *cqe_desc[] = {
162 "RESERVED_DESC",
163 "SOL_CMD_COMPLETE",
164 "SOL_CMD_KILLED_DATA_DIGEST_ERR",
165 "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
166 "CXN_KILLED_BURST_LEN_MISMATCH",
167 "CXN_KILLED_AHS_RCVD",
168 "CXN_KILLED_HDR_DIGEST_ERR",
169 "CXN_KILLED_UNKNOWN_HDR",
170 "CXN_KILLED_STALE_ITT_TTT_RCVD",
171 "CXN_KILLED_INVALID_ITT_TTT_RCVD",
172 "CXN_KILLED_RST_RCVD",
173 "CXN_KILLED_TIMED_OUT",
174 "CXN_KILLED_RST_SENT",
175 "CXN_KILLED_FIN_RCVD",
176 "CXN_KILLED_BAD_UNSOL_PDU_RCVD",
177 "CXN_KILLED_BAD_WRB_INDEX_ERROR",
178 "CXN_KILLED_OVER_RUN_RESIDUAL",
179 "CXN_KILLED_UNDER_RUN_RESIDUAL",
180 "CMD_KILLED_INVALID_STATSN_RCVD",
181 "CMD_KILLED_INVALID_R2T_RCVD",
182 "CMD_CXN_KILLED_LUN_INVALID",
183 "CMD_CXN_KILLED_ICD_INVALID",
184 "CMD_CXN_KILLED_ITT_INVALID",
185 "CMD_CXN_KILLED_SEQ_OUTOFORDER",
186 "CMD_CXN_KILLED_INVALID_DATASN_RCVD",
187 "CXN_INVALIDATE_NOTIFY",
188 "CXN_INVALIDATE_INDEX_NOTIFY",
189 "CMD_INVALIDATED_NOTIFY",
190 "UNSOL_HDR_NOTIFY",
191 "UNSOL_DATA_NOTIFY",
192 "UNSOL_DATA_DIGEST_ERROR_NOTIFY",
193 "DRIVERMSG_NOTIFY",
194 "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
195 "SOL_CMD_KILLED_DIF_ERR",
196 "CXN_KILLED_SYN_RCVD",
197 "CXN_KILLED_IMM_DATA_RCVD"
198};
199
6733b39a
JK
200static int beiscsi_slave_configure(struct scsi_device *sdev)
201{
202 blk_queue_max_segment_size(sdev->request_queue, 65536);
203 return 0;
204}
205
4183122d
JK
206static int beiscsi_eh_abort(struct scsi_cmnd *sc)
207{
208 struct iscsi_cls_session *cls_session;
209 struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
210 struct beiscsi_io_task *aborted_io_task;
211 struct iscsi_conn *conn;
212 struct beiscsi_conn *beiscsi_conn;
213 struct beiscsi_hba *phba;
214 struct iscsi_session *session;
215 struct invalidate_command_table *inv_tbl;
3cbb7a74 216 struct be_dma_mem nonemb_cmd;
4183122d
JK
217 unsigned int cid, tag, num_invalidate;
218
219 cls_session = starget_to_session(scsi_target(sc->device));
220 session = cls_session->dd_data;
221
222 spin_lock_bh(&session->lock);
223 if (!aborted_task || !aborted_task->sc) {
224 /* we raced */
225 spin_unlock_bh(&session->lock);
226 return SUCCESS;
227 }
228
229 aborted_io_task = aborted_task->dd_data;
230 if (!aborted_io_task->scsi_cmnd) {
231 /* raced or invalid command */
232 spin_unlock_bh(&session->lock);
233 return SUCCESS;
234 }
235 spin_unlock_bh(&session->lock);
236 conn = aborted_task->conn;
237 beiscsi_conn = conn->dd_data;
238 phba = beiscsi_conn->phba;
239
240 /* invalidate iocb */
241 cid = beiscsi_conn->beiscsi_conn_cid;
242 inv_tbl = phba->inv_tbl;
243 memset(inv_tbl, 0x0, sizeof(*inv_tbl));
244 inv_tbl->cid = cid;
245 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
246 num_invalidate = 1;
3cbb7a74
JK
247 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
248 sizeof(struct invalidate_commands_params_in),
249 &nonemb_cmd.dma);
250 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
251 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
252 "BM_%d : Failed to allocate memory for"
253 "mgmt_invalidate_icds\n");
3cbb7a74
JK
254 return FAILED;
255 }
256 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
257
258 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
259 cid, &nonemb_cmd);
4183122d 260 if (!tag) {
99bc5d55
JSJ
261 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
262 "BM_%d : mgmt_invalidate_icds could not be"
263 "submitted\n");
3cbb7a74
JK
264 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
265 nonemb_cmd.va, nonemb_cmd.dma);
266
4183122d
JK
267 return FAILED;
268 } else {
269 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
270 phba->ctrl.mcc_numtag[tag]);
271 free_mcc_tag(&phba->ctrl, tag);
272 }
3cbb7a74
JK
273 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
274 nonemb_cmd.va, nonemb_cmd.dma);
4183122d
JK
275 return iscsi_eh_abort(sc);
276}
277
278static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
279{
280 struct iscsi_task *abrt_task;
281 struct beiscsi_io_task *abrt_io_task;
282 struct iscsi_conn *conn;
283 struct beiscsi_conn *beiscsi_conn;
284 struct beiscsi_hba *phba;
285 struct iscsi_session *session;
286 struct iscsi_cls_session *cls_session;
287 struct invalidate_command_table *inv_tbl;
3cbb7a74 288 struct be_dma_mem nonemb_cmd;
4183122d 289 unsigned int cid, tag, i, num_invalidate;
4183122d
JK
290
291 /* invalidate iocbs */
292 cls_session = starget_to_session(scsi_target(sc->device));
293 session = cls_session->dd_data;
294 spin_lock_bh(&session->lock);
db7f7709
JK
295 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
296 spin_unlock_bh(&session->lock);
297 return FAILED;
298 }
4183122d
JK
299 conn = session->leadconn;
300 beiscsi_conn = conn->dd_data;
301 phba = beiscsi_conn->phba;
302 cid = beiscsi_conn->beiscsi_conn_cid;
303 inv_tbl = phba->inv_tbl;
304 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
305 num_invalidate = 0;
306 for (i = 0; i < conn->session->cmds_max; i++) {
307 abrt_task = conn->session->cmds[i];
308 abrt_io_task = abrt_task->dd_data;
309 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
310 continue;
311
 312 		if (sc->device->lun != abrt_task->sc->device->lun)
313 continue;
314
315 inv_tbl->cid = cid;
316 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
317 num_invalidate++;
318 inv_tbl++;
319 }
320 spin_unlock_bh(&session->lock);
321 inv_tbl = phba->inv_tbl;
322
3cbb7a74
JK
323 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
324 sizeof(struct invalidate_commands_params_in),
325 &nonemb_cmd.dma);
326 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
327 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
328 "BM_%d : Failed to allocate memory for"
329 "mgmt_invalidate_icds\n");
3cbb7a74
JK
330 return FAILED;
331 }
332 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
333 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
334 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
335 cid, &nonemb_cmd);
4183122d 336 if (!tag) {
99bc5d55
JSJ
337 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
338 "BM_%d : mgmt_invalidate_icds could not be"
339 " submitted\n");
3cbb7a74
JK
340 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
341 nonemb_cmd.va, nonemb_cmd.dma);
4183122d
JK
342 return FAILED;
343 } else {
344 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
345 phba->ctrl.mcc_numtag[tag]);
346 free_mcc_tag(&phba->ctrl, tag);
347 }
3cbb7a74
JK
348 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
349 nonemb_cmd.va, nonemb_cmd.dma);
4183122d 350 return iscsi_eh_device_reset(sc);
4183122d
JK
351}
352
c7acc5b8
JK
353static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
354{
355 struct beiscsi_hba *phba = data;
f457a46f
MC
356 struct mgmt_session_info *boot_sess = &phba->boot_sess;
357 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
c7acc5b8
JK
358 char *str = buf;
359 int rc;
360
361 switch (type) {
362 case ISCSI_BOOT_TGT_NAME:
363 rc = sprintf(buf, "%.*s\n",
f457a46f
MC
364 (int)strlen(boot_sess->target_name),
365 (char *)&boot_sess->target_name);
c7acc5b8
JK
366 break;
367 case ISCSI_BOOT_TGT_IP_ADDR:
f457a46f 368 if (boot_conn->dest_ipaddr.ip_type == 0x1)
c7acc5b8 369 rc = sprintf(buf, "%pI4\n",
0e43895e 370 (char *)&boot_conn->dest_ipaddr.addr);
c7acc5b8
JK
371 else
372 rc = sprintf(str, "%pI6\n",
0e43895e 373 (char *)&boot_conn->dest_ipaddr.addr);
c7acc5b8
JK
374 break;
375 case ISCSI_BOOT_TGT_PORT:
f457a46f 376 rc = sprintf(str, "%d\n", boot_conn->dest_port);
c7acc5b8
JK
377 break;
378
379 case ISCSI_BOOT_TGT_CHAP_NAME:
380 rc = sprintf(str, "%.*s\n",
f457a46f
MC
381 boot_conn->negotiated_login_options.auth_data.chap.
382 target_chap_name_length,
383 (char *)&boot_conn->negotiated_login_options.
384 auth_data.chap.target_chap_name);
c7acc5b8
JK
385 break;
386 case ISCSI_BOOT_TGT_CHAP_SECRET:
387 rc = sprintf(str, "%.*s\n",
f457a46f
MC
388 boot_conn->negotiated_login_options.auth_data.chap.
389 target_secret_length,
390 (char *)&boot_conn->negotiated_login_options.
391 auth_data.chap.target_secret);
c7acc5b8
JK
392 break;
393 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
394 rc = sprintf(str, "%.*s\n",
f457a46f
MC
395 boot_conn->negotiated_login_options.auth_data.chap.
396 intr_chap_name_length,
397 (char *)&boot_conn->negotiated_login_options.
398 auth_data.chap.intr_chap_name);
c7acc5b8
JK
399 break;
400 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
f457a46f
MC
401 rc = sprintf(str, "%.*s\n",
402 boot_conn->negotiated_login_options.auth_data.chap.
403 intr_secret_length,
404 (char *)&boot_conn->negotiated_login_options.
405 auth_data.chap.intr_secret);
c7acc5b8
JK
406 break;
407 case ISCSI_BOOT_TGT_FLAGS:
f457a46f 408 rc = sprintf(str, "2\n");
c7acc5b8
JK
409 break;
410 case ISCSI_BOOT_TGT_NIC_ASSOC:
f457a46f 411 rc = sprintf(str, "0\n");
c7acc5b8
JK
412 break;
413 default:
414 rc = -ENOSYS;
415 break;
416 }
417 return rc;
418}
419
420static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
421{
422 struct beiscsi_hba *phba = data;
423 char *str = buf;
424 int rc;
425
426 switch (type) {
427 case ISCSI_BOOT_INI_INITIATOR_NAME:
428 rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
429 break;
430 default:
431 rc = -ENOSYS;
432 break;
433 }
434 return rc;
435}
436
437static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
438{
439 struct beiscsi_hba *phba = data;
440 char *str = buf;
441 int rc;
442
443 switch (type) {
444 case ISCSI_BOOT_ETH_FLAGS:
f457a46f 445 rc = sprintf(str, "2\n");
c7acc5b8
JK
446 break;
447 case ISCSI_BOOT_ETH_INDEX:
f457a46f 448 rc = sprintf(str, "0\n");
c7acc5b8
JK
449 break;
450 case ISCSI_BOOT_ETH_MAC:
0e43895e
MC
451 rc = beiscsi_get_macaddr(str, phba);
452 break;
c7acc5b8
JK
453 default:
454 rc = -ENOSYS;
455 break;
456 }
457 return rc;
458}
459
460
587a1f16 461static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
c7acc5b8 462{
587a1f16 463 umode_t rc;
c7acc5b8
JK
464
465 switch (type) {
466 case ISCSI_BOOT_TGT_NAME:
467 case ISCSI_BOOT_TGT_IP_ADDR:
468 case ISCSI_BOOT_TGT_PORT:
469 case ISCSI_BOOT_TGT_CHAP_NAME:
470 case ISCSI_BOOT_TGT_CHAP_SECRET:
471 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
472 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
473 case ISCSI_BOOT_TGT_NIC_ASSOC:
474 case ISCSI_BOOT_TGT_FLAGS:
475 rc = S_IRUGO;
476 break;
477 default:
478 rc = 0;
479 break;
480 }
481 return rc;
482}
483
587a1f16 484static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
c7acc5b8 485{
587a1f16 486 umode_t rc;
c7acc5b8
JK
487
488 switch (type) {
489 case ISCSI_BOOT_INI_INITIATOR_NAME:
490 rc = S_IRUGO;
491 break;
492 default:
493 rc = 0;
494 break;
495 }
496 return rc;
497}
498
499
587a1f16 500static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
c7acc5b8 501{
587a1f16 502 umode_t rc;
c7acc5b8
JK
503
504 switch (type) {
505 case ISCSI_BOOT_ETH_FLAGS:
506 case ISCSI_BOOT_ETH_MAC:
507 case ISCSI_BOOT_ETH_INDEX:
508 rc = S_IRUGO;
509 break;
510 default:
511 rc = 0;
512 break;
513 }
514 return rc;
515}
516
bfead3b2
JK
517/*------------------- PCI Driver operations and data ----------------- */
518static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
519 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
f98c96b0 520 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
bfead3b2
JK
521 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
522 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
523 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
139a1b1e 524 { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
bfead3b2
JK
525 { 0 }
526};
527MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
528
99bc5d55 529
6733b39a
JK
530static struct scsi_host_template beiscsi_sht = {
531 .module = THIS_MODULE,
2f635883 532 .name = "Emulex 10Gbe open-iscsi Initiator Driver",
6733b39a
JK
533 .proc_name = DRV_NAME,
534 .queuecommand = iscsi_queuecommand,
6733b39a
JK
535 .change_queue_depth = iscsi_change_queue_depth,
536 .slave_configure = beiscsi_slave_configure,
537 .target_alloc = iscsi_target_alloc,
4183122d
JK
538 .eh_abort_handler = beiscsi_eh_abort,
539 .eh_device_reset_handler = beiscsi_eh_device_reset,
309ce156 540 .eh_target_reset_handler = iscsi_eh_session_reset,
99bc5d55 541 .shost_attrs = beiscsi_attrs,
6733b39a
JK
542 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
543 .can_queue = BE2_IO_DEPTH,
544 .this_id = -1,
545 .max_sectors = BEISCSI_MAX_SECTORS,
546 .cmd_per_lun = BEISCSI_CMD_PER_LUN,
547 .use_clustering = ENABLE_CLUSTERING,
ffce3e2e
JK
548 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
549
6733b39a 550};
6733b39a 551
bfead3b2 552static struct scsi_transport_template *beiscsi_scsi_transport;
6733b39a
JK
553
554static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
555{
556 struct beiscsi_hba *phba;
557 struct Scsi_Host *shost;
558
559 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
560 if (!shost) {
99bc5d55
JSJ
561 dev_err(&pcidev->dev,
562 "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
6733b39a
JK
563 return NULL;
564 }
565 shost->dma_boundary = pcidev->dma_mask;
566 shost->max_id = BE2_MAX_SESSIONS;
567 shost->max_channel = 0;
568 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
569 shost->max_lun = BEISCSI_NUM_MAX_LUN;
570 shost->transportt = beiscsi_scsi_transport;
6733b39a
JK
571 phba = iscsi_host_priv(shost);
572 memset(phba, 0, sizeof(*phba));
573 phba->shost = shost;
574 phba->pcidev = pci_dev_get(pcidev);
2807afb7 575 pci_set_drvdata(pcidev, phba);
0e43895e 576 phba->interface_handle = 0xFFFFFFFF;
6733b39a
JK
577
578 if (iscsi_host_add(shost, &phba->pcidev->dev))
579 goto free_devices;
c7acc5b8 580
6733b39a
JK
581 return phba;
582
583free_devices:
584 pci_dev_put(phba->pcidev);
585 iscsi_host_free(phba->shost);
586 return NULL;
587}
588
589static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
590{
591 if (phba->csr_va) {
592 iounmap(phba->csr_va);
593 phba->csr_va = NULL;
594 }
595 if (phba->db_va) {
596 iounmap(phba->db_va);
597 phba->db_va = NULL;
598 }
599 if (phba->pci_va) {
600 iounmap(phba->pci_va);
601 phba->pci_va = NULL;
602 }
603}
604
605static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
606 struct pci_dev *pcidev)
607{
608 u8 __iomem *addr;
f98c96b0 609 int pcicfg_reg;
6733b39a
JK
610
611 addr = ioremap_nocache(pci_resource_start(pcidev, 2),
612 pci_resource_len(pcidev, 2));
613 if (addr == NULL)
614 return -ENOMEM;
615 phba->ctrl.csr = addr;
616 phba->csr_va = addr;
617 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
618
619 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
620 if (addr == NULL)
621 goto pci_map_err;
622 phba->ctrl.db = addr;
623 phba->db_va = addr;
624 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
625
f98c96b0
JK
626 if (phba->generation == BE_GEN2)
627 pcicfg_reg = 1;
628 else
629 pcicfg_reg = 0;
630
631 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
632 pci_resource_len(pcidev, pcicfg_reg));
633
6733b39a
JK
634 if (addr == NULL)
635 goto pci_map_err;
636 phba->ctrl.pcicfg = addr;
637 phba->pci_va = addr;
f98c96b0 638 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
6733b39a
JK
639 return 0;
640
641pci_map_err:
642 beiscsi_unmap_pci_function(phba);
643 return -ENOMEM;
644}
645
646static int beiscsi_enable_pci(struct pci_dev *pcidev)
647{
648 int ret;
649
650 ret = pci_enable_device(pcidev);
651 if (ret) {
99bc5d55
JSJ
652 dev_err(&pcidev->dev,
653 "beiscsi_enable_pci - enable device failed\n");
6733b39a
JK
654 return ret;
655 }
656
bfead3b2 657 pci_set_master(pcidev);
6733b39a
JK
658 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
659 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
660 if (ret) {
661 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
662 pci_disable_device(pcidev);
663 return ret;
664 }
665 }
666 return 0;
667}
668
669static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
670{
671 struct be_ctrl_info *ctrl = &phba->ctrl;
672 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
673 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
674 int status = 0;
675
676 ctrl->pdev = pdev;
677 status = beiscsi_map_pci_bars(phba, pdev);
678 if (status)
679 return status;
6733b39a
JK
680 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
681 mbox_mem_alloc->va = pci_alloc_consistent(pdev,
682 mbox_mem_alloc->size,
683 &mbox_mem_alloc->dma);
684 if (!mbox_mem_alloc->va) {
685 beiscsi_unmap_pci_function(phba);
a49e06d5 686 return -ENOMEM;
6733b39a
JK
687 }
688
689 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
690 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
691 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
692 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
693 spin_lock_init(&ctrl->mbox_lock);
bfead3b2
JK
694 spin_lock_init(&phba->ctrl.mcc_lock);
695 spin_lock_init(&phba->ctrl.mcc_cq_lock);
696
6733b39a
JK
697 return status;
698}
699
700static void beiscsi_get_params(struct beiscsi_hba *phba)
701{
7da50879
JK
702 phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
703 - (phba->fw_config.iscsi_cid_count
704 + BE2_TMFS
705 + BE2_NOPOUT_REQ));
706 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
ed58ea2a 707 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
6eab04a8 708 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
6733b39a
JK
709 phba->params.num_sge_per_io = BE2_SGE;
710 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
711 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
712 phba->params.eq_timer = 64;
713 phba->params.num_eq_entries =
7da50879
JK
714 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
715 + BE2_TMFS) / 512) + 1) * 512;
6733b39a
JK
716 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
717 ? 1024 : phba->params.num_eq_entries;
99bc5d55
JSJ
718 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
719 "BM_%d : phba->params.num_eq_entries=%d\n",
720 phba->params.num_eq_entries);
6733b39a 721 phba->params.num_cq_entries =
7da50879
JK
722 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
723 + BE2_TMFS) / 512) + 1) * 512;
6733b39a
JK
724 phba->params.wrbs_per_cxn = 256;
725}
726
727static void hwi_ring_eq_db(struct beiscsi_hba *phba,
728 unsigned int id, unsigned int clr_interrupt,
729 unsigned int num_processed,
730 unsigned char rearm, unsigned char event)
731{
732 u32 val = 0;
733 val |= id & DB_EQ_RING_ID_MASK;
734 if (rearm)
735 val |= 1 << DB_EQ_REARM_SHIFT;
736 if (clr_interrupt)
737 val |= 1 << DB_EQ_CLR_SHIFT;
738 if (event)
739 val |= 1 << DB_EQ_EVNT_SHIFT;
740 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
741 iowrite32(val, phba->db_va + DB_EQ_OFFSET);
742}
743
bfead3b2
JK
744/**
745 * be_isr_mcc - The isr routine of the driver.
746 * @irq: Not used
747 * @dev_id: Pointer to host adapter structure
748 */
749static irqreturn_t be_isr_mcc(int irq, void *dev_id)
750{
751 struct beiscsi_hba *phba;
752 struct be_eq_entry *eqe = NULL;
753 struct be_queue_info *eq;
754 struct be_queue_info *mcc;
755 unsigned int num_eq_processed;
756 struct be_eq_obj *pbe_eq;
757 unsigned long flags;
758
759 pbe_eq = dev_id;
760 eq = &pbe_eq->q;
761 phba = pbe_eq->phba;
762 mcc = &phba->ctrl.mcc_obj.cq;
763 eqe = queue_tail_node(eq);
bfead3b2
JK
764
765 num_eq_processed = 0;
766
767 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
768 & EQE_VALID_MASK) {
769 if (((eqe->dw[offsetof(struct amap_eq_entry,
770 resource_id) / 32] &
771 EQE_RESID_MASK) >> 16) == mcc->id) {
772 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 773 pbe_eq->todo_mcc_cq = true;
bfead3b2
JK
774 spin_unlock_irqrestore(&phba->isr_lock, flags);
775 }
776 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
777 queue_tail_inc(eq);
778 eqe = queue_tail_node(eq);
779 num_eq_processed++;
780 }
72fb46a9
JSJ
781 if (pbe_eq->todo_mcc_cq)
782 queue_work(phba->wq, &pbe_eq->work_cqs);
bfead3b2
JK
783 if (num_eq_processed)
784 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
785
786 return IRQ_HANDLED;
787}
788
789/**
790 * be_isr_msix - The isr routine of the driver.
791 * @irq: Not used
792 * @dev_id: Pointer to host adapter structure
793 */
794static irqreturn_t be_isr_msix(int irq, void *dev_id)
795{
796 struct beiscsi_hba *phba;
797 struct be_eq_entry *eqe = NULL;
798 struct be_queue_info *eq;
799 struct be_queue_info *cq;
800 unsigned int num_eq_processed;
801 struct be_eq_obj *pbe_eq;
802 unsigned long flags;
803
804 pbe_eq = dev_id;
805 eq = &pbe_eq->q;
806 cq = pbe_eq->cq;
807 eqe = queue_tail_node(eq);
bfead3b2
JK
808
809 phba = pbe_eq->phba;
810 num_eq_processed = 0;
811 if (blk_iopoll_enabled) {
812 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
813 & EQE_VALID_MASK) {
814 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
815 blk_iopoll_sched(&pbe_eq->iopoll);
816
817 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
818 queue_tail_inc(eq);
819 eqe = queue_tail_node(eq);
820 num_eq_processed++;
821 }
bfead3b2
JK
822 } else {
823 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
824 & EQE_VALID_MASK) {
825 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 826 pbe_eq->todo_cq = true;
bfead3b2
JK
827 spin_unlock_irqrestore(&phba->isr_lock, flags);
828 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
829 queue_tail_inc(eq);
830 eqe = queue_tail_node(eq);
831 num_eq_processed++;
832 }
bfead3b2 833
72fb46a9
JSJ
834 if (pbe_eq->todo_cq)
835 queue_work(phba->wq, &pbe_eq->work_cqs);
bfead3b2 836 }
72fb46a9
JSJ
837
838 if (num_eq_processed)
839 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
840
841 return IRQ_HANDLED;
bfead3b2
JK
842}
843
6733b39a
JK
844/**
845 * be_isr - The isr routine of the driver.
846 * @irq: Not used
847 * @dev_id: Pointer to host adapter structure
848 */
849static irqreturn_t be_isr(int irq, void *dev_id)
850{
851 struct beiscsi_hba *phba;
852 struct hwi_controller *phwi_ctrlr;
853 struct hwi_context_memory *phwi_context;
854 struct be_eq_entry *eqe = NULL;
855 struct be_queue_info *eq;
856 struct be_queue_info *cq;
bfead3b2 857 struct be_queue_info *mcc;
6733b39a 858 unsigned long flags, index;
bfead3b2 859 unsigned int num_mcceq_processed, num_ioeq_processed;
6733b39a 860 struct be_ctrl_info *ctrl;
bfead3b2 861 struct be_eq_obj *pbe_eq;
6733b39a
JK
862 int isr;
863
864 phba = dev_id;
6eab04a8 865 ctrl = &phba->ctrl;
bfead3b2
JK
866 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
867 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
868 if (!isr)
869 return IRQ_NONE;
6733b39a
JK
870
871 phwi_ctrlr = phba->phwi_ctrlr;
872 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
873 pbe_eq = &phwi_context->be_eq[0];
874
875 eq = &phwi_context->be_eq[0].q;
876 mcc = &phba->ctrl.mcc_obj.cq;
6733b39a
JK
877 index = 0;
878 eqe = queue_tail_node(eq);
6733b39a 879
bfead3b2
JK
880 num_ioeq_processed = 0;
881 num_mcceq_processed = 0;
6733b39a
JK
882 if (blk_iopoll_enabled) {
883 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
884 & EQE_VALID_MASK) {
bfead3b2
JK
885 if (((eqe->dw[offsetof(struct amap_eq_entry,
886 resource_id) / 32] &
887 EQE_RESID_MASK) >> 16) == mcc->id) {
888 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 889 pbe_eq->todo_mcc_cq = true;
bfead3b2
JK
890 spin_unlock_irqrestore(&phba->isr_lock, flags);
891 num_mcceq_processed++;
892 } else {
893 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
894 blk_iopoll_sched(&pbe_eq->iopoll);
895 num_ioeq_processed++;
896 }
6733b39a
JK
897 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
898 queue_tail_inc(eq);
899 eqe = queue_tail_node(eq);
6733b39a 900 }
bfead3b2 901 if (num_ioeq_processed || num_mcceq_processed) {
72fb46a9
JSJ
902 if (pbe_eq->todo_mcc_cq)
903 queue_work(phba->wq, &pbe_eq->work_cqs);
bfead3b2 904
756d29c8 905 if ((num_mcceq_processed) && (!num_ioeq_processed))
bfead3b2
JK
906 hwi_ring_eq_db(phba, eq->id, 0,
907 (num_ioeq_processed +
908 num_mcceq_processed) , 1, 1);
909 else
910 hwi_ring_eq_db(phba, eq->id, 0,
911 (num_ioeq_processed +
912 num_mcceq_processed), 0, 1);
913
6733b39a
JK
914 return IRQ_HANDLED;
915 } else
916 return IRQ_NONE;
917 } else {
bfead3b2 918 cq = &phwi_context->be_cq[0];
6733b39a
JK
919 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
920 & EQE_VALID_MASK) {
921
922 if (((eqe->dw[offsetof(struct amap_eq_entry,
923 resource_id) / 32] &
924 EQE_RESID_MASK) >> 16) != cq->id) {
925 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 926 pbe_eq->todo_mcc_cq = true;
6733b39a
JK
927 spin_unlock_irqrestore(&phba->isr_lock, flags);
928 } else {
929 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 930 pbe_eq->todo_cq = true;
6733b39a
JK
931 spin_unlock_irqrestore(&phba->isr_lock, flags);
932 }
933 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
934 queue_tail_inc(eq);
935 eqe = queue_tail_node(eq);
bfead3b2 936 num_ioeq_processed++;
6733b39a 937 }
72fb46a9
JSJ
938 if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
939 queue_work(phba->wq, &pbe_eq->work_cqs);
6733b39a 940
bfead3b2
JK
941 if (num_ioeq_processed) {
942 hwi_ring_eq_db(phba, eq->id, 0,
943 num_ioeq_processed, 1, 1);
6733b39a
JK
944 return IRQ_HANDLED;
945 } else
946 return IRQ_NONE;
947 }
948}
949
950static int beiscsi_init_irqs(struct beiscsi_hba *phba)
951{
952 struct pci_dev *pcidev = phba->pcidev;
bfead3b2
JK
953 struct hwi_controller *phwi_ctrlr;
954 struct hwi_context_memory *phwi_context;
4f5af07e 955 int ret, msix_vec, i, j;
6733b39a 956
bfead3b2
JK
957 phwi_ctrlr = phba->phwi_ctrlr;
958 phwi_context = phwi_ctrlr->phwi_ctxt;
959
960 if (phba->msix_enabled) {
961 for (i = 0; i < phba->num_cpus; i++) {
8fcfb210
JK
962 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
963 GFP_KERNEL);
964 if (!phba->msi_name[i]) {
965 ret = -ENOMEM;
966 goto free_msix_irqs;
967 }
968
969 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
970 phba->shost->host_no, i);
bfead3b2 971 msix_vec = phba->msix_entries[i].vector;
8fcfb210
JK
972 ret = request_irq(msix_vec, be_isr_msix, 0,
973 phba->msi_name[i],
bfead3b2 974 &phwi_context->be_eq[i]);
4f5af07e 975 if (ret) {
99bc5d55
JSJ
976 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
977 "BM_%d : beiscsi_init_irqs-Failed to"
978 "register msix for i = %d\n",
979 i);
8fcfb210 980 kfree(phba->msi_name[i]);
4f5af07e
JK
981 goto free_msix_irqs;
982 }
bfead3b2 983 }
8fcfb210
JK
984 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
985 if (!phba->msi_name[i]) {
986 ret = -ENOMEM;
987 goto free_msix_irqs;
988 }
989 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
990 phba->shost->host_no);
bfead3b2 991 msix_vec = phba->msix_entries[i].vector;
8fcfb210 992 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
bfead3b2 993 &phwi_context->be_eq[i]);
4f5af07e 994 if (ret) {
99bc5d55
JSJ
995 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
996 "BM_%d : beiscsi_init_irqs-"
997 "Failed to register beiscsi_msix_mcc\n");
8fcfb210 998 kfree(phba->msi_name[i]);
4f5af07e
JK
999 goto free_msix_irqs;
1000 }
1001
bfead3b2
JK
1002 } else {
1003 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
1004 "beiscsi", phba);
1005 if (ret) {
99bc5d55
JSJ
1006 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1007 "BM_%d : beiscsi_init_irqs-"
1008 "Failed to register irq\\n");
bfead3b2
JK
1009 return ret;
1010 }
6733b39a
JK
1011 }
1012 return 0;
4f5af07e 1013free_msix_irqs:
8fcfb210
JK
1014 for (j = i - 1; j >= 0; j--) {
1015 kfree(phba->msi_name[j]);
1016 msix_vec = phba->msix_entries[j].vector;
4f5af07e 1017 free_irq(msix_vec, &phwi_context->be_eq[j]);
8fcfb210 1018 }
4f5af07e 1019 return ret;
6733b39a
JK
1020}
1021
1022static void hwi_ring_cq_db(struct beiscsi_hba *phba,
1023 unsigned int id, unsigned int num_processed,
1024 unsigned char rearm, unsigned char event)
1025{
1026 u32 val = 0;
1027 val |= id & DB_CQ_RING_ID_MASK;
1028 if (rearm)
1029 val |= 1 << DB_CQ_REARM_SHIFT;
1030 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
1031 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
1032}
1033
6733b39a
JK
1034static unsigned int
1035beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
1036 struct beiscsi_hba *phba,
1037 unsigned short cid,
1038 struct pdu_base *ppdu,
1039 unsigned long pdu_len,
1040 void *pbuffer, unsigned long buf_len)
1041{
1042 struct iscsi_conn *conn = beiscsi_conn->conn;
1043 struct iscsi_session *session = conn->session;
bfead3b2
JK
1044 struct iscsi_task *task;
1045 struct beiscsi_io_task *io_task;
1046 struct iscsi_hdr *login_hdr;
6733b39a
JK
1047
1048 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
1049 PDUBASE_OPCODE_MASK) {
1050 case ISCSI_OP_NOOP_IN:
1051 pbuffer = NULL;
1052 buf_len = 0;
1053 break;
1054 case ISCSI_OP_ASYNC_EVENT:
1055 break;
1056 case ISCSI_OP_REJECT:
1057 WARN_ON(!pbuffer);
1058 WARN_ON(!(buf_len == 48));
99bc5d55
JSJ
1059 beiscsi_log(phba, KERN_ERR,
1060 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1061 "BM_%d : In ISCSI_OP_REJECT\n");
6733b39a
JK
1062 break;
1063 case ISCSI_OP_LOGIN_RSP:
7bd6e25c 1064 case ISCSI_OP_TEXT_RSP:
bfead3b2
JK
1065 task = conn->login_task;
1066 io_task = task->dd_data;
1067 login_hdr = (struct iscsi_hdr *)ppdu;
1068 login_hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1069 break;
1070 default:
99bc5d55
JSJ
1071 beiscsi_log(phba, KERN_WARNING,
1072 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1073 "BM_%d : Unrecognized opcode 0x%x in async msg\n",
1074 (ppdu->
6733b39a 1075 dw[offsetof(struct amap_pdu_base, opcode) / 32]
99bc5d55 1076 & PDUBASE_OPCODE_MASK));
6733b39a
JK
1077 return 1;
1078 }
1079
1080 spin_lock_bh(&session->lock);
1081 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
1082 spin_unlock_bh(&session->lock);
1083 return 0;
1084}
1085
1086static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
1087{
1088 struct sgl_handle *psgl_handle;
1089
1090 if (phba->io_sgl_hndl_avbl) {
99bc5d55
JSJ
1091 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1092 "BM_%d : In alloc_io_sgl_handle,"
1093 " io_sgl_alloc_index=%d\n",
1094 phba->io_sgl_alloc_index);
1095
6733b39a
JK
1096 psgl_handle = phba->io_sgl_hndl_base[phba->
1097 io_sgl_alloc_index];
1098 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
1099 phba->io_sgl_hndl_avbl--;
bfead3b2
JK
1100 if (phba->io_sgl_alloc_index == (phba->params.
1101 ios_per_ctrl - 1))
6733b39a
JK
1102 phba->io_sgl_alloc_index = 0;
1103 else
1104 phba->io_sgl_alloc_index++;
1105 } else
1106 psgl_handle = NULL;
1107 return psgl_handle;
1108}
1109
1110static void
1111free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1112{
99bc5d55
JSJ
1113 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1114 "BM_%d : In free_,io_sgl_free_index=%d\n",
1115 phba->io_sgl_free_index);
1116
6733b39a
JK
1117 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
1118 /*
1119 * this can happen if clean_task is called on a task that
1120 * failed in xmit_task or alloc_pdu.
1121 */
99bc5d55
JSJ
1122 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1123 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
1124 "value there=%p\n", phba->io_sgl_free_index,
1125 phba->io_sgl_hndl_base
1126 [phba->io_sgl_free_index]);
6733b39a
JK
1127 return;
1128 }
1129 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
1130 phba->io_sgl_hndl_avbl++;
1131 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
1132 phba->io_sgl_free_index = 0;
1133 else
1134 phba->io_sgl_free_index++;
1135}
1136
1137/**
1138 * alloc_wrb_handle - To allocate a wrb handle
1139 * @phba: The hba pointer
1140 * @cid: The cid to use for allocation
6733b39a
JK
1141 *
1142 * This happens under session_lock until submission to chip
1143 */
d5431488 1144struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
6733b39a
JK
1145{
1146 struct hwi_wrb_context *pwrb_context;
1147 struct hwi_controller *phwi_ctrlr;
d5431488 1148 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
6733b39a
JK
1149
1150 phwi_ctrlr = phba->phwi_ctrlr;
1151 pwrb_context = &phwi_ctrlr->wrb_context[cid];
d5431488 1152 if (pwrb_context->wrb_handles_available >= 2) {
bfead3b2
JK
1153 pwrb_handle = pwrb_context->pwrb_handle_base[
1154 pwrb_context->alloc_index];
1155 pwrb_context->wrb_handles_available--;
bfead3b2
JK
1156 if (pwrb_context->alloc_index ==
1157 (phba->params.wrbs_per_cxn - 1))
1158 pwrb_context->alloc_index = 0;
1159 else
1160 pwrb_context->alloc_index++;
d5431488
JK
1161 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
1162 pwrb_context->alloc_index];
1163 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
bfead3b2
JK
1164 } else
1165 pwrb_handle = NULL;
6733b39a
JK
1166 return pwrb_handle;
1167}
1168
1169/**
1170 * free_wrb_handle - To free the wrb handle back to pool
1171 * @phba: The hba pointer
1172 * @pwrb_context: The context to free from
1173 * @pwrb_handle: The wrb_handle to free
1174 *
1175 * This happens under session_lock until submission to chip
1176 */
1177static void
1178free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1179 struct wrb_handle *pwrb_handle)
1180{
32951dd8 1181 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
bfead3b2
JK
1182 pwrb_context->wrb_handles_available++;
1183 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
1184 pwrb_context->free_index = 0;
1185 else
1186 pwrb_context->free_index++;
1187
99bc5d55
JSJ
1188 beiscsi_log(phba, KERN_INFO,
1189 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1190 "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
1191 "wrb_handles_available=%d\n",
1192 pwrb_handle, pwrb_context->free_index,
1193 pwrb_context->wrb_handles_available);
6733b39a
JK
1194}
1195
1196static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1197{
1198 struct sgl_handle *psgl_handle;
1199
1200 if (phba->eh_sgl_hndl_avbl) {
1201 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1202 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
99bc5d55
JSJ
1203 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1204 "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
1205 phba->eh_sgl_alloc_index,
1206 phba->eh_sgl_alloc_index);
1207
6733b39a
JK
1208 phba->eh_sgl_hndl_avbl--;
1209 if (phba->eh_sgl_alloc_index ==
1210 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
1211 1))
1212 phba->eh_sgl_alloc_index = 0;
1213 else
1214 phba->eh_sgl_alloc_index++;
1215 } else
1216 psgl_handle = NULL;
1217 return psgl_handle;
1218}
1219
1220void
1221free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1222{
1223
99bc5d55
JSJ
1224 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1225 "BM_%d : In free_mgmt_sgl_handle,"
1226 "eh_sgl_free_index=%d\n",
1227 phba->eh_sgl_free_index);
1228
6733b39a
JK
1229 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1230 /*
1231 * this can happen if clean_task is called on a task that
1232 * failed in xmit_task or alloc_pdu.
1233 */
99bc5d55
JSJ
1234 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1235 "BM_%d : Double Free in eh SGL ,"
1236 "eh_sgl_free_index=%d\n",
1237 phba->eh_sgl_free_index);
6733b39a
JK
1238 return;
1239 }
1240 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
1241 phba->eh_sgl_hndl_avbl++;
1242 if (phba->eh_sgl_free_index ==
1243 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
1244 phba->eh_sgl_free_index = 0;
1245 else
1246 phba->eh_sgl_free_index++;
1247}
1248
1249static void
1250be_complete_io(struct beiscsi_conn *beiscsi_conn,
1251 struct iscsi_task *task, struct sol_cqe *psol)
1252{
1253 struct beiscsi_io_task *io_task = task->dd_data;
1254 struct be_status_bhs *sts_bhs =
1255 (struct be_status_bhs *)io_task->cmd_bhs;
1256 struct iscsi_conn *conn = beiscsi_conn->conn;
6733b39a
JK
1257 unsigned char *sense;
1258 u32 resid = 0, exp_cmdsn, max_cmdsn;
1259 u8 rsp, status, flags;
1260
bfead3b2 1261 exp_cmdsn = (psol->
6733b39a
JK
1262 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1263 & SOL_EXP_CMD_SN_MASK);
bfead3b2 1264 max_cmdsn = ((psol->
6733b39a
JK
1265 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1266 & SOL_EXP_CMD_SN_MASK) +
1267 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1268 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1269 rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
1270 & SOL_RESP_MASK) >> 16);
1271 status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
1272 & SOL_STS_MASK) >> 8);
1273 flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1274 & SOL_FLAGS_MASK) >> 24) | 0x80;
bd535451
JK
1275 if (!task->sc) {
1276 if (io_task->scsi_cmnd)
1277 scsi_dma_unmap(io_task->scsi_cmnd);
6733b39a 1278
bd535451
JK
1279 return;
1280 }
6733b39a
JK
1281 task->sc->result = (DID_OK << 16) | status;
1282 if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
1283 task->sc->result = DID_ERROR << 16;
1284 goto unmap;
1285 }
1286
1287 /* bidi not initially supported */
1288 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
1289 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
1290 32] & SOL_RES_CNT_MASK);
1291
1292 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
1293 task->sc->result = DID_ERROR << 16;
1294
1295 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
1296 scsi_set_resid(task->sc, resid);
1297 if (!status && (scsi_bufflen(task->sc) - resid <
1298 task->sc->underflow))
1299 task->sc->result = DID_ERROR << 16;
1300 }
1301 }
1302
1303 if (status == SAM_STAT_CHECK_CONDITION) {
4053a4be 1304 u16 sense_len;
bfead3b2 1305 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
4053a4be 1306
6733b39a 1307 sense = sts_bhs->sense_info + sizeof(unsigned short);
4053a4be 1308 sense_len = be16_to_cpu(*slen);
6733b39a
JK
1309 memcpy(task->sc->sense_buffer, sense,
1310 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
1311 }
756d29c8 1312
6733b39a
JK
1313 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
1314 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
1315 & SOL_RES_CNT_MASK)
1316 conn->rxdata_octets += (psol->
bfead3b2
JK
1317 dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
1318 & SOL_RES_CNT_MASK);
6733b39a
JK
1319 }
1320unmap:
1321 scsi_dma_unmap(io_task->scsi_cmnd);
1322 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
1323}
1324
1325static void
1326be_complete_logout(struct beiscsi_conn *beiscsi_conn,
1327 struct iscsi_task *task, struct sol_cqe *psol)
1328{
1329 struct iscsi_logout_rsp *hdr;
bfead3b2 1330 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1331 struct iscsi_conn *conn = beiscsi_conn->conn;
1332
1333 hdr = (struct iscsi_logout_rsp *)task->hdr;
7bd6e25c 1334 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
6733b39a
JK
1335 hdr->t2wait = 5;
1336 hdr->t2retain = 0;
1337 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1338 & SOL_FLAGS_MASK) >> 24) | 0x80;
1339 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
1340 32] & SOL_RESP_MASK);
1341 hdr->exp_cmdsn = cpu_to_be32(psol->
1342 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1343 & SOL_EXP_CMD_SN_MASK);
1344 hdr->max_cmdsn = be32_to_cpu((psol->
1345 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1346 & SOL_EXP_CMD_SN_MASK) +
1347 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1348 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
7bd6e25c
JK
1349 hdr->dlength[0] = 0;
1350 hdr->dlength[1] = 0;
1351 hdr->dlength[2] = 0;
6733b39a 1352 hdr->hlength = 0;
bfead3b2 1353 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1354 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1355}
1356
1357static void
1358be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
1359 struct iscsi_task *task, struct sol_cqe *psol)
1360{
1361 struct iscsi_tm_rsp *hdr;
1362 struct iscsi_conn *conn = beiscsi_conn->conn;
bfead3b2 1363 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1364
1365 hdr = (struct iscsi_tm_rsp *)task->hdr;
7bd6e25c 1366 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
6733b39a
JK
1367 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1368 & SOL_FLAGS_MASK) >> 24) | 0x80;
1369 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
1370 32] & SOL_RESP_MASK);
1371 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
bfead3b2 1372 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
6733b39a
JK
1373 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1374 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1375 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1376 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
bfead3b2 1377 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1378 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1379}
1380
1381static void
1382hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1383 struct beiscsi_hba *phba, struct sol_cqe *psol)
1384{
1385 struct hwi_wrb_context *pwrb_context;
bfead3b2 1386 struct wrb_handle *pwrb_handle = NULL;
6733b39a 1387 struct hwi_controller *phwi_ctrlr;
bfead3b2
JK
1388 struct iscsi_task *task;
1389 struct beiscsi_io_task *io_task;
6733b39a
JK
1390 struct iscsi_conn *conn = beiscsi_conn->conn;
1391 struct iscsi_session *session = conn->session;
1392
1393 phwi_ctrlr = phba->phwi_ctrlr;
32951dd8 1394 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
35e66019 1395 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
7da50879
JK
1396 SOL_CID_MASK) >> 6) -
1397 phba->fw_config.iscsi_cid_start];
32951dd8 1398 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
35e66019
JK
1399 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1400 32] & SOL_WRB_INDEX_MASK) >> 16)];
32951dd8 1401 task = pwrb_handle->pio_handle;
35e66019 1402
bfead3b2 1403 io_task = task->dd_data;
1282ab76 1404 spin_lock_bh(&phba->mgmt_sgl_lock);
bfead3b2 1405 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
1282ab76 1406 spin_unlock_bh(&phba->mgmt_sgl_lock);
6733b39a
JK
1407 spin_lock_bh(&session->lock);
1408 free_wrb_handle(phba, pwrb_context, pwrb_handle);
1409 spin_unlock_bh(&session->lock);
1410}
1411
1412static void
1413be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1414 struct iscsi_task *task, struct sol_cqe *psol)
1415{
1416 struct iscsi_nopin *hdr;
1417 struct iscsi_conn *conn = beiscsi_conn->conn;
bfead3b2 1418 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1419
1420 hdr = (struct iscsi_nopin *)task->hdr;
1421 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1422 & SOL_FLAGS_MASK) >> 24) | 0x80;
1423 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
1424 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
1425 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1426 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1427 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1428 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1429 hdr->opcode = ISCSI_OP_NOOP_IN;
bfead3b2 1430 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1431 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1432}
1433
1434static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1435 struct beiscsi_hba *phba, struct sol_cqe *psol)
1436{
1437 struct hwi_wrb_context *pwrb_context;
1438 struct wrb_handle *pwrb_handle;
1439 struct iscsi_wrb *pwrb = NULL;
1440 struct hwi_controller *phwi_ctrlr;
1441 struct iscsi_task *task;
bfead3b2 1442 unsigned int type;
6733b39a
JK
1443 struct iscsi_conn *conn = beiscsi_conn->conn;
1444 struct iscsi_session *session = conn->session;
1445
1446 phwi_ctrlr = phba->phwi_ctrlr;
32951dd8 1447 pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
35e66019 1448 (struct amap_sol_cqe, cid) / 32]
7da50879
JK
1449 & SOL_CID_MASK) >> 6) -
1450 phba->fw_config.iscsi_cid_start];
32951dd8 1451 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
35e66019
JK
1452 dw[offsetof(struct amap_sol_cqe, wrb_index) /
1453 32] & SOL_WRB_INDEX_MASK) >> 16)];
32951dd8
JK
1454 task = pwrb_handle->pio_handle;
1455 pwrb = pwrb_handle->pwrb;
1456 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1457 WRB_TYPE_MASK) >> 28;
1458
bfead3b2
JK
1459 spin_lock_bh(&session->lock);
1460 switch (type) {
6733b39a
JK
1461 case HWH_TYPE_IO:
1462 case HWH_TYPE_IO_RD:
1463 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
dafab8e0 1464 ISCSI_OP_NOOP_OUT)
6733b39a 1465 be_complete_nopin_resp(beiscsi_conn, task, psol);
dafab8e0 1466 else
6733b39a
JK
1467 be_complete_io(beiscsi_conn, task, psol);
1468 break;
1469
1470 case HWH_TYPE_LOGOUT:
dafab8e0
JK
1471 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1472 be_complete_logout(beiscsi_conn, task, psol);
1473 else
1474 be_complete_tmf(beiscsi_conn, task, psol);
1475
6733b39a
JK
1476 break;
1477
1478 case HWH_TYPE_LOGIN:
99bc5d55
JSJ
1479 beiscsi_log(phba, KERN_ERR,
1480 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1481 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
1482 " hwi_complete_cmd- Solicited path\n");
6733b39a
JK
1483 break;
1484
6733b39a
JK
1485 case HWH_TYPE_NOP:
1486 be_complete_nopin_resp(beiscsi_conn, task, psol);
1487 break;
1488
1489 default:
99bc5d55
JSJ
1490 beiscsi_log(phba, KERN_WARNING,
1491 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1492 "BM_%d : In hwi_complete_cmd, unknown type = %d"
1493 "wrb_index 0x%x CID 0x%x\n", type,
1494 ((psol->dw[offsetof(struct amap_iscsi_wrb,
1495 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1496 ((psol->dw[offsetof(struct amap_sol_cqe,
1497 cid) / 32] & SOL_CID_MASK) >> 6));
6733b39a
JK
1498 break;
1499 }
35e66019 1500
6733b39a
JK
1501 spin_unlock_bh(&session->lock);
1502}
1503
1504static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1505 *pasync_ctx, unsigned int is_header,
1506 unsigned int host_write_ptr)
1507{
1508 if (is_header)
1509 return &pasync_ctx->async_entry[host_write_ptr].
1510 header_busy_list;
1511 else
1512 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1513}
1514
1515static struct async_pdu_handle *
1516hwi_get_async_handle(struct beiscsi_hba *phba,
1517 struct beiscsi_conn *beiscsi_conn,
1518 struct hwi_async_pdu_context *pasync_ctx,
1519 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1520{
1521 struct be_bus_address phys_addr;
1522 struct list_head *pbusy_list;
1523 struct async_pdu_handle *pasync_handle = NULL;
6733b39a
JK
1524 unsigned char is_header = 0;
1525
1526 phys_addr.u.a32.address_lo =
1527 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1528 ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1529 & PDUCQE_DPL_MASK) >> 16);
1530 phys_addr.u.a32.address_hi =
1531 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1532
1533 phys_addr.u.a64.address =
1534 *((unsigned long long *)(&phys_addr.u.a64.address));
1535
1536 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1537 & PDUCQE_CODE_MASK) {
1538 case UNSOL_HDR_NOTIFY:
1539 is_header = 1;
1540
1541 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1542 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1543 index) / 32] & PDUCQE_INDEX_MASK));
6733b39a
JK
1544 break;
1545 case UNSOL_DATA_NOTIFY:
1546 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1547 dw[offsetof(struct amap_i_t_dpdu_cqe,
1548 index) / 32] & PDUCQE_INDEX_MASK));
6733b39a
JK
1549 break;
1550 default:
1551 pbusy_list = NULL;
99bc5d55
JSJ
1552 beiscsi_log(phba, KERN_WARNING,
1553 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1554 "BM_%d : Unexpected code=%d\n",
1555 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1556 code) / 32] & PDUCQE_CODE_MASK);
6733b39a
JK
1557 return NULL;
1558 }
1559
6733b39a
JK
1560 WARN_ON(list_empty(pbusy_list));
1561 list_for_each_entry(pasync_handle, pbusy_list, link) {
dc63aac6 1562 if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
6733b39a
JK
1563 break;
1564 }
1565
1566 WARN_ON(!pasync_handle);
1567
7da50879
JK
1568 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1569 phba->fw_config.iscsi_cid_start;
6733b39a
JK
1570 pasync_handle->is_header = is_header;
1571 pasync_handle->buffer_len = ((pdpdu_cqe->
1572 dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1573 & PDUCQE_DPL_MASK) >> 16);
1574
1575 *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1576 index) / 32] & PDUCQE_INDEX_MASK);
1577 return pasync_handle;
1578}
1579
1580static unsigned int
99bc5d55
JSJ
1581hwi_update_async_writables(struct beiscsi_hba *phba,
1582 struct hwi_async_pdu_context *pasync_ctx,
1583 unsigned int is_header, unsigned int cq_index)
6733b39a
JK
1584{
1585 struct list_head *pbusy_list;
1586 struct async_pdu_handle *pasync_handle;
1587 unsigned int num_entries, writables = 0;
1588 unsigned int *pep_read_ptr, *pwritables;
1589
dc63aac6 1590 num_entries = pasync_ctx->num_entries;
6733b39a
JK
1591 if (is_header) {
1592 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1593 pwritables = &pasync_ctx->async_header.writables;
6733b39a
JK
1594 } else {
1595 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1596 pwritables = &pasync_ctx->async_data.writables;
6733b39a
JK
1597 }
1598
1599 while ((*pep_read_ptr) != cq_index) {
1600 (*pep_read_ptr)++;
1601 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1602
1603 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1604 *pep_read_ptr);
1605 if (writables == 0)
1606 WARN_ON(list_empty(pbusy_list));
1607
1608 if (!list_empty(pbusy_list)) {
1609 pasync_handle = list_entry(pbusy_list->next,
1610 struct async_pdu_handle,
1611 link);
1612 WARN_ON(!pasync_handle);
1613 pasync_handle->consumed = 1;
1614 }
1615
1616 writables++;
1617 }
1618
1619 if (!writables) {
99bc5d55
JSJ
1620 beiscsi_log(phba, KERN_ERR,
1621 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1622 "BM_%d : Duplicate notification received - index 0x%x!!\n",
1623 cq_index);
6733b39a
JK
1624 WARN_ON(1);
1625 }
1626
1627 *pwritables = *pwritables + writables;
1628 return 0;
1629}
1630
9728d8d0 1631static void hwi_free_async_msg(struct beiscsi_hba *phba,
6733b39a
JK
1632 unsigned int cri)
1633{
1634 struct hwi_controller *phwi_ctrlr;
1635 struct hwi_async_pdu_context *pasync_ctx;
1636 struct async_pdu_handle *pasync_handle, *tmp_handle;
1637 struct list_head *plist;
6733b39a
JK
1638
1639 phwi_ctrlr = phba->phwi_ctrlr;
1640 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1641
1642 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1643
1644 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1645 list_del(&pasync_handle->link);
1646
9728d8d0 1647 if (pasync_handle->is_header) {
6733b39a
JK
1648 list_add_tail(&pasync_handle->link,
1649 &pasync_ctx->async_header.free_list);
1650 pasync_ctx->async_header.free_entries++;
6733b39a
JK
1651 } else {
1652 list_add_tail(&pasync_handle->link,
1653 &pasync_ctx->async_data.free_list);
1654 pasync_ctx->async_data.free_entries++;
6733b39a
JK
1655 }
1656 }
1657
1658 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1659 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1660 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
6733b39a
JK
1661}
1662
1663static struct phys_addr *
1664hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1665 unsigned int is_header, unsigned int host_write_ptr)
1666{
1667 struct phys_addr *pasync_sge = NULL;
1668
1669 if (is_header)
1670 pasync_sge = pasync_ctx->async_header.ring_base;
1671 else
1672 pasync_sge = pasync_ctx->async_data.ring_base;
1673
1674 return pasync_sge + host_write_ptr;
1675}
1676
1677static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1678 unsigned int is_header)
1679{
1680 struct hwi_controller *phwi_ctrlr;
1681 struct hwi_async_pdu_context *pasync_ctx;
1682 struct async_pdu_handle *pasync_handle;
1683 struct list_head *pfree_link, *pbusy_list;
1684 struct phys_addr *pasync_sge;
1685 unsigned int ring_id, num_entries;
1686 unsigned int host_write_num;
1687 unsigned int writables;
1688 unsigned int i = 0;
1689 u32 doorbell = 0;
1690
1691 phwi_ctrlr = phba->phwi_ctrlr;
1692 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
dc63aac6 1693 num_entries = pasync_ctx->num_entries;
6733b39a
JK
1694
1695 if (is_header) {
6733b39a
JK
1696 writables = min(pasync_ctx->async_header.writables,
1697 pasync_ctx->async_header.free_entries);
1698 pfree_link = pasync_ctx->async_header.free_list.next;
1699 host_write_num = pasync_ctx->async_header.host_write_ptr;
1700 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1701 } else {
6733b39a
JK
1702 writables = min(pasync_ctx->async_data.writables,
1703 pasync_ctx->async_data.free_entries);
1704 pfree_link = pasync_ctx->async_data.free_list.next;
1705 host_write_num = pasync_ctx->async_data.host_write_ptr;
1706 ring_id = phwi_ctrlr->default_pdu_data.id;
1707 }
1708
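	/*
	 * Buffers are only posted in multiples of eight entries, so round
	 * the writable count down before refilling the ring.
	 */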
1709 writables = (writables / 8) * 8;
1710 if (writables) {
1711 for (i = 0; i < writables; i++) {
1712 pbusy_list =
1713 hwi_get_async_busy_list(pasync_ctx, is_header,
1714 host_write_num);
1715 pasync_handle =
1716 list_entry(pfree_link, struct async_pdu_handle,
1717 link);
1718 WARN_ON(!pasync_handle);
1719 pasync_handle->consumed = 0;
1720
1721 pfree_link = pfree_link->next;
1722
1723 pasync_sge = hwi_get_ring_address(pasync_ctx,
1724 is_header, host_write_num);
1725
1726 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1727 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1728
1729 list_move(&pasync_handle->link, pbusy_list);
1730
1731 host_write_num++;
1732 host_write_num = host_write_num % num_entries;
1733 }
1734
1735 if (is_header) {
1736 pasync_ctx->async_header.host_write_ptr =
1737 host_write_num;
1738 pasync_ctx->async_header.free_entries -= writables;
1739 pasync_ctx->async_header.writables -= writables;
1740 pasync_ctx->async_header.busy_entries += writables;
1741 } else {
1742 pasync_ctx->async_data.host_write_ptr = host_write_num;
1743 pasync_ctx->async_data.free_entries -= writables;
1744 pasync_ctx->async_data.writables -= writables;
1745 pasync_ctx->async_data.busy_entries += writables;
1746 }
1747
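		/*
		 * Build the doorbell: ring id, re-arm bit and the number of
		 * entries just posted, then write it to the DB_RXULP0
		 * doorbell.
		 */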
1748 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1749 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1750 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1751 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1752 << DB_DEF_PDU_CQPROC_SHIFT;
1753
1754 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1755 }
1756}
1757
1758static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1759 struct beiscsi_conn *beiscsi_conn,
1760 struct i_t_dpdu_cqe *pdpdu_cqe)
1761{
1762 struct hwi_controller *phwi_ctrlr;
1763 struct hwi_async_pdu_context *pasync_ctx;
1764 struct async_pdu_handle *pasync_handle = NULL;
1765 unsigned int cq_index = -1;
1766
1767 phwi_ctrlr = phba->phwi_ctrlr;
1768 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1769
1770 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1771 pdpdu_cqe, &cq_index);
1772 BUG_ON(pasync_handle->is_header != 0);
1773 if (pasync_handle->consumed == 0)
99bc5d55
JSJ
1774 hwi_update_async_writables(phba, pasync_ctx,
1775 pasync_handle->is_header, cq_index);
6733b39a
JK
1776
1777 hwi_free_async_msg(phba, pasync_handle->cri);
1778 hwi_post_async_buffers(phba, pasync_handle->is_header);
1779}
1780
1781static unsigned int
1782hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1783 struct beiscsi_hba *phba,
1784 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1785{
1786 struct list_head *plist;
1787 struct async_pdu_handle *pasync_handle;
1788 void *phdr = NULL;
1789 unsigned int hdr_len = 0, buf_len = 0;
1790 unsigned int status, index = 0, offset = 0;
1791 void *pfirst_buffer = NULL;
1792 unsigned int num_buf = 0;
1793
1794 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1795
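	/*
	 * The first handle on the wait queue carries the PDU header; the
	 * remaining handles carry data segments, which are copied
	 * back-to-back into the first data buffer before the PDU is handed
	 * up to the iSCSI layer.
	 */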
1796 list_for_each_entry(pasync_handle, plist, link) {
1797 if (index == 0) {
1798 phdr = pasync_handle->pbuffer;
1799 hdr_len = pasync_handle->buffer_len;
1800 } else {
1801 buf_len = pasync_handle->buffer_len;
1802 if (!num_buf) {
1803 pfirst_buffer = pasync_handle->pbuffer;
1804 num_buf++;
1805 }
1806 memcpy(pfirst_buffer + offset,
1807 pasync_handle->pbuffer, buf_len);
f2ba02b8 1808 offset += buf_len;
6733b39a
JK
1809 }
1810 index++;
1811 }
1812
1813 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
7da50879
JK
1814 (beiscsi_conn->beiscsi_conn_cid -
1815 phba->fw_config.iscsi_cid_start),
1816 phdr, hdr_len, pfirst_buffer,
f2ba02b8 1817 offset);
6733b39a 1818
605c6cd2 1819 hwi_free_async_msg(phba, cri);
6733b39a
JK
1820 return 0;
1821}
1822
1823static unsigned int
1824hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1825 struct beiscsi_hba *phba,
1826 struct async_pdu_handle *pasync_handle)
1827{
1828 struct hwi_async_pdu_context *pasync_ctx;
1829 struct hwi_controller *phwi_ctrlr;
1830 unsigned int bytes_needed = 0, status = 0;
1831 unsigned short cri = pasync_handle->cri;
1832 struct pdu_base *ppdu;
1833
1834 phwi_ctrlr = phba->phwi_ctrlr;
1835 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1836
1837 list_del(&pasync_handle->link);
1838 if (pasync_handle->is_header) {
1839 pasync_ctx->async_header.busy_entries--;
1840 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1841 hwi_free_async_msg(phba, cri);
1842 BUG();
1843 }
1844
1845 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1846 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1847 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1848 (unsigned short)pasync_handle->buffer_len;
1849 list_add_tail(&pasync_handle->link,
1850 &pasync_ctx->async_entry[cri].wait_queue.list);
1851
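		/*
		 * Pull the data segment length out of the received BHS so we
		 * know how many data bytes must arrive before the PDU can be
		 * forwarded.
		 */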
1852 ppdu = pasync_handle->pbuffer;
1853 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1854 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1855 0xFFFF0000) | ((be16_to_cpu((ppdu->
1856 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1857 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1858
1859 if (status == 0) {
1860 pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1861 bytes_needed;
1862
1863 if (bytes_needed == 0)
1864 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1865 pasync_ctx, cri);
1866 }
1867 } else {
1868 pasync_ctx->async_data.busy_entries--;
1869 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1870 list_add_tail(&pasync_handle->link,
1871 &pasync_ctx->async_entry[cri].wait_queue.
1872 list);
1873 pasync_ctx->async_entry[cri].wait_queue.
1874 bytes_received +=
1875 (unsigned short)pasync_handle->buffer_len;
1876
1877 if (pasync_ctx->async_entry[cri].wait_queue.
1878 bytes_received >=
1879 pasync_ctx->async_entry[cri].wait_queue.
1880 bytes_needed)
1881 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1882 pasync_ctx, cri);
1883 }
1884 }
1885 return status;
1886}
1887
1888static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1889 struct beiscsi_hba *phba,
1890 struct i_t_dpdu_cqe *pdpdu_cqe)
1891{
1892 struct hwi_controller *phwi_ctrlr;
1893 struct hwi_async_pdu_context *pasync_ctx;
1894 struct async_pdu_handle *pasync_handle = NULL;
1895 unsigned int cq_index = -1;
1896
1897 phwi_ctrlr = phba->phwi_ctrlr;
1898 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1899 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1900 pdpdu_cqe, &cq_index);
1901
1902 if (pasync_handle->consumed == 0)
99bc5d55
JSJ
1903 hwi_update_async_writables(phba, pasync_ctx,
1904 pasync_handle->is_header, cq_index);
1905
6733b39a
JK
1906 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1907 hwi_post_async_buffers(phba, pasync_handle->is_header);
1908}
1909
756d29c8
JK
1910static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1911{
1912 struct be_queue_info *mcc_cq;
1913 struct be_mcc_compl *mcc_compl;
1914 unsigned int num_processed = 0;
1915
1916 mcc_cq = &phba->ctrl.mcc_obj.cq;
1917 mcc_compl = queue_tail_node(mcc_cq);
1918 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1919 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1920
1921 if (num_processed >= 32) {
1922 hwi_ring_cq_db(phba, mcc_cq->id,
1923 num_processed, 0, 0);
1924 num_processed = 0;
1925 }
1926 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1927 /* Interpret flags as an async trailer */
1928 if (is_link_state_evt(mcc_compl->flags))
 1929 /* Interpret compl as an async link evt */
1930 beiscsi_async_link_state_process(phba,
1931 (struct be_async_event_link_state *) mcc_compl);
1932 else
99bc5d55
JSJ
1933 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
1934 "BM_%d : Unsupported Async Event, flags"
1935 " = 0x%08x\n",
1936 mcc_compl->flags);
756d29c8
JK
1937 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1938 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1939 atomic_dec(&phba->ctrl.mcc_obj.q.used);
1940 }
1941
1942 mcc_compl->flags = 0;
1943 queue_tail_inc(mcc_cq);
1944 mcc_compl = queue_tail_node(mcc_cq);
1945 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1946 num_processed++;
1947 }
1948
1949 if (num_processed > 0)
1950 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1951
1952}
bfead3b2 1953
6763daae
JSJ
1954/**
1955 * beiscsi_process_cq()- Process the Completion Queue
1956 * @pbe_eq: Event Q on which the Completion has come
1957 *
 1958 * Return:
1959 * Number of Completion Entries processed.
1960 **/
bfead3b2 1961static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
6733b39a 1962{
6733b39a
JK
1963 struct be_queue_info *cq;
1964 struct sol_cqe *sol;
1965 struct dmsg_cqe *dmsg;
1966 unsigned int num_processed = 0;
1967 unsigned int tot_nump = 0;
0a513dd8 1968 unsigned short code = 0, cid = 0;
6733b39a 1969 struct beiscsi_conn *beiscsi_conn;
c2462288
JK
1970 struct beiscsi_endpoint *beiscsi_ep;
1971 struct iscsi_endpoint *ep;
bfead3b2 1972 struct beiscsi_hba *phba;
6733b39a 1973
bfead3b2 1974 cq = pbe_eq->cq;
6733b39a 1975 sol = queue_tail_node(cq);
bfead3b2 1976 phba = pbe_eq->phba;
6733b39a
JK
1977
1978 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1979 CQE_VALID_MASK) {
1980 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1981
0a513dd8
JSJ
1982 cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] &
1983 CQE_CID_MASK) >> 6);
1984 code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] &
1985 CQE_CODE_MASK);
1986 ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
32951dd8 1987
c2462288
JK
1988 beiscsi_ep = ep->dd_data;
1989 beiscsi_conn = beiscsi_ep->conn;
756d29c8 1990
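		/*
		 * Every 32 entries, ring the CQ doorbell with the count
		 * processed so far; the final ring after the loop also
		 * re-arms the queue.
		 */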
6733b39a 1991 if (num_processed >= 32) {
bfead3b2 1992 hwi_ring_cq_db(phba, cq->id,
6733b39a
JK
1993 num_processed, 0, 0);
1994 tot_nump += num_processed;
1995 num_processed = 0;
1996 }
1997
0a513dd8 1998 switch (code) {
6733b39a
JK
1999 case SOL_CMD_COMPLETE:
2000 hwi_complete_cmd(beiscsi_conn, phba, sol);
2001 break;
2002 case DRIVERMSG_NOTIFY:
99bc5d55
JSJ
2003 beiscsi_log(phba, KERN_INFO,
2004 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2005 "BM_%d : Received %s[%d] on CID : %d\n",
2006 cqe_desc[code], code, cid);
99bc5d55 2007
6733b39a
JK
2008 dmsg = (struct dmsg_cqe *)sol;
2009 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
2010 break;
2011 case UNSOL_HDR_NOTIFY:
99bc5d55
JSJ
2012 beiscsi_log(phba, KERN_INFO,
2013 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2014 "BM_%d : Received %s[%d] on CID : %d\n",
2015 cqe_desc[code], code, cid);
99bc5d55 2016
bfead3b2
JK
2017 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2018 (struct i_t_dpdu_cqe *)sol);
2019 break;
6733b39a 2020 case UNSOL_DATA_NOTIFY:
99bc5d55
JSJ
2021 beiscsi_log(phba, KERN_INFO,
2022 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
6763daae
JSJ
2023 "BM_%d : Received %s[%d] on CID : %d\n",
2024 cqe_desc[code], code, cid);
99bc5d55 2025
6733b39a
JK
2026 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2027 (struct i_t_dpdu_cqe *)sol);
2028 break;
2029 case CXN_INVALIDATE_INDEX_NOTIFY:
2030 case CMD_INVALIDATED_NOTIFY:
2031 case CXN_INVALIDATE_NOTIFY:
99bc5d55
JSJ
2032 beiscsi_log(phba, KERN_ERR,
2033 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2034 "BM_%d : Ignoring %s[%d] on CID : %d\n",
2035 cqe_desc[code], code, cid);
6733b39a
JK
2036 break;
2037 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
2038 case CMD_KILLED_INVALID_STATSN_RCVD:
2039 case CMD_KILLED_INVALID_R2T_RCVD:
2040 case CMD_CXN_KILLED_LUN_INVALID:
2041 case CMD_CXN_KILLED_ICD_INVALID:
2042 case CMD_CXN_KILLED_ITT_INVALID:
2043 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
2044 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
99bc5d55
JSJ
2045 beiscsi_log(phba, KERN_ERR,
2046 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
6763daae
JSJ
2047 "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
2048 cqe_desc[code], code, cid);
6733b39a
JK
2049 break;
2050 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
99bc5d55
JSJ
2051 beiscsi_log(phba, KERN_ERR,
2052 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2053 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
2054 cqe_desc[code], code, cid);
6733b39a
JK
2055 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
2056 (struct i_t_dpdu_cqe *) sol);
2057 break;
2058 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2059 case CXN_KILLED_BURST_LEN_MISMATCH:
2060 case CXN_KILLED_AHS_RCVD:
2061 case CXN_KILLED_HDR_DIGEST_ERR:
2062 case CXN_KILLED_UNKNOWN_HDR:
2063 case CXN_KILLED_STALE_ITT_TTT_RCVD:
2064 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
2065 case CXN_KILLED_TIMED_OUT:
2066 case CXN_KILLED_FIN_RCVD:
6763daae
JSJ
2067 case CXN_KILLED_RST_SENT:
2068 case CXN_KILLED_RST_RCVD:
6733b39a
JK
2069 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2070 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2071 case CXN_KILLED_OVER_RUN_RESIDUAL:
2072 case CXN_KILLED_UNDER_RUN_RESIDUAL:
2073 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
99bc5d55
JSJ
2074 beiscsi_log(phba, KERN_ERR,
2075 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2076 "BM_%d : Event %s[%d] received on CID : %d\n",
2077 cqe_desc[code], code, cid);
0a513dd8
JSJ
2078 if (beiscsi_conn)
2079 iscsi_conn_failure(beiscsi_conn->conn,
2080 ISCSI_ERR_CONN_FAILED);
6733b39a
JK
2081 break;
2082 default:
99bc5d55
JSJ
2083 beiscsi_log(phba, KERN_ERR,
2084 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2085 "BM_%d : Invalid CQE Event Received Code : %d"
2086 "CID 0x%x...\n",
0a513dd8 2087 code, cid);
6733b39a
JK
2088 break;
2089 }
2090
2091 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2092 queue_tail_inc(cq);
2093 sol = queue_tail_node(cq);
2094 num_processed++;
2095 }
2096
2097 if (num_processed > 0) {
2098 tot_nump += num_processed;
bfead3b2 2099 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
6733b39a
JK
2100 }
2101 return tot_nump;
2102}
2103
756d29c8 2104void beiscsi_process_all_cqs(struct work_struct *work)
6733b39a
JK
2105{
2106 unsigned long flags;
bfead3b2
JK
2107 struct hwi_controller *phwi_ctrlr;
2108 struct hwi_context_memory *phwi_context;
72fb46a9
JSJ
2109 struct beiscsi_hba *phba;
2110 struct be_eq_obj *pbe_eq =
2111 container_of(work, struct be_eq_obj, work_cqs);
6733b39a 2112
72fb46a9 2113 phba = pbe_eq->phba;
bfead3b2
JK
2114 phwi_ctrlr = phba->phwi_ctrlr;
2115 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2 2116
72fb46a9 2117 if (pbe_eq->todo_mcc_cq) {
6733b39a 2118 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 2119 pbe_eq->todo_mcc_cq = false;
6733b39a 2120 spin_unlock_irqrestore(&phba->isr_lock, flags);
756d29c8 2121 beiscsi_process_mcc_isr(phba);
6733b39a
JK
2122 }
2123
72fb46a9 2124 if (pbe_eq->todo_cq) {
6733b39a 2125 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 2126 pbe_eq->todo_cq = false;
6733b39a 2127 spin_unlock_irqrestore(&phba->isr_lock, flags);
bfead3b2 2128 beiscsi_process_cq(pbe_eq);
6733b39a 2129 }
72fb46a9
JSJ
2130
2131 /* rearm EQ for further interrupts */
2132 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
6733b39a
JK
2133}
2134
2135static int be_iopoll(struct blk_iopoll *iop, int budget)
2136{
 2137 unsigned int ret;
2138 struct beiscsi_hba *phba;
bfead3b2 2139 struct be_eq_obj *pbe_eq;
6733b39a 2140
bfead3b2
JK
2141 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2142 ret = beiscsi_process_cq(pbe_eq);
6733b39a 2143 if (ret < budget) {
bfead3b2 2144 phba = pbe_eq->phba;
6733b39a 2145 blk_iopoll_complete(iop);
99bc5d55
JSJ
2146 beiscsi_log(phba, KERN_INFO,
2147 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2148 "BM_%d : rearm pbe_eq->q.id =%d\n",
2149 pbe_eq->q.id);
bfead3b2 2150 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
6733b39a
JK
2151 }
2152 return ret;
2153}
2154
2155static void
2156hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2157 unsigned int num_sg, struct beiscsi_io_task *io_task)
2158{
2159 struct iscsi_sge *psgl;
58ff4bd0 2160 unsigned int sg_len, index;
6733b39a
JK
2161 unsigned int sge_len = 0;
2162 unsigned long long addr;
2163 struct scatterlist *l_sg;
2164 unsigned int offset;
2165
2166 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2167 io_task->bhs_pa.u.a32.address_lo);
2168 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2169 io_task->bhs_pa.u.a32.address_hi);
2170
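	/*
	 * Up to two data SGEs are programmed directly in the WRB; the full
	 * scatter list is written again into the SGL fragment further down.
	 */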
2171 l_sg = sg;
48bd86cf
JK
2172 for (index = 0; (index < num_sg) && (index < 2); index++,
2173 sg = sg_next(sg)) {
6733b39a
JK
2174 if (index == 0) {
2175 sg_len = sg_dma_len(sg);
2176 addr = (u64) sg_dma_address(sg);
2177 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
457ff3b7 2178 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2179 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
457ff3b7 2180 ((u32)(addr >> 32)));
6733b39a
JK
2181 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2182 sg_len);
2183 sge_len = sg_len;
6733b39a 2184 } else {
6733b39a
JK
2185 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2186 pwrb, sge_len);
2187 sg_len = sg_dma_len(sg);
2188 addr = (u64) sg_dma_address(sg);
2189 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
457ff3b7 2190 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2191 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
457ff3b7 2192 ((u32)(addr >> 32)));
6733b39a
JK
2193 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2194 sg_len);
2195 }
2196 }
2197 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2198 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2199
2200 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2201
2202 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2203 io_task->bhs_pa.u.a32.address_hi);
2204 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2205 io_task->bhs_pa.u.a32.address_lo);
2206
caf818f1
JK
2207 if (num_sg == 1) {
2208 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2209 1);
2210 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2211 0);
2212 } else if (num_sg == 2) {
2213 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2214 0);
2215 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2216 1);
2217 } else {
2218 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2219 0);
2220 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2221 0);
2222 }
6733b39a
JK
2223 sg = l_sg;
2224 psgl++;
2225 psgl++;
2226 offset = 0;
48bd86cf 2227 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
6733b39a
JK
2228 sg_len = sg_dma_len(sg);
2229 addr = (u64) sg_dma_address(sg);
2230 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2231 (addr & 0xFFFFFFFF));
2232 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2233 (addr >> 32));
2234 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2235 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2236 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2237 offset += sg_len;
2238 }
2239 psgl--;
2240 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2241}
2242
d629c471
JSJ
2243/**
2244 * hwi_write_buffer()- Populate the WRB with task info
2245 * @pwrb: ptr to the WRB entry
2246 * @task: iscsi task which is to be executed
2247 **/
6733b39a
JK
2248static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2249{
2250 struct iscsi_sge *psgl;
6733b39a
JK
2251 struct beiscsi_io_task *io_task = task->dd_data;
2252 struct beiscsi_conn *beiscsi_conn = io_task->conn;
2253 struct beiscsi_hba *phba = beiscsi_conn->phba;
2254
2255 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2256 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2257 io_task->bhs_pa.u.a32.address_lo);
2258 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2259 io_task->bhs_pa.u.a32.address_hi);
2260
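	/*
	 * When a management task carries immediate data (task->data), map it
	 * for DMA and describe it with the first SGE of the WRB.
	 */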
2261 if (task->data) {
2262 if (task->data_count) {
2263 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
d629c471
JSJ
2264 io_task->mtask_addr = pci_map_single(phba->pcidev,
2265 task->data,
2266 task->data_count,
2267 PCI_DMA_TODEVICE);
2268
2269 io_task->mtask_data_count = task->data_count;
6733b39a
JK
2270 } else {
2271 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
d629c471 2272 io_task->mtask_addr = 0;
6733b39a
JK
2273 }
2274 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
d629c471 2275 lower_32_bits(io_task->mtask_addr));
6733b39a 2276 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
d629c471 2277 upper_32_bits(io_task->mtask_addr));
6733b39a
JK
2278 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2279 task->data_count);
2280
2281 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2282 } else {
2283 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
d629c471 2284 io_task->mtask_addr = 0;
6733b39a
JK
2285 }
2286
2287 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2288
2289 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2290
2291 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2292 io_task->bhs_pa.u.a32.address_hi);
2293 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2294 io_task->bhs_pa.u.a32.address_lo);
2295 if (task->data) {
2296 psgl++;
2297 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2298 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2299 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2300 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2301 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2302 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2303
2304 psgl++;
2305 if (task->data) {
2306 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
d629c471 2307 lower_32_bits(io_task->mtask_addr));
6733b39a 2308 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
d629c471 2309 upper_32_bits(io_task->mtask_addr));
6733b39a
JK
2310 }
2311 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2312 }
2313 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2314}
2315
2316static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2317{
bfead3b2 2318 unsigned int num_cq_pages, num_async_pdu_buf_pages;
6733b39a
JK
2319 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2320 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2321
2322 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2323 sizeof(struct sol_cqe));
6733b39a
JK
2324 num_async_pdu_buf_pages =
2325 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2326 phba->params.defpdu_hdr_sz);
2327 num_async_pdu_buf_sgl_pages =
2328 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2329 sizeof(struct phys_addr));
2330 num_async_pdu_data_pages =
2331 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2332 phba->params.defpdu_data_sz);
2333 num_async_pdu_data_sgl_pages =
2334 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2335 sizeof(struct phys_addr));
2336
2337 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2338
2339 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2340 BE_ISCSI_PDU_HEADER_SIZE;
2341 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2342 sizeof(struct hwi_context_memory);
2343
6733b39a
JK
2344
2345 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2346 * (phba->params.wrbs_per_cxn)
2347 * phba->params.cxns_per_ctrl;
2348 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2349 (phba->params.wrbs_per_cxn);
2350 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2351 phba->params.cxns_per_ctrl);
2352
2353 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2354 phba->params.icds_per_ctrl;
2355 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2356 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2357
2358 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
2359 num_async_pdu_buf_pages * PAGE_SIZE;
2360 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
2361 num_async_pdu_data_pages * PAGE_SIZE;
2362 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
2363 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
2364 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
2365 num_async_pdu_data_sgl_pages * PAGE_SIZE;
2366 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
2367 phba->params.asyncpdus_per_ctrl *
2368 sizeof(struct async_pdu_handle);
2369 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
2370 phba->params.asyncpdus_per_ctrl *
2371 sizeof(struct async_pdu_handle);
2372 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
2373 sizeof(struct hwi_async_pdu_context) +
2374 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
2375}
2376
2377static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2378{
2379 struct be_mem_descriptor *mem_descr;
2380 dma_addr_t bus_add;
2381 struct mem_array *mem_arr, *mem_arr_orig;
2382 unsigned int i, j, alloc_size, curr_alloc_size;
2383
3ec78271 2384 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
6733b39a
JK
2385 if (!phba->phwi_ctrlr)
2386 return -ENOMEM;
2387
2388 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2389 GFP_KERNEL);
2390 if (!phba->init_mem) {
2391 kfree(phba->phwi_ctrlr);
2392 return -ENOMEM;
2393 }
2394
2395 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2396 GFP_KERNEL);
2397 if (!mem_arr_orig) {
2398 kfree(phba->init_mem);
2399 kfree(phba->phwi_ctrlr);
2400 return -ENOMEM;
2401 }
2402
2403 mem_descr = phba->init_mem;
2404 for (i = 0; i < SE_MEM_MAX; i++) {
2405 j = 0;
2406 mem_arr = mem_arr_orig;
2407 alloc_size = phba->mem_req[i];
2408 memset(mem_arr, 0, sizeof(struct mem_array) *
2409 BEISCSI_MAX_FRAGS_INIT);
2410 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
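		/*
		 * Try large contiguous DMA chunks first; on failure retry
		 * with progressively smaller sizes (rounded down to a power
		 * of two, or halved) and split the region across several
		 * fragments, giving up below BE_MIN_MEM_SIZE.
		 */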
2411 do {
2412 mem_arr->virtual_address = pci_alloc_consistent(
2413 phba->pcidev,
2414 curr_alloc_size,
2415 &bus_add);
2416 if (!mem_arr->virtual_address) {
2417 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2418 goto free_mem;
2419 if (curr_alloc_size -
2420 rounddown_pow_of_two(curr_alloc_size))
2421 curr_alloc_size = rounddown_pow_of_two
2422 (curr_alloc_size);
2423 else
2424 curr_alloc_size = curr_alloc_size / 2;
2425 } else {
2426 mem_arr->bus_address.u.
2427 a64.address = (__u64) bus_add;
2428 mem_arr->size = curr_alloc_size;
2429 alloc_size -= curr_alloc_size;
2430 curr_alloc_size = min(be_max_phys_size *
2431 1024, alloc_size);
2432 j++;
2433 mem_arr++;
2434 }
2435 } while (alloc_size);
2436 mem_descr->num_elements = j;
2437 mem_descr->size_in_bytes = phba->mem_req[i];
2438 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2439 GFP_KERNEL);
2440 if (!mem_descr->mem_array)
2441 goto free_mem;
2442
2443 memcpy(mem_descr->mem_array, mem_arr_orig,
2444 sizeof(struct mem_array) * j);
2445 mem_descr++;
2446 }
2447 kfree(mem_arr_orig);
2448 return 0;
2449free_mem:
2450 mem_descr->num_elements = j;
2451 while ((i) || (j)) {
2452 for (j = mem_descr->num_elements; j > 0; j--) {
2453 pci_free_consistent(phba->pcidev,
2454 mem_descr->mem_array[j - 1].size,
2455 mem_descr->mem_array[j - 1].
2456 virtual_address,
457ff3b7
JK
2457 (unsigned long)mem_descr->
2458 mem_array[j - 1].
6733b39a
JK
2459 bus_address.u.a64.address);
2460 }
2461 if (i) {
2462 i--;
2463 kfree(mem_descr->mem_array);
2464 mem_descr--;
2465 }
2466 }
2467 kfree(mem_arr_orig);
2468 kfree(phba->init_mem);
2469 kfree(phba->phwi_ctrlr);
2470 return -ENOMEM;
2471}
2472
2473static int beiscsi_get_memory(struct beiscsi_hba *phba)
2474{
2475 beiscsi_find_mem_req(phba);
2476 return beiscsi_alloc_mem(phba);
2477}
2478
2479static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2480{
2481 struct pdu_data_out *pdata_out;
2482 struct pdu_nop_out *pnop_out;
2483 struct be_mem_descriptor *mem_descr;
2484
2485 mem_descr = phba->init_mem;
2486 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2487 pdata_out =
2488 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2489 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2490
2491 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2492 IIOC_SCSI_DATA);
2493
2494 pnop_out =
2495 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2496 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2497
2498 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2499 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2500 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2501 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2502}
2503
3ec78271 2504static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
6733b39a
JK
2505{
2506 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
3ec78271 2507 struct wrb_handle *pwrb_handle = NULL;
6733b39a
JK
2508 struct hwi_controller *phwi_ctrlr;
2509 struct hwi_wrb_context *pwrb_context;
3ec78271
JK
2510 struct iscsi_wrb *pwrb = NULL;
2511 unsigned int num_cxn_wrbh = 0;
2512 unsigned int num_cxn_wrb = 0, j, idx = 0, index;
6733b39a
JK
2513
2514 mem_descr_wrbh = phba->init_mem;
2515 mem_descr_wrbh += HWI_MEM_WRBH;
2516
2517 mem_descr_wrb = phba->init_mem;
2518 mem_descr_wrb += HWI_MEM_WRB;
6733b39a
JK
2519 phwi_ctrlr = phba->phwi_ctrlr;
2520
2521 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2522 pwrb_context = &phwi_ctrlr->wrb_context[index];
6733b39a
JK
2523 pwrb_context->pwrb_handle_base =
2524 kzalloc(sizeof(struct wrb_handle *) *
2525 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271 2526 if (!pwrb_context->pwrb_handle_base) {
99bc5d55
JSJ
2527 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2528 "BM_%d : Mem Alloc Failed. Failing to load\n");
3ec78271
JK
2529 goto init_wrb_hndl_failed;
2530 }
6733b39a
JK
2531 pwrb_context->pwrb_handle_basestd =
2532 kzalloc(sizeof(struct wrb_handle *) *
2533 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271 2534 if (!pwrb_context->pwrb_handle_basestd) {
99bc5d55
JSJ
2535 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2536 "BM_%d : Mem Alloc Failed. Failing to load\n");
3ec78271
JK
2537 goto init_wrb_hndl_failed;
2538 }
2539 if (!num_cxn_wrbh) {
2540 pwrb_handle =
2541 mem_descr_wrbh->mem_array[idx].virtual_address;
2542 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2543 ((sizeof(struct wrb_handle)) *
2544 phba->params.wrbs_per_cxn));
2545 idx++;
2546 }
2547 pwrb_context->alloc_index = 0;
2548 pwrb_context->wrb_handles_available = 0;
2549 pwrb_context->free_index = 0;
2550
6733b39a 2551 if (num_cxn_wrbh) {
6733b39a
JK
2552 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2553 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2554 pwrb_context->pwrb_handle_basestd[j] =
2555 pwrb_handle;
2556 pwrb_context->wrb_handles_available++;
bfead3b2 2557 pwrb_handle->wrb_index = j;
6733b39a
JK
2558 pwrb_handle++;
2559 }
6733b39a
JK
2560 num_cxn_wrbh--;
2561 }
2562 }
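	/*
	 * Second pass: attach the actual iSCSI WRB entries to the handles
	 * set up above, moving to the next memory fragment whenever the
	 * current one is exhausted.
	 */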
2563 idx = 0;
ed58ea2a 2564 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
6733b39a 2565 pwrb_context = &phwi_ctrlr->wrb_context[index];
3ec78271 2566 if (!num_cxn_wrb) {
6733b39a 2567 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
7c56533c 2568 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
3ec78271
JK
2569 ((sizeof(struct iscsi_wrb) *
2570 phba->params.wrbs_per_cxn));
2571 idx++;
2572 }
2573
2574 if (num_cxn_wrb) {
6733b39a
JK
2575 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2576 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2577 pwrb_handle->pwrb = pwrb;
2578 pwrb++;
2579 }
2580 num_cxn_wrb--;
2581 }
2582 }
3ec78271
JK
2583 return 0;
2584init_wrb_hndl_failed:
2585 for (j = index; j > 0; j--) {
2586 pwrb_context = &phwi_ctrlr->wrb_context[j];
2587 kfree(pwrb_context->pwrb_handle_base);
2588 kfree(pwrb_context->pwrb_handle_basestd);
2589 }
2590 return -ENOMEM;
6733b39a
JK
2591}
2592
2593static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2594{
2595 struct hwi_controller *phwi_ctrlr;
2596 struct hba_parameters *p = &phba->params;
2597 struct hwi_async_pdu_context *pasync_ctx;
2598 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
dc63aac6 2599 unsigned int index, idx, num_per_mem, num_async_data;
6733b39a
JK
2600 struct be_mem_descriptor *mem_descr;
2601
2602 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2603 mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2604
2605 phwi_ctrlr = phba->phwi_ctrlr;
2606 phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2607 mem_descr->mem_array[0].virtual_address;
2608 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2609 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2610
dc63aac6
JK
2611 pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
2612 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
6733b39a
JK
2613
2614 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2615 mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2616 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2617 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2618 "BM_%d : hwi_init_async_pdu_ctx"
2619 " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
2620 mem_descr->mem_array[0].virtual_address);
6733b39a 2621 } else
99bc5d55
JSJ
2622 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2623 "BM_%d : No Virtual address\n");
6733b39a
JK
2624
2625 pasync_ctx->async_header.va_base =
2626 mem_descr->mem_array[0].virtual_address;
2627
2628 pasync_ctx->async_header.pa_base.u.a64.address =
2629 mem_descr->mem_array[0].bus_address.u.a64.address;
2630
2631 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2632 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2633 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2634 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2635 "BM_%d : hwi_init_async_pdu_ctx"
2636 " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
2637 mem_descr->mem_array[0].virtual_address);
6733b39a 2638 } else
99bc5d55
JSJ
2639 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2640 "BM_%d : No Virtual address\n");
2641
6733b39a
JK
2642 pasync_ctx->async_header.ring_base =
2643 mem_descr->mem_array[0].virtual_address;
2644
2645 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2646 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2647 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2648 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2649 "BM_%d : hwi_init_async_pdu_ctx"
2650 " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
2651 mem_descr->mem_array[0].virtual_address);
6733b39a 2652 } else
99bc5d55
JSJ
2653 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2654 "BM_%d : No Virtual address\n");
6733b39a
JK
2655
2656 pasync_ctx->async_header.handle_base =
2657 mem_descr->mem_array[0].virtual_address;
2658 pasync_ctx->async_header.writables = 0;
2659 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2660
6733b39a
JK
2661
2662 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2663 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2664 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2665 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2666 "BM_%d : hwi_init_async_pdu_ctx"
2667 " HWI_MEM_ASYNC_DATA_RING va=%p\n",
2668 mem_descr->mem_array[0].virtual_address);
6733b39a 2669 } else
99bc5d55
JSJ
2670 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2671 "BM_%d : No Virtual address\n");
6733b39a
JK
2672
2673 pasync_ctx->async_data.ring_base =
2674 mem_descr->mem_array[0].virtual_address;
2675
2676 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2677 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2678 if (!mem_descr->mem_array[0].virtual_address)
99bc5d55
JSJ
2679 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2680 "BM_%d : No Virtual address\n");
6733b39a
JK
2681
2682 pasync_ctx->async_data.handle_base =
2683 mem_descr->mem_array[0].virtual_address;
2684 pasync_ctx->async_data.writables = 0;
2685 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2686
2687 pasync_header_h =
2688 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2689 pasync_data_h =
2690 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2691
dc63aac6
JK
2692 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2693 mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2694 if (mem_descr->mem_array[0].virtual_address) {
99bc5d55
JSJ
2695 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2696 "BM_%d : hwi_init_async_pdu_ctx"
2697 " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
2698 mem_descr->mem_array[0].virtual_address);
dc63aac6 2699 } else
99bc5d55
JSJ
2700 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2701 "BM_%d : No Virtual address\n");
2702
dc63aac6
JK
2703 idx = 0;
2704 pasync_ctx->async_data.va_base =
2705 mem_descr->mem_array[idx].virtual_address;
2706 pasync_ctx->async_data.pa_base.u.a64.address =
2707 mem_descr->mem_array[idx].bus_address.u.a64.address;
2708
2709 num_async_data = ((mem_descr->mem_array[idx].size) /
2710 phba->params.defpdu_data_sz);
2711 num_per_mem = 0;
2712
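	/*
	 * Carve the header and data buffer pools into one handle per PDU.
	 * Data buffers may span several DMA fragments, so switch to the next
	 * fragment whenever the current one is used up.
	 */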
6733b39a
JK
2713 for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2714 pasync_header_h->cri = -1;
2715 pasync_header_h->index = (char)index;
2716 INIT_LIST_HEAD(&pasync_header_h->link);
2717 pasync_header_h->pbuffer =
2718 (void *)((unsigned long)
2719 (pasync_ctx->async_header.va_base) +
2720 (p->defpdu_hdr_sz * index));
2721
2722 pasync_header_h->pa.u.a64.address =
2723 pasync_ctx->async_header.pa_base.u.a64.address +
2724 (p->defpdu_hdr_sz * index);
2725
2726 list_add_tail(&pasync_header_h->link,
2727 &pasync_ctx->async_header.free_list);
2728 pasync_header_h++;
2729 pasync_ctx->async_header.free_entries++;
2730 pasync_ctx->async_header.writables++;
2731
2732 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2733 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2734 header_busy_list);
2735 pasync_data_h->cri = -1;
2736 pasync_data_h->index = (char)index;
2737 INIT_LIST_HEAD(&pasync_data_h->link);
dc63aac6
JK
2738
2739 if (!num_async_data) {
2740 num_per_mem = 0;
2741 idx++;
2742 pasync_ctx->async_data.va_base =
2743 mem_descr->mem_array[idx].virtual_address;
2744 pasync_ctx->async_data.pa_base.u.a64.address =
2745 mem_descr->mem_array[idx].
2746 bus_address.u.a64.address;
2747
2748 num_async_data = ((mem_descr->mem_array[idx].size) /
2749 phba->params.defpdu_data_sz);
2750 }
6733b39a
JK
2751 pasync_data_h->pbuffer =
2752 (void *)((unsigned long)
2753 (pasync_ctx->async_data.va_base) +
dc63aac6 2754 (p->defpdu_data_sz * num_per_mem));
6733b39a
JK
2755
2756 pasync_data_h->pa.u.a64.address =
2757 pasync_ctx->async_data.pa_base.u.a64.address +
dc63aac6
JK
2758 (p->defpdu_data_sz * num_per_mem);
2759 num_per_mem++;
2760 num_async_data--;
6733b39a
JK
2761
2762 list_add_tail(&pasync_data_h->link,
2763 &pasync_ctx->async_data.free_list);
2764 pasync_data_h++;
2765 pasync_ctx->async_data.free_entries++;
2766 pasync_ctx->async_data.writables++;
2767
2768 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2769 }
2770
2771 pasync_ctx->async_header.host_write_ptr = 0;
2772 pasync_ctx->async_header.ep_read_ptr = -1;
2773 pasync_ctx->async_data.host_write_ptr = 0;
2774 pasync_ctx->async_data.ep_read_ptr = -1;
2775}
2776
2777static int
2778be_sgl_create_contiguous(void *virtual_address,
2779 u64 physical_address, u32 length,
2780 struct be_dma_mem *sgl)
2781{
2782 WARN_ON(!virtual_address);
2783 WARN_ON(!physical_address);
 2784 WARN_ON(!length);
2785 WARN_ON(!sgl);
2786
2787 sgl->va = virtual_address;
457ff3b7 2788 sgl->dma = (unsigned long)physical_address;
6733b39a
JK
2789 sgl->size = length;
2790
2791 return 0;
2792}
2793
2794static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2795{
2796 memset(sgl, 0, sizeof(*sgl));
2797}
2798
2799static void
2800hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2801 struct mem_array *pmem, struct be_dma_mem *sgl)
2802{
2803 if (sgl->va)
2804 be_sgl_destroy_contiguous(sgl);
2805
2806 be_sgl_create_contiguous(pmem->virtual_address,
2807 pmem->bus_address.u.a64.address,
2808 pmem->size, sgl);
2809}
2810
2811static void
2812hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2813 struct mem_array *pmem, struct be_dma_mem *sgl)
2814{
2815 if (sgl->va)
2816 be_sgl_destroy_contiguous(sgl);
2817
2818 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2819 pmem->bus_address.u.a64.address,
2820 pmem->size, sgl);
2821}
2822
2823static int be_fill_queue(struct be_queue_info *q,
2824 u16 len, u16 entry_size, void *vaddress)
2825{
2826 struct be_dma_mem *mem = &q->dma_mem;
2827
2828 memset(q, 0, sizeof(*q));
2829 q->len = len;
2830 q->entry_size = entry_size;
2831 mem->size = len * entry_size;
2832 mem->va = vaddress;
2833 if (!mem->va)
2834 return -ENOMEM;
2835 memset(mem->va, 0, mem->size);
2836 return 0;
2837}
2838
bfead3b2 2839static int beiscsi_create_eqs(struct beiscsi_hba *phba,
6733b39a
JK
2840 struct hwi_context_memory *phwi_context)
2841{
bfead3b2 2842 unsigned int i, num_eq_pages;
99bc5d55 2843 int ret = 0, eq_for_mcc;
6733b39a
JK
2844 struct be_queue_info *eq;
2845 struct be_dma_mem *mem;
6733b39a 2846 void *eq_vaddress;
bfead3b2 2847 dma_addr_t paddr;
6733b39a 2848
bfead3b2
JK
2849 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2850 sizeof(struct be_eq_entry));
6733b39a 2851
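	/*
	 * One EQ is created per CPU, plus one extra EQ for the MCC queue
	 * when MSI-X interrupts are enabled.
	 */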
bfead3b2
JK
2852 if (phba->msix_enabled)
2853 eq_for_mcc = 1;
2854 else
2855 eq_for_mcc = 0;
2856 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2857 eq = &phwi_context->be_eq[i].q;
2858 mem = &eq->dma_mem;
2859 phwi_context->be_eq[i].phba = phba;
2860 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2861 num_eq_pages * PAGE_SIZE,
2862 &paddr);
2863 if (!eq_vaddress)
2864 goto create_eq_error;
2865
2866 mem->va = eq_vaddress;
2867 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2868 sizeof(struct be_eq_entry), eq_vaddress);
2869 if (ret) {
99bc5d55
JSJ
2870 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2871 "BM_%d : be_fill_queue Failed for EQ\n");
bfead3b2
JK
2872 goto create_eq_error;
2873 }
6733b39a 2874
bfead3b2
JK
2875 mem->dma = paddr;
2876 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2877 phwi_context->cur_eqd);
2878 if (ret) {
99bc5d55
JSJ
2879 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2880 "BM_%d : beiscsi_cmd_eq_create"
2881 "Failed for EQ\n");
bfead3b2
JK
2882 goto create_eq_error;
2883 }
99bc5d55
JSJ
2884
2885 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2886 "BM_%d : eqid = %d\n",
2887 phwi_context->be_eq[i].q.id);
6733b39a 2888 }
6733b39a 2889 return 0;
bfead3b2 2890create_eq_error:
107dfcba 2891 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
bfead3b2
JK
2892 eq = &phwi_context->be_eq[i].q;
2893 mem = &eq->dma_mem;
2894 if (mem->va)
2895 pci_free_consistent(phba->pcidev, num_eq_pages
2896 * PAGE_SIZE,
2897 mem->va, mem->dma);
2898 }
2899 return ret;
6733b39a
JK
2900}
2901
bfead3b2 2902static int beiscsi_create_cqs(struct beiscsi_hba *phba,
6733b39a
JK
2903 struct hwi_context_memory *phwi_context)
2904{
bfead3b2 2905 unsigned int i, num_cq_pages;
99bc5d55 2906 int ret = 0;
6733b39a
JK
2907 struct be_queue_info *cq, *eq;
2908 struct be_dma_mem *mem;
bfead3b2 2909 struct be_eq_obj *pbe_eq;
6733b39a 2910 void *cq_vaddress;
bfead3b2 2911 dma_addr_t paddr;
6733b39a 2912
bfead3b2
JK
2913 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2914 sizeof(struct sol_cqe));
6733b39a 2915
bfead3b2
JK
2916 for (i = 0; i < phba->num_cpus; i++) {
2917 cq = &phwi_context->be_cq[i];
2918 eq = &phwi_context->be_eq[i].q;
2919 pbe_eq = &phwi_context->be_eq[i];
2920 pbe_eq->cq = cq;
2921 pbe_eq->phba = phba;
2922 mem = &cq->dma_mem;
2923 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2924 num_cq_pages * PAGE_SIZE,
2925 &paddr);
2926 if (!cq_vaddress)
2927 goto create_cq_error;
7da50879 2928 ret = be_fill_queue(cq, phba->params.num_cq_entries,
bfead3b2
JK
2929 sizeof(struct sol_cqe), cq_vaddress);
2930 if (ret) {
99bc5d55
JSJ
2931 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2932 "BM_%d : be_fill_queue Failed "
2933 "for ISCSI CQ\n");
bfead3b2
JK
2934 goto create_cq_error;
2935 }
2936
2937 mem->dma = paddr;
2938 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2939 false, 0);
2940 if (ret) {
99bc5d55
JSJ
2941 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2942 "BM_%d : beiscsi_cmd_eq_create"
2943 "Failed for ISCSI CQ\n");
bfead3b2
JK
2944 goto create_cq_error;
2945 }
99bc5d55
JSJ
2946 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2947 "BM_%d : iscsi cq_id is %d for eq_id %d\n"
2948 "iSCSI CQ CREATED\n", cq->id, eq->id);
6733b39a 2949 }
6733b39a 2950 return 0;
bfead3b2
JK
2951
2952create_cq_error:
2953 for (i = 0; i < phba->num_cpus; i++) {
2954 cq = &phwi_context->be_cq[i];
2955 mem = &cq->dma_mem;
2956 if (mem->va)
2957 pci_free_consistent(phba->pcidev, num_cq_pages
2958 * PAGE_SIZE,
2959 mem->va, mem->dma);
2960 }
2961 return ret;
2962
6733b39a
JK
2963}
2964
2965static int
2966beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2967 struct hwi_context_memory *phwi_context,
2968 struct hwi_controller *phwi_ctrlr,
2969 unsigned int def_pdu_ring_sz)
2970{
2971 unsigned int idx;
2972 int ret;
2973 struct be_queue_info *dq, *cq;
2974 struct be_dma_mem *mem;
2975 struct be_mem_descriptor *mem_descr;
2976 void *dq_vaddress;
2977
2978 idx = 0;
2979 dq = &phwi_context->be_def_hdrq;
bfead3b2 2980 cq = &phwi_context->be_cq[0];
6733b39a
JK
2981 mem = &dq->dma_mem;
2982 mem_descr = phba->init_mem;
2983 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2984 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2985 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2986 sizeof(struct phys_addr),
2987 sizeof(struct phys_addr), dq_vaddress);
2988 if (ret) {
99bc5d55
JSJ
2989 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2990 "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
6733b39a
JK
2991 return ret;
2992 }
457ff3b7
JK
2993 mem->dma = (unsigned long)mem_descr->mem_array[idx].
2994 bus_address.u.a64.address;
6733b39a
JK
2995 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2996 def_pdu_ring_sz,
2997 phba->params.defpdu_hdr_sz);
2998 if (ret) {
99bc5d55
JSJ
2999 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3000 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
6733b39a
JK
3001 return ret;
3002 }
3003 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
99bc5d55
JSJ
3004 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3005 "BM_%d : iscsi def pdu id is %d\n",
3006 phwi_context->be_def_hdrq.id);
3007
6733b39a
JK
3008 hwi_post_async_buffers(phba, 1);
3009 return 0;
3010}
3011
3012static int
3013beiscsi_create_def_data(struct beiscsi_hba *phba,
3014 struct hwi_context_memory *phwi_context,
3015 struct hwi_controller *phwi_ctrlr,
3016 unsigned int def_pdu_ring_sz)
3017{
3018 unsigned int idx;
3019 int ret;
3020 struct be_queue_info *dataq, *cq;
3021 struct be_dma_mem *mem;
3022 struct be_mem_descriptor *mem_descr;
3023 void *dq_vaddress;
3024
3025 idx = 0;
3026 dataq = &phwi_context->be_def_dataq;
bfead3b2 3027 cq = &phwi_context->be_cq[0];
6733b39a
JK
3028 mem = &dataq->dma_mem;
3029 mem_descr = phba->init_mem;
3030 mem_descr += HWI_MEM_ASYNC_DATA_RING;
3031 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3032 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3033 sizeof(struct phys_addr),
3034 sizeof(struct phys_addr), dq_vaddress);
3035 if (ret) {
99bc5d55
JSJ
3036 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3037 "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
6733b39a
JK
3038 return ret;
3039 }
457ff3b7
JK
3040 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3041 bus_address.u.a64.address;
6733b39a
JK
3042 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3043 def_pdu_ring_sz,
3044 phba->params.defpdu_data_sz);
3045 if (ret) {
99bc5d55
JSJ
3046 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3047 "BM_%d be_cmd_create_default_pdu_queue"
3048 " Failed for DEF PDU DATA\n");
6733b39a
JK
3049 return ret;
3050 }
3051 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
99bc5d55
JSJ
3052 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3053 "BM_%d : iscsi def data id is %d\n",
3054 phwi_context->be_def_dataq.id);
3055
6733b39a 3056 hwi_post_async_buffers(phba, 0);
99bc5d55
JSJ
3057 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3058 "BM_%d : DEFAULT PDU DATA RING CREATED\n");
3059
6733b39a
JK
3060 return 0;
3061}
3062
3063static int
3064beiscsi_post_pages(struct beiscsi_hba *phba)
3065{
3066 struct be_mem_descriptor *mem_descr;
3067 struct mem_array *pm_arr;
3068 unsigned int page_offset, i;
3069 struct be_dma_mem sgl;
3070 int status;
3071
3072 mem_descr = phba->init_mem;
3073 mem_descr += HWI_MEM_SGE;
3074 pm_arr = mem_descr->mem_array;
3075
3076 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3077 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
3078 for (i = 0; i < mem_descr->num_elements; i++) {
3079 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3080 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3081 page_offset,
3082 (pm_arr->size / PAGE_SIZE));
3083 page_offset += pm_arr->size / PAGE_SIZE;
3084 if (status != 0) {
99bc5d55
JSJ
3085 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3086 "BM_%d : post sgl failed.\n");
6733b39a
JK
3087 return status;
3088 }
3089 pm_arr++;
3090 }
99bc5d55
JSJ
3091 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3092 "BM_%d : POSTED PAGES\n");
6733b39a
JK
3093 return 0;
3094}
3095
bfead3b2
JK
3096static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3097{
3098 struct be_dma_mem *mem = &q->dma_mem;
c8b25598 3099 if (mem->va) {
bfead3b2
JK
3100 pci_free_consistent(phba->pcidev, mem->size,
3101 mem->va, mem->dma);
c8b25598
JK
3102 mem->va = NULL;
3103 }
bfead3b2
JK
3104}
3105
3106static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3107 u16 len, u16 entry_size)
3108{
3109 struct be_dma_mem *mem = &q->dma_mem;
3110
3111 memset(q, 0, sizeof(*q));
3112 q->len = len;
3113 q->entry_size = entry_size;
3114 mem->size = len * entry_size;
3115 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
3116 if (!mem->va)
d3ad2bb3 3117 return -ENOMEM;
bfead3b2
JK
3118 memset(mem->va, 0, mem->size);
3119 return 0;
3120}
3121
6733b39a
JK
3122static int
3123beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3124 struct hwi_context_memory *phwi_context,
3125 struct hwi_controller *phwi_ctrlr)
3126{
3127 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
3128 u64 pa_addr_lo;
3129 unsigned int idx, num, i;
3130 struct mem_array *pwrb_arr;
3131 void *wrb_vaddr;
3132 struct be_dma_mem sgl;
3133 struct be_mem_descriptor *mem_descr;
3134 int status;
3135
3136 idx = 0;
3137 mem_descr = phba->init_mem;
3138 mem_descr += HWI_MEM_WRB;
3139 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
3140 GFP_KERNEL);
3141 if (!pwrb_arr) {
99bc5d55
JSJ
3142 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3143 "BM_%d : Memory alloc failed in create wrb ring.\n");
6733b39a
JK
3144 return -ENOMEM;
3145 }
3146 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3147 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3148 num_wrb_rings = mem_descr->mem_array[idx].size /
3149 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
3150
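	/*
	 * Split the WRB memory into one ring per connection, walking across
	 * the allocated fragments as each one is consumed.
	 */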
3151 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3152 if (num_wrb_rings) {
3153 pwrb_arr[num].virtual_address = wrb_vaddr;
3154 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3155 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3156 sizeof(struct iscsi_wrb);
3157 wrb_vaddr += pwrb_arr[num].size;
3158 pa_addr_lo += pwrb_arr[num].size;
3159 num_wrb_rings--;
3160 } else {
3161 idx++;
3162 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3163 pa_addr_lo = mem_descr->mem_array[idx].\
3164 bus_address.u.a64.address;
3165 num_wrb_rings = mem_descr->mem_array[idx].size /
3166 (phba->params.wrbs_per_cxn *
3167 sizeof(struct iscsi_wrb));
3168 pwrb_arr[num].virtual_address = wrb_vaddr;
3169 pwrb_arr[num].bus_address.u.a64.address\
3170 = pa_addr_lo;
3171 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3172 sizeof(struct iscsi_wrb);
3173 wrb_vaddr += pwrb_arr[num].size;
3174 pa_addr_lo += pwrb_arr[num].size;
3175 num_wrb_rings--;
3176 }
3177 }
3178 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3179 wrb_mem_index = 0;
3180 offset = 0;
3181 size = 0;
3182
3183 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3184 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3185 &phwi_context->be_wrbq[i]);
3186 if (status != 0) {
99bc5d55
JSJ
3187 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3188 "BM_%d : wrbq create failed.");
1462b8ff 3189 kfree(pwrb_arr);
6733b39a
JK
3190 return status;
3191 }
7da50879
JK
3192 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
3193 id;
6733b39a
JK
3194 }
3195 kfree(pwrb_arr);
3196 return 0;
3197}
3198
3199static void free_wrb_handles(struct beiscsi_hba *phba)
3200{
3201 unsigned int index;
3202 struct hwi_controller *phwi_ctrlr;
3203 struct hwi_wrb_context *pwrb_context;
3204
3205 phwi_ctrlr = phba->phwi_ctrlr;
3206 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
3207 pwrb_context = &phwi_ctrlr->wrb_context[index];
3208 kfree(pwrb_context->pwrb_handle_base);
3209 kfree(pwrb_context->pwrb_handle_basestd);
3210 }
3211}
3212
bfead3b2
JK
3213static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3214{
3215 struct be_queue_info *q;
3216 struct be_ctrl_info *ctrl = &phba->ctrl;
3217
3218 q = &phba->ctrl.mcc_obj.q;
3219 if (q->created)
3220 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3221 be_queue_free(phba, q);
3222
3223 q = &phba->ctrl.mcc_obj.cq;
3224 if (q->created)
3225 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3226 be_queue_free(phba, q);
3227}
3228
6733b39a
JK
3229static void hwi_cleanup(struct beiscsi_hba *phba)
3230{
3231 struct be_queue_info *q;
3232 struct be_ctrl_info *ctrl = &phba->ctrl;
3233 struct hwi_controller *phwi_ctrlr;
3234 struct hwi_context_memory *phwi_context;
bfead3b2 3235 int i, eq_num;
6733b39a
JK
3236
3237 phwi_ctrlr = phba->phwi_ctrlr;
3238 phwi_context = phwi_ctrlr->phwi_ctxt;
3239 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3240 q = &phwi_context->be_wrbq[i];
3241 if (q->created)
3242 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3243 }
6733b39a
JK
3244 free_wrb_handles(phba);
3245
3246 q = &phwi_context->be_def_hdrq;
3247 if (q->created)
3248 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3249
3250 q = &phwi_context->be_def_dataq;
3251 if (q->created)
3252 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3253
3254 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3255
bfead3b2
JK
3256 for (i = 0; i < (phba->num_cpus); i++) {
3257 q = &phwi_context->be_cq[i];
3258 if (q->created)
3259 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3260 }
3261 if (phba->msix_enabled)
3262 eq_num = 1;
3263 else
3264 eq_num = 0;
3265 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
3266 q = &phwi_context->be_eq[i].q;
3267 if (q->created)
3268 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3269 }
3270 be_mcc_queues_destroy(phba);
3271}
6733b39a 3272
bfead3b2
JK
3273static int be_mcc_queues_create(struct beiscsi_hba *phba,
3274 struct hwi_context_memory *phwi_context)
3275{
3276 struct be_queue_info *q, *cq;
3277 struct be_ctrl_info *ctrl = &phba->ctrl;
3278
3279 /* Alloc MCC compl queue */
3280 cq = &phba->ctrl.mcc_obj.cq;
3281 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3282 sizeof(struct be_mcc_compl)))
3283 goto err;
3284 /* Ask BE to create MCC compl queue; */
3285 if (phba->msix_enabled) {
3286 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3287 [phba->num_cpus].q, false, true, 0))
3288 goto mcc_cq_free;
3289 } else {
3290 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3291 false, true, 0))
3292 goto mcc_cq_free;
3293 }
3294
3295 /* Alloc MCC queue */
3296 q = &phba->ctrl.mcc_obj.q;
3297 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3298 goto mcc_cq_destroy;
3299
3300 /* Ask BE to create MCC queue */
35e66019 3301 if (beiscsi_cmd_mccq_create(phba, q, cq))
bfead3b2
JK
3302 goto mcc_q_free;
3303
3304 return 0;
3305
3306mcc_q_free:
3307 be_queue_free(phba, q);
3308mcc_cq_destroy:
3309 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3310mcc_cq_free:
3311 be_queue_free(phba, cq);
3312err:
d3ad2bb3 3313 return -ENOMEM;
bfead3b2
JK
3314}
3315
107dfcba
JSJ
3316/**
3317 * find_num_cpus()- Get the CPU online count
3318 * @phba: ptr to priv structure
3319 *
3320 * CPU count is used for creating EQ.
3321 **/
3322static void find_num_cpus(struct beiscsi_hba *phba)
bfead3b2
JK
3323{
3324 int num_cpus = 0;
3325
3326 num_cpus = num_online_cpus();
bfead3b2 3327
107dfcba
JSJ
3328 phba->num_cpus = (num_cpus >= BEISCSI_MAX_NUM_CPU) ?
3329 (BEISCSI_MAX_NUM_CPU - 1) : num_cpus;
6733b39a
JK
3330}
3331
3332static int hwi_init_port(struct beiscsi_hba *phba)
3333{
3334 struct hwi_controller *phwi_ctrlr;
3335 struct hwi_context_memory *phwi_context;
3336 unsigned int def_pdu_ring_sz;
3337 struct be_ctrl_info *ctrl = &phba->ctrl;
3338 int status;
3339
3340 def_pdu_ring_sz =
3341 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
3342 phwi_ctrlr = phba->phwi_ctrlr;
6733b39a 3343 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
3344 phwi_context->max_eqd = 0;
3345 phwi_context->min_eqd = 0;
3346 phwi_context->cur_eqd = 64;
6733b39a 3347 be_cmd_fw_initialize(&phba->ctrl);
bfead3b2
JK
3348
3349 status = beiscsi_create_eqs(phba, phwi_context);
6733b39a 3350 if (status != 0) {
99bc5d55
JSJ
3351 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3352 "BM_%d : EQ not created\n");
6733b39a
JK
3353 goto error;
3354 }
3355
bfead3b2
JK
3356 status = be_mcc_queues_create(phba, phwi_context);
3357 if (status != 0)
3358 goto error;
3359
3360 status = mgmt_check_supported_fw(ctrl, phba);
6733b39a 3361 if (status != 0) {
99bc5d55
JSJ
3362 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3363 "BM_%d : Unsupported fw version\n");
6733b39a
JK
3364 goto error;
3365 }
3366
bfead3b2 3367 status = beiscsi_create_cqs(phba, phwi_context);
6733b39a 3368 if (status != 0) {
99bc5d55
JSJ
3369 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3370 "BM_%d : CQ not created\n");
6733b39a
JK
3371 goto error;
3372 }
3373
3374 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
3375 def_pdu_ring_sz);
3376 if (status != 0) {
99bc5d55
JSJ
3377 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3378 "BM_%d : Default Header not created\n");
6733b39a
JK
3379 goto error;
3380 }
3381
3382 status = beiscsi_create_def_data(phba, phwi_context,
3383 phwi_ctrlr, def_pdu_ring_sz);
3384 if (status != 0) {
99bc5d55
JSJ
3385 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3386 "BM_%d : Default Data not created\n");
6733b39a
JK
3387 goto error;
3388 }
3389
3390 status = beiscsi_post_pages(phba);
3391 if (status != 0) {
99bc5d55
JSJ
3392 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3393 "BM_%d : Post SGL Pages Failed\n");
6733b39a
JK
3394 goto error;
3395 }
3396
3397 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3398 if (status != 0) {
99bc5d55
JSJ
3399 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3400 "BM_%d : WRB Rings not created\n");
6733b39a
JK
3401 goto error;
3402 }
3403
99bc5d55
JSJ
3404 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3405 "BM_%d : hwi_init_port success\n");
6733b39a
JK
3406 return 0;
3407
3408error:
99bc5d55
JSJ
3409 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3410 "BM_%d : hwi_init_port failed\n");
6733b39a 3411 hwi_cleanup(phba);
a49e06d5 3412 return status;
6733b39a
JK
3413}
3414
6733b39a
JK
3415static int hwi_init_controller(struct beiscsi_hba *phba)
3416{
3417 struct hwi_controller *phwi_ctrlr;
3418
3419 phwi_ctrlr = phba->phwi_ctrlr;
3420 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3421 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3422 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
99bc5d55
JSJ
3423 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3424 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
3425 phwi_ctrlr->phwi_ctxt);
6733b39a 3426 } else {
99bc5d55
JSJ
3427 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3428 "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3429 "than one element. Failing to load\n");
6733b39a
JK
3430 return -ENOMEM;
3431 }
3432
3433 iscsi_init_global_templates(phba);
3ec78271
JK
3434 if (beiscsi_init_wrb_handle(phba))
3435 return -ENOMEM;
3436
6733b39a
JK
3437 hwi_init_async_pdu_ctx(phba);
3438 if (hwi_init_port(phba) != 0) {
99bc5d55
JSJ
3439 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3440 "BM_%d : hwi_init_controller failed\n");
3441
6733b39a
JK
3442 return -ENOMEM;
3443 }
3444 return 0;
3445}
3446
3447static void beiscsi_free_mem(struct beiscsi_hba *phba)
3448{
3449 struct be_mem_descriptor *mem_descr;
3450 int i, j;
3451
3452 mem_descr = phba->init_mem;
3453 i = 0;
3454 j = 0;
3455 for (i = 0; i < SE_MEM_MAX; i++) {
3456 for (j = mem_descr->num_elements; j > 0; j--) {
3457 pci_free_consistent(phba->pcidev,
3458 mem_descr->mem_array[j - 1].size,
3459 mem_descr->mem_array[j - 1].virtual_address,
457ff3b7
JK
3460 (unsigned long)mem_descr->mem_array[j - 1].
3461 bus_address.u.a64.address);
6733b39a
JK
3462 }
3463 kfree(mem_descr->mem_array);
3464 mem_descr++;
3465 }
3466 kfree(phba->init_mem);
3467 kfree(phba->phwi_ctrlr);
3468}
3469
3470static int beiscsi_init_controller(struct beiscsi_hba *phba)
3471{
3472 int ret = -ENOMEM;
3473
3474 ret = beiscsi_get_memory(phba);
3475 if (ret < 0) {
99bc5d55
JSJ
3476 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3477 "BM_%d : beiscsi_dev_probe - "
3478 "Failed in beiscsi_alloc_memory\n");
6733b39a
JK
3479 return ret;
3480 }
3481
3482 ret = hwi_init_controller(phba);
3483 if (ret)
3484 goto free_init;
99bc5d55
JSJ
3485 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3486 "BM_%d : Return success from beiscsi_init_controller\n");
3487
6733b39a
JK
3488 return 0;
3489
3490free_init:
3491 beiscsi_free_mem(phba);
a49e06d5 3492 return ret;
6733b39a
JK
3493}
3494
3495static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3496{
3497 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3498 struct sgl_handle *psgl_handle;
3499 struct iscsi_sge *pfrag;
3500 unsigned int arr_index, i, idx;
3501
3502 phba->io_sgl_hndl_avbl = 0;
3503 phba->eh_sgl_hndl_avbl = 0;
bfead3b2 3504
6733b39a
JK
3505 mem_descr_sglh = phba->init_mem;
3506 mem_descr_sglh += HWI_MEM_SGLH;
3507 if (1 == mem_descr_sglh->num_elements) {
3508 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3509 phba->params.ios_per_ctrl,
3510 GFP_KERNEL);
3511 if (!phba->io_sgl_hndl_base) {
99bc5d55
JSJ
3512 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3513 "BM_%d : Mem Alloc Failed. Failing to load\n");
6733b39a
JK
3514 return -ENOMEM;
3515 }
3516 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3517 (phba->params.icds_per_ctrl -
3518 phba->params.ios_per_ctrl),
3519 GFP_KERNEL);
3520 if (!phba->eh_sgl_hndl_base) {
3521 kfree(phba->io_sgl_hndl_base);
99bc5d55
JSJ
3522 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3523 "BM_%d : Mem Alloc Failed. Failing to load\n");
6733b39a
JK
3524 return -ENOMEM;
3525 }
3526 } else {
99bc5d55
JSJ
3527 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3528 "BM_%d : HWI_MEM_SGLH is more than one element. "
3529 "Failing to load\n");
6733b39a
JK
3530 return -ENOMEM;
3531 }
3532
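    /*
     * Carve up the pre-allocated sgl_handle memory: the first ios_per_ctrl
     * handles back regular I/O, the remaining (icds_per_ctrl -
     * ios_per_ctrl) handles are reserved for login/mgmt tasks.
     */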
3533 arr_index = 0;
3534 idx = 0;
3535 while (idx < mem_descr_sglh->num_elements) {
3536 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3537
3538 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3539 sizeof(struct sgl_handle)); i++) {
3540 if (arr_index < phba->params.ios_per_ctrl) {
3541 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3542 phba->io_sgl_hndl_avbl++;
3543 arr_index++;
3544 } else {
3545 phba->eh_sgl_hndl_base[arr_index -
3546 phba->params.ios_per_ctrl] =
3547 psgl_handle;
3548 arr_index++;
3549 phba->eh_sgl_hndl_avbl++;
3550 }
3551 psgl_handle++;
3552 }
3553 idx++;
3554 }
99bc5d55
JSJ
3555 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3556 "BM_%d : phba->io_sgl_hndl_avbl=%d "
3557 "phba->eh_sgl_hndl_avbl=%d\n",
3558 phba->io_sgl_hndl_avbl,
3559 phba->eh_sgl_hndl_avbl);
3560
6733b39a
JK
3561 mem_descr_sg = phba->init_mem;
3562 mem_descr_sg += HWI_MEM_SGE;
99bc5d55
JSJ
3563 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3564 "\n BM_%d : mem_descr_sg->num_elements=%d\n",
3565 mem_descr_sg->num_elements);
3566
6733b39a
JK
3567 arr_index = 0;
3568 idx = 0;
3569 while (idx < mem_descr_sg->num_elements) {
3570 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3571
3572 for (i = 0;
3573 i < (mem_descr_sg->mem_array[idx].size) /
3574 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3575 i++) {
3576 if (arr_index < phba->params.ios_per_ctrl)
3577 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3578 else
3579 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3580 phba->params.ios_per_ctrl];
3581 psgl_handle->pfrag = pfrag;
3582 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3583 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3584 pfrag += phba->params.num_sge_per_io;
3585 psgl_handle->sgl_index =
7da50879 3586 phba->fw_config.iscsi_icd_start + arr_index++;
6733b39a
JK
3587 }
3588 idx++;
3589 }
3590 phba->io_sgl_free_index = 0;
3591 phba->io_sgl_alloc_index = 0;
3592 phba->eh_sgl_free_index = 0;
3593 phba->eh_sgl_alloc_index = 0;
3594 return 0;
3595}
3596
3597static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3598{
3599 int i, new_cid;
3600
c2462288 3601 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
6733b39a
JK
3602 GFP_KERNEL);
3603 if (!phba->cid_array) {
99bc5d55
JSJ
3604 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3605 "BM_%d : Failed to allocate memory in "
3606 "hba_setup_cid_tbls\n");
6733b39a
JK
3607 return -ENOMEM;
3608 }
c2462288 3609 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
6733b39a
JK
3610 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3611 if (!phba->ep_array) {
99bc5d55
JSJ
3612 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3613 "BM_%d : Failed to allocate memory in "
3614 "hba_setup_cid_tbls\n");
6733b39a
JK
3615 kfree(phba->cid_array);
3616 return -ENOMEM;
3617 }
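    /*
     * CIDs are handed out with a stride of two (the ep_array above is
     * sized for cxns_per_ctrl * 2 endpoints), so only alternate CIDs
     * starting at iscsi_cid_start are used for connections.
     */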
7da50879 3618 new_cid = phba->fw_config.iscsi_cid_start;
6733b39a
JK
3619 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3620 phba->cid_array[i] = new_cid;
3621 new_cid += 2;
3622 }
3623 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3624 return 0;
3625}
3626
238f6b72 3627static void hwi_enable_intr(struct beiscsi_hba *phba)
6733b39a
JK
3628{
3629 struct be_ctrl_info *ctrl = &phba->ctrl;
3630 struct hwi_controller *phwi_ctrlr;
3631 struct hwi_context_memory *phwi_context;
3632 struct be_queue_info *eq;
3633 u8 __iomem *addr;
bfead3b2 3634 u32 reg, i;
6733b39a
JK
3635 u32 enabled;
3636
3637 phwi_ctrlr = phba->phwi_ctrlr;
3638 phwi_context = phwi_ctrlr->phwi_ctxt;
3639
6733b39a
JK
3640 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3641 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3642 reg = ioread32(addr);
6733b39a
JK
3643
3644 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3645 if (!enabled) {
3646 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
99bc5d55
JSJ
3647 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3648 "BM_%d : reg =x%08x addr=%p\n", reg, addr);
6733b39a 3649 iowrite32(reg, addr);
665d6d94
JK
3650 }
3651
3652 if (!phba->msix_enabled) {
3653 eq = &phwi_context->be_eq[0].q;
99bc5d55
JSJ
3654 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3655 "BM_%d : eq->id=%d\n", eq->id);
3656
665d6d94
JK
3657 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3658 } else {
3659 for (i = 0; i <= phba->num_cpus; i++) {
3660 eq = &phwi_context->be_eq[i].q;
99bc5d55
JSJ
3661 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3662 "BM_%d : eq->id=%d\n", eq->id);
bfead3b2
JK
3663 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3664 }
c03af1ae 3665 }
6733b39a
JK
3666}
3667
3668static void hwi_disable_intr(struct beiscsi_hba *phba)
3669{
3670 struct be_ctrl_info *ctrl = &phba->ctrl;
3671
3672 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3673 u32 reg = ioread32(addr);
3674
3675 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3676 if (enabled) {
3677 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3678 iowrite32(reg, addr);
3679 } else
99bc5d55
JSJ
3680 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
3681 "BM_%d : In hwi_disable_intr, Already Disabled\n");
6733b39a
JK
3682}
3683
9aef4200
JSJ
3684/**
3685 * beiscsi_get_boot_info()- Get the boot session info
3686 * @phba: The device priv structure instance
3687 *
3688 * Get the boot target info and store in driver priv structure
3689 *
3690 * return values
3691 * Success: 0
3692 * Failure: Non-Zero Value
3693 **/
c7acc5b8
JK
3694static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3695{
0e43895e 3696 struct be_cmd_get_session_resp *session_resp;
c7acc5b8
JK
3697 struct be_mcc_wrb *wrb;
3698 struct be_dma_mem nonemb_cmd;
3699 unsigned int tag, wrb_num;
3700 unsigned short status, extd_status;
9aef4200 3701 unsigned int s_handle;
c7acc5b8 3702 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
f457a46f 3703 int ret = -ENOMEM;
c7acc5b8 3704
9aef4200
JSJ
3705 /* Get the session handle of the boot target */
3706 ret = be_mgmt_get_boot_shandle(phba, &s_handle);
3707 if (ret) {
99bc5d55
JSJ
3708 beiscsi_log(phba, KERN_ERR,
3709 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3710 "BM_%d : No boot session\n");
9aef4200 3711 return ret;
c7acc5b8 3712 }
c7acc5b8
JK
3713 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
3714 sizeof(*session_resp),
3715 &nonemb_cmd.dma);
3716 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
3717 beiscsi_log(phba, KERN_ERR,
3718 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3719 "BM_%d : Failed to allocate memory for "
3720 "beiscsi_get_session_info\n");
3721
c7acc5b8
JK
3722 return -ENOMEM;
3723 }
3724
3725 memset(nonemb_cmd.va, 0, sizeof(*session_resp));
9aef4200 3726 tag = mgmt_get_session_info(phba, s_handle,
0e43895e 3727 &nonemb_cmd);
c7acc5b8 3728 if (!tag) {
99bc5d55
JSJ
3729 beiscsi_log(phba, KERN_ERR,
3730 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3731 "BM_%d : beiscsi_get_session_info"
3732 " Failed\n");
3733
c7acc5b8
JK
3734 goto boot_freemem;
3735 } else
3736 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3737 phba->ctrl.mcc_numtag[tag]);
3738
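    /*
     * The MCC completion word packs three fields: bits 23:16 carry the
     * WRB index, bits 15:8 the extended status, bits 7:0 the status.
     */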
3739 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3740 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3741 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3742 if (status || extd_status) {
99bc5d55
JSJ
3743 beiscsi_log(phba, KERN_ERR,
3744 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3745 "BM_%d : beiscsi_get_session_info Failed"
3746 " status = %d extd_status = %d\n",
3747 status, extd_status);
3748
c7acc5b8
JK
3749 free_mcc_tag(&phba->ctrl, tag);
3750 goto boot_freemem;
3751 }
3752 wrb = queue_get_wrb(mccq, wrb_num);
3753 free_mcc_tag(&phba->ctrl, tag);
3754 session_resp = nonemb_cmd.va;
f457a46f 3755
c7acc5b8
JK
3756 memcpy(&phba->boot_sess, &session_resp->session_info,
3757 sizeof(struct mgmt_session_info));
f457a46f
MC
3758 ret = 0;
3759
c7acc5b8
JK
3760boot_freemem:
3761 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3762 nonemb_cmd.va, nonemb_cmd.dma);
f457a46f
MC
3763 return ret;
3764}
3765
3766static void beiscsi_boot_release(void *data)
3767{
3768 struct beiscsi_hba *phba = data;
3769
3770 scsi_host_put(phba->shost);
3771}
3772
3773static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
3774{
3775 struct iscsi_boot_kobj *boot_kobj;
3776
3777 /* get boot info using mgmt cmd */
3778 if (beiscsi_get_boot_info(phba))
3779 /* Try to see if we can carry on without this */
3780 return 0;
3781
3782 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
3783 if (!phba->boot_kset)
3784 return -ENOMEM;
3785
3786 /* get a ref because the show function will ref the phba */
3787 if (!scsi_host_get(phba->shost))
3788 goto free_kset;
3789 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
3790 beiscsi_show_boot_tgt_info,
3791 beiscsi_tgt_get_attr_visibility,
3792 beiscsi_boot_release);
3793 if (!boot_kobj)
3794 goto put_shost;
3795
3796 if (!scsi_host_get(phba->shost))
3797 goto free_kset;
3798 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
3799 beiscsi_show_boot_ini_info,
3800 beiscsi_ini_get_attr_visibility,
3801 beiscsi_boot_release);
3802 if (!boot_kobj)
3803 goto put_shost;
3804
3805 if (!scsi_host_get(phba->shost))
3806 goto free_kset;
3807 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
3808 beiscsi_show_boot_eth_info,
3809 beiscsi_eth_get_attr_visibility,
3810 beiscsi_boot_release);
3811 if (!boot_kobj)
3812 goto put_shost;
3813 return 0;
3814
3815put_shost:
3816 scsi_host_put(phba->shost);
3817free_kset:
3818 iscsi_boot_destroy_kset(phba->boot_kset);
c7acc5b8
JK
3819 return -ENOMEM;
3820}
3821
6733b39a
JK
3822static int beiscsi_init_port(struct beiscsi_hba *phba)
3823{
3824 int ret;
3825
3826 ret = beiscsi_init_controller(phba);
3827 if (ret < 0) {
99bc5d55
JSJ
3828 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3829 "BM_%d : beiscsi_dev_probe - Failed in "
3830 "beiscsi_init_controller\n");
6733b39a
JK
3831 return ret;
3832 }
3833 ret = beiscsi_init_sgl_handle(phba);
3834 if (ret < 0) {
99bc5d55
JSJ
3835 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3836 "BM_%d : beiscsi_dev_probe - Failed in "
3837 "beiscsi_init_sgl_handle\n");
6733b39a
JK
3838 goto do_cleanup_ctrlr;
3839 }
3840
3841 if (hba_setup_cid_tbls(phba)) {
99bc5d55
JSJ
3842 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3843 "BM_%d : Failed in hba_setup_cid_tbls\n");
6733b39a
JK
3844 kfree(phba->io_sgl_hndl_base);
3845 kfree(phba->eh_sgl_hndl_base);
3846 goto do_cleanup_ctrlr;
3847 }
3848
3849 return ret;
3850
3851do_cleanup_ctrlr:
3852 hwi_cleanup(phba);
3853 return ret;
3854}
3855
3856static void hwi_purge_eq(struct beiscsi_hba *phba)
3857{
3858 struct hwi_controller *phwi_ctrlr;
3859 struct hwi_context_memory *phwi_context;
3860 struct be_queue_info *eq;
3861 struct be_eq_entry *eqe = NULL;
bfead3b2 3862 int i, eq_msix;
756d29c8 3863 unsigned int num_processed;
6733b39a
JK
3864
3865 phwi_ctrlr = phba->phwi_ctrlr;
3866 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
3867 if (phba->msix_enabled)
3868 eq_msix = 1;
3869 else
3870 eq_msix = 0;
6733b39a 3871
bfead3b2
JK
3872 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3873 eq = &phwi_context->be_eq[i].q;
6733b39a 3874 eqe = queue_tail_node(eq);
756d29c8 3875 num_processed = 0;
bfead3b2
JK
3876 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3877 & EQE_VALID_MASK) {
3878 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3879 queue_tail_inc(eq);
3880 eqe = queue_tail_node(eq);
756d29c8 3881 num_processed++;
bfead3b2 3882 }
756d29c8
JK
3883
3884 if (num_processed)
3885 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
6733b39a
JK
3886 }
3887}
3888
3889static void beiscsi_clean_port(struct beiscsi_hba *phba)
3890{
03a12310 3891 int mgmt_status;
6733b39a
JK
3892
3893 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3894 if (mgmt_status)
99bc5d55
JSJ
3895 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
3896 "BM_%d : mgmt_epfw_cleanup FAILED\n");
756d29c8 3897
6733b39a 3898 hwi_purge_eq(phba);
756d29c8 3899 hwi_cleanup(phba);
6733b39a
JK
3900 kfree(phba->io_sgl_hndl_base);
3901 kfree(phba->eh_sgl_hndl_base);
3902 kfree(phba->cid_array);
3903 kfree(phba->ep_array);
3904}
3905
d629c471
JSJ
3906/**
3907 * beiscsi_cleanup_task()- Free driver resources of the task
3908 * @task: ptr to the iscsi task
3909 *
3910 **/
1282ab76
MC
3911static void beiscsi_cleanup_task(struct iscsi_task *task)
3912{
3913 struct beiscsi_io_task *io_task = task->dd_data;
3914 struct iscsi_conn *conn = task->conn;
3915 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3916 struct beiscsi_hba *phba = beiscsi_conn->phba;
3917 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3918 struct hwi_wrb_context *pwrb_context;
3919 struct hwi_controller *phwi_ctrlr;
3920
3921 phwi_ctrlr = phba->phwi_ctrlr;
3922 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3923 - phba->fw_config.iscsi_cid_start];
3924
3925 if (io_task->cmd_bhs) {
3926 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3927 io_task->bhs_pa.u.a64.address);
3928 io_task->cmd_bhs = NULL;
3929 }
3930
3931 if (task->sc) {
3932 if (io_task->pwrb_handle) {
3933 free_wrb_handle(phba, pwrb_context,
3934 io_task->pwrb_handle);
3935 io_task->pwrb_handle = NULL;
3936 }
3937
3938 if (io_task->psgl_handle) {
3939 spin_lock(&phba->io_sgl_lock);
3940 free_io_sgl_handle(phba, io_task->psgl_handle);
3941 spin_unlock(&phba->io_sgl_lock);
3942 io_task->psgl_handle = NULL;
3943 }
3944 } else {
3945 if (!beiscsi_conn->login_in_progress) {
3946 if (io_task->pwrb_handle) {
3947 free_wrb_handle(phba, pwrb_context,
3948 io_task->pwrb_handle);
3949 io_task->pwrb_handle = NULL;
3950 }
3951 if (io_task->psgl_handle) {
3952 spin_lock(&phba->mgmt_sgl_lock);
3953 free_mgmt_sgl_handle(phba,
3954 io_task->psgl_handle);
3955 spin_unlock(&phba->mgmt_sgl_lock);
3956 io_task->psgl_handle = NULL;
3957 }
d629c471
JSJ
3958 if (io_task->mtask_addr) {
3959 pci_unmap_single(phba->pcidev,
3960 io_task->mtask_addr,
3961 io_task->mtask_data_count,
3962 PCI_DMA_TODEVICE);
3963 io_task->mtask_addr = 0;
3964 }
1282ab76
MC
3965 }
3966 }
3967}
3968
6733b39a
JK
3969void
3970beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3971 struct beiscsi_offload_params *params)
3972{
3973 struct wrb_handle *pwrb_handle;
3974 struct iscsi_target_context_update_wrb *pwrb = NULL;
3975 struct be_mem_descriptor *mem_descr;
3976 struct beiscsi_hba *phba = beiscsi_conn->phba;
1282ab76
MC
3977 struct iscsi_task *task = beiscsi_conn->task;
3978 struct iscsi_session *session = task->conn->session;
6733b39a
JK
3979 u32 doorbell = 0;
3980
3981 /*
3982 * We can always use 0 here because it is reserved by libiscsi for
3983 * login/startup related tasks.
3984 */
1282ab76
MC
3985 beiscsi_conn->login_in_progress = 0;
3986 spin_lock_bh(&session->lock);
3987 beiscsi_cleanup_task(task);
3988 spin_unlock_bh(&session->lock);
3989
7da50879 3990 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
d5431488 3991 phba->fw_config.iscsi_cid_start));
6733b39a
JK
3992 pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3993 memset(pwrb, 0, sizeof(*pwrb));
3994 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3995 max_burst_length, pwrb, params->dw[offsetof
3996 (struct amap_beiscsi_offload_params,
3997 max_burst_length) / 32]);
3998 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3999 max_send_data_segment_length, pwrb,
4000 params->dw[offsetof(struct amap_beiscsi_offload_params,
4001 max_send_data_segment_length) / 32]);
4002 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4003 first_burst_length,
4004 pwrb,
4005 params->dw[offsetof(struct amap_beiscsi_offload_params,
4006 first_burst_length) / 32]);
4007
4008 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
4009 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4010 erl) / 32] & OFFLD_PARAMS_ERL));
4011 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
4012 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4013 dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
4014 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
4015 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4016 hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
4017 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
4018 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4019 ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
4020 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
4021 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4022 imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
4023 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
4024 pwrb,
4025 (params->dw[offsetof(struct amap_beiscsi_offload_params,
4026 exp_statsn) / 32] + 1));
4027 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
4028 0x7);
4029 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
4030 pwrb, pwrb_handle->wrb_index);
4031 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
4032 pwrb, pwrb_handle->nxt_wrb_index);
4033 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4034 session_state, pwrb, 0);
4035 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
4036 pwrb, 1);
4037 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
4038 pwrb, 0);
4039 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
4040 0);
4041
4042 mem_descr = phba->init_mem;
4043 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
4044
4045 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4046 pad_buffer_addr_hi, pwrb,
4047 mem_descr->mem_array[0].bus_address.u.a32.address_hi);
4048 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
4049 pad_buffer_addr_lo, pwrb,
4050 mem_descr->mem_array[0].bus_address.u.a32.address_lo);
4051
4052 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
4053
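    /* Compose the TX doorbell: connection CID, posting WRB index, and a posted count of one. */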
4054 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
32951dd8 4055 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
bfead3b2 4056 << DB_DEF_PDU_WRB_INDEX_SHIFT;
6733b39a
JK
4057 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4058
4059 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4060}
4061
4062static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4063 int *index, int *age)
4064{
bfead3b2 4065 *index = (int)itt;
6733b39a
JK
4066 if (age)
4067 *age = conn->session->age;
4068}
4069
4070/**
4071 * beiscsi_alloc_pdu - allocates pdu and related resources
4072 * @task: libiscsi task
4073 * @opcode: opcode of pdu for task
4074 *
4075 * This is called with the session lock held. It will allocate
4076 * the wrb and sgl if needed for the command. And it will prep
4077 * the pdu's itt. beiscsi_parse_pdu will later translate
4078 * the pdu itt to the libiscsi task itt.
4079 */
4080static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4081{
4082 struct beiscsi_io_task *io_task = task->dd_data;
4083 struct iscsi_conn *conn = task->conn;
4084 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4085 struct beiscsi_hba *phba = beiscsi_conn->phba;
4086 struct hwi_wrb_context *pwrb_context;
4087 struct hwi_controller *phwi_ctrlr;
4088 itt_t itt;
2afc95bf
JK
4089 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4090 dma_addr_t paddr;
6733b39a 4091
2afc95bf 4092 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
bc7accec 4093 GFP_ATOMIC, &paddr);
2afc95bf
JK
4094 if (!io_task->cmd_bhs)
4095 return -ENOMEM;
2afc95bf 4096 io_task->bhs_pa.u.a64.address = paddr;
bfead3b2 4097 io_task->libiscsi_itt = (itt_t)task->itt;
6733b39a
JK
4098 io_task->conn = beiscsi_conn;
4099
4100 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4101 task->hdr_max = sizeof(struct be_cmd_bhs);
d2cecf0d 4102 io_task->psgl_handle = NULL;
3ec78271 4103 io_task->pwrb_handle = NULL;
6733b39a
JK
4104
4105 if (task->sc) {
4106 spin_lock(&phba->io_sgl_lock);
4107 io_task->psgl_handle = alloc_io_sgl_handle(phba);
4108 spin_unlock(&phba->io_sgl_lock);
8359c79b
JSJ
4109 if (!io_task->psgl_handle) {
4110 beiscsi_log(phba, KERN_ERR,
4111 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4112 "BM_%d : Alloc of IO_SGL_ICD Failed "
4113 "for the CID : %d\n",
4114 beiscsi_conn->beiscsi_conn_cid);
2afc95bf 4115 goto free_hndls;
8359c79b 4116 }
d2cecf0d
JK
4117 io_task->pwrb_handle = alloc_wrb_handle(phba,
4118 beiscsi_conn->beiscsi_conn_cid -
4119 phba->fw_config.iscsi_cid_start);
8359c79b
JSJ
4120 if (!io_task->pwrb_handle) {
4121 beiscsi_log(phba, KERN_ERR,
4122 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4123 "BM_%d : Alloc of WRB_HANDLE Failed "
4124 "for the CID : %d\n",
4125 beiscsi_conn->beiscsi_conn_cid);
d2cecf0d 4126 goto free_io_hndls;
8359c79b 4127 }
6733b39a
JK
4128 } else {
4129 io_task->scsi_cmnd = NULL;
d7aea67b 4130 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
6733b39a
JK
4131 if (!beiscsi_conn->login_in_progress) {
4132 spin_lock(&phba->mgmt_sgl_lock);
4133 io_task->psgl_handle = (struct sgl_handle *)
4134 alloc_mgmt_sgl_handle(phba);
4135 spin_unlock(&phba->mgmt_sgl_lock);
8359c79b
JSJ
4136 if (!io_task->psgl_handle) {
4137 beiscsi_log(phba, KERN_ERR,
4138 BEISCSI_LOG_IO |
4139 BEISCSI_LOG_CONFIG,
4140 "BM_%d : Alloc of MGMT_SGL_ICD Failed "
4141 "for the CID : %d\n",
4142 beiscsi_conn->
4143 beiscsi_conn_cid);
2afc95bf 4144 goto free_hndls;
8359c79b 4145 }
2afc95bf 4146
6733b39a
JK
4147 beiscsi_conn->login_in_progress = 1;
4148 beiscsi_conn->plogin_sgl_handle =
4149 io_task->psgl_handle;
d2cecf0d
JK
4150 io_task->pwrb_handle =
4151 alloc_wrb_handle(phba,
4152 beiscsi_conn->beiscsi_conn_cid -
4153 phba->fw_config.iscsi_cid_start);
8359c79b
JSJ
4154 if (!io_task->pwrb_handle) {
4155 beiscsi_log(phba, KERN_ERR,
4156 BEISCSI_LOG_IO |
4157 BEISCSI_LOG_CONFIG,
4158 "BM_%d : Alloc of WRB_HANDLE Failed "
4159 "for the CID : %d\n",
4160 beiscsi_conn->
4161 beiscsi_conn_cid);
4162 goto free_mgmt_hndls;
4163 }
d2cecf0d
JK
4164 beiscsi_conn->plogin_wrb_handle =
4165 io_task->pwrb_handle;
4166
6733b39a
JK
4167 } else {
4168 io_task->psgl_handle =
4169 beiscsi_conn->plogin_sgl_handle;
d2cecf0d
JK
4170 io_task->pwrb_handle =
4171 beiscsi_conn->plogin_wrb_handle;
6733b39a 4172 }
1282ab76 4173 beiscsi_conn->task = task;
6733b39a
JK
4174 } else {
4175 spin_lock(&phba->mgmt_sgl_lock);
4176 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4177 spin_unlock(&phba->mgmt_sgl_lock);
8359c79b
JSJ
4178 if (!io_task->psgl_handle) {
4179 beiscsi_log(phba, KERN_ERR,
4180 BEISCSI_LOG_IO |
4181 BEISCSI_LOG_CONFIG,
4182 "BM_%d : Alloc of MGMT_SGL_ICD Failed "
4183 "for the CID : %d\n",
4184 beiscsi_conn->
4185 beiscsi_conn_cid);
2afc95bf 4186 goto free_hndls;
8359c79b 4187 }
d2cecf0d
JK
4188 io_task->pwrb_handle =
4189 alloc_wrb_handle(phba,
4190 beiscsi_conn->beiscsi_conn_cid -
4191 phba->fw_config.iscsi_cid_start);
8359c79b
JSJ
4192 if (!io_task->pwrb_handle) {
4193 beiscsi_log(phba, KERN_ERR,
4194 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4195 "BM_%d : Alloc of WRB_HANDLE Failed "
4196 "for the CID : %d\n",
4197 beiscsi_conn->beiscsi_conn_cid);
d2cecf0d 4198 goto free_mgmt_hndls;
8359c79b 4199 }
d2cecf0d 4200
6733b39a
JK
4201 }
4202 }
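    /*
     * Build the on-wire itt: WRB index in the upper 16 bits, SGL (ICD)
     * index in the lower 16 bits, converted to big-endian for the wire;
     * the original libiscsi itt was saved in io_task->libiscsi_itt above.
     */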
bfead3b2
JK
4203 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4204 wrb_index << 16) | (unsigned int)
4205 (io_task->psgl_handle->sgl_index));
32951dd8 4206 io_task->pwrb_handle->pio_handle = task;
bfead3b2 4207
6733b39a
JK
4208 io_task->cmd_bhs->iscsi_hdr.itt = itt;
4209 return 0;
2afc95bf 4210
d2cecf0d
JK
4211free_io_hndls:
4212 spin_lock(&phba->io_sgl_lock);
4213 free_io_sgl_handle(phba, io_task->psgl_handle);
4214 spin_unlock(&phba->io_sgl_lock);
4215 goto free_hndls;
4216free_mgmt_hndls:
4217 spin_lock(&phba->mgmt_sgl_lock);
4218 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4219 spin_unlock(&phba->mgmt_sgl_lock);
2afc95bf
JK
4220free_hndls:
4221 phwi_ctrlr = phba->phwi_ctrlr;
7da50879
JK
4222 pwrb_context = &phwi_ctrlr->wrb_context[
4223 beiscsi_conn->beiscsi_conn_cid -
4224 phba->fw_config.iscsi_cid_start];
d2cecf0d
JK
4225 if (io_task->pwrb_handle)
4226 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
2afc95bf
JK
4227 io_task->pwrb_handle = NULL;
4228 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4229 io_task->bhs_pa.u.a64.address);
1282ab76 4230 io_task->cmd_bhs = NULL;
2afc95bf 4231 return -ENOMEM;
6733b39a
JK
4232}
4233
6733b39a
JK
4234static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4235 unsigned int num_sg, unsigned int xferlen,
4236 unsigned int writedir)
4237{
4238
4239 struct beiscsi_io_task *io_task = task->dd_data;
4240 struct iscsi_conn *conn = task->conn;
4241 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4242 struct beiscsi_hba *phba = beiscsi_conn->phba;
4243 struct iscsi_wrb *pwrb = NULL;
4244 unsigned int doorbell = 0;
4245
4246 pwrb = io_task->pwrb_handle->pwrb;
6733b39a
JK
4247 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4248 io_task->bhs_len = sizeof(struct be_cmd_bhs);
4249
4250 if (writedir) {
32951dd8
JK
4251 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4252 INI_WR_CMD);
6733b39a 4253 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
6733b39a 4254 } else {
32951dd8
JK
4255 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4256 INI_RD_CMD);
6733b39a
JK
4257 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4258 }
6733b39a
JK
4259
4260 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
dc63aac6
JK
4261 cpu_to_be16(*(unsigned short *)
4262 &io_task->cmd_bhs->iscsi_hdr.lun));
6733b39a
JK
4263 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4264 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4265 io_task->pwrb_handle->wrb_index);
4266 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4267 be32_to_cpu(task->cmdsn));
4268 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4269 io_task->psgl_handle->sgl_index);
4270
4271 hwi_write_sgl(pwrb, sg, num_sg, io_task);
4272
4273 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4274 io_task->pwrb_handle->nxt_wrb_index);
4275 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4276
4277 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
32951dd8 4278 doorbell |= (io_task->pwrb_handle->wrb_index &
6733b39a
JK
4279 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4280 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4281
4282 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4283 return 0;
4284}
4285
4286static int beiscsi_mtask(struct iscsi_task *task)
4287{
dafab8e0 4288 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
4289 struct iscsi_conn *conn = task->conn;
4290 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4291 struct beiscsi_hba *phba = beiscsi_conn->phba;
4292 struct iscsi_wrb *pwrb = NULL;
4293 unsigned int doorbell = 0;
dafab8e0 4294 unsigned int cid;
6733b39a 4295
bfead3b2 4296 cid = beiscsi_conn->beiscsi_conn_cid;
6733b39a 4297 pwrb = io_task->pwrb_handle->pwrb;
caf818f1 4298 memset(pwrb, 0, sizeof(*pwrb));
6733b39a
JK
4299 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4300 be32_to_cpu(task->cmdsn));
4301 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4302 io_task->pwrb_handle->wrb_index);
4303 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4304 io_task->psgl_handle->sgl_index);
dafab8e0 4305
6733b39a
JK
4306 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4307 case ISCSI_OP_LOGIN:
32951dd8
JK
4308 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4309 TGT_DM_CMD);
6733b39a
JK
4310 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4311 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
4312 hwi_write_buffer(pwrb, task);
4313 break;
4314 case ISCSI_OP_NOOP_OUT:
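        /*
         * A NOP-Out with a valid TTT answers a target NOP-In and expects
         * no reply, so it goes out as a one-way TGT_DM_CMD; an initiator
         * NOP-Out (reserved TTT) is posted as INI_RD_CMD and expects a
         * NOP-In back.
         */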
1390b01b
JK
4315 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4316 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4317 TGT_DM_CMD);
4318 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
4319 pwrb, 0);
685e16fd 4320 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
1390b01b
JK
4321 } else {
4322 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4323 INI_RD_CMD);
685e16fd 4324 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
1390b01b 4325 }
6733b39a
JK
4326 hwi_write_buffer(pwrb, task);
4327 break;
4328 case ISCSI_OP_TEXT:
32951dd8 4329 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
b30c6dab 4330 TGT_DM_CMD);
0ecb0b45 4331 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
6733b39a
JK
4332 hwi_write_buffer(pwrb, task);
4333 break;
4334 case ISCSI_OP_SCSI_TMFUNC:
32951dd8
JK
4335 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4336 INI_TMF_CMD);
6733b39a
JK
4337 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4338 hwi_write_buffer(pwrb, task);
4339 break;
4340 case ISCSI_OP_LOGOUT:
4341 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4342 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
dafab8e0 4343 HWH_TYPE_LOGOUT);
6733b39a
JK
4344 hwi_write_buffer(pwrb, task);
4345 break;
4346
4347 default:
99bc5d55
JSJ
4348 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4349 "BM_%d : opcode =%d Not supported\n",
4350 task->hdr->opcode & ISCSI_OPCODE_MASK);
4351
6733b39a
JK
4352 return -EINVAL;
4353 }
4354
4355 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
51a46250 4356 task->data_count);
6733b39a
JK
4357 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4358 io_task->pwrb_handle->nxt_wrb_index);
4359 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4360
bfead3b2 4361 doorbell |= cid & DB_WRB_POST_CID_MASK;
32951dd8 4362 doorbell |= (io_task->pwrb_handle->wrb_index &
6733b39a
JK
4363 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4364 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4365 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
4366 return 0;
4367}
4368
4369static int beiscsi_task_xmit(struct iscsi_task *task)
4370{
6733b39a
JK
4371 struct beiscsi_io_task *io_task = task->dd_data;
4372 struct scsi_cmnd *sc = task->sc;
6733b39a
JK
4373 struct scatterlist *sg;
4374 int num_sg;
4375 unsigned int writedir = 0, xferlen = 0;
4376
6733b39a
JK
4377 if (!sc)
4378 return beiscsi_mtask(task);
4379
4380 io_task->scsi_cmnd = sc;
4381 num_sg = scsi_dma_map(sc);
4382 if (num_sg < 0) {
99bc5d55
JSJ
4383 struct iscsi_conn *conn = task->conn;
4384 struct beiscsi_hba *phba = NULL;
4385
4386 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
4387 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
4388 "BM_%d : scsi_dma_map Failed\n");
4389
6733b39a
JK
4390 return num_sg;
4391 }
6733b39a
JK
4392 xferlen = scsi_bufflen(sc);
4393 sg = scsi_sglist(sc);
99bc5d55 4394 if (sc->sc_data_direction == DMA_TO_DEVICE)
6733b39a 4395 writedir = 1;
99bc5d55 4396 else
6733b39a 4397 writedir = 0;
99bc5d55 4398
6733b39a
JK
4399 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4400}
4401
ffce3e2e
JK
4402/**
4403 * beiscsi_bsg_request - handle bsg request from ISCSI transport
4404 * @job: job to handle
4405 */
4406static int beiscsi_bsg_request(struct bsg_job *job)
4407{
4408 struct Scsi_Host *shost;
4409 struct beiscsi_hba *phba;
4410 struct iscsi_bsg_request *bsg_req = job->request;
4411 int rc = -EINVAL;
4412 unsigned int tag;
4413 struct be_dma_mem nonemb_cmd;
4414 struct be_cmd_resp_hdr *resp;
4415 struct iscsi_bsg_reply *bsg_reply = job->reply;
4416 unsigned short status, extd_status;
4417
4418 shost = iscsi_job_to_shost(job);
4419 phba = iscsi_host_priv(shost);
4420
4421 switch (bsg_req->msgcode) {
4422 case ISCSI_BSG_HST_VENDOR:
4423 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
4424 job->request_payload.payload_len,
4425 &nonemb_cmd.dma);
4426 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
4427 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4428 "BM_%d : Failed to allocate memory for "
4429 "beiscsi_bsg_request\n");
8359c79b 4430 return -ENOMEM;
ffce3e2e
JK
4431 }
4432 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4433 &nonemb_cmd);
4434 if (!tag) {
99bc5d55 4435 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
8359c79b 4436 "BM_%d : MBX Tag Allocation Failed\n");
99bc5d55 4437
ffce3e2e
JK
4438 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4439 nonemb_cmd.va, nonemb_cmd.dma);
4440 return -EAGAIN;
4441 } else
4442 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
4443 phba->ctrl.mcc_numtag[tag]);
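        /* Completion word: bits 15:8 carry the extended status, bits 7:0 the MCC status. */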
4444 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
4445 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
4446 free_mcc_tag(&phba->ctrl, tag);
4447 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
4448 sg_copy_from_buffer(job->reply_payload.sg_list,
4449 job->reply_payload.sg_cnt,
4450 nonemb_cmd.va, (resp->response_length
4451 + sizeof(*resp)));
4452 bsg_reply->reply_payload_rcv_len = resp->response_length;
4453 bsg_reply->result = status;
4454 bsg_job_done(job, bsg_reply->result,
4455 bsg_reply->reply_payload_rcv_len);
4456 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4457 nonemb_cmd.va, nonemb_cmd.dma);
4458 if (status || extd_status) {
99bc5d55 4459 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
8359c79b 4460 "BM_%d : MBX Cmd Failed"
99bc5d55
JSJ
4461 " status = %d extd_status = %d\n",
4462 status, extd_status);
4463
ffce3e2e 4464 return -EIO;
8359c79b
JSJ
4465 } else {
4466 rc = 0;
ffce3e2e
JK
4467 }
4468 break;
4469
4470 default:
99bc5d55
JSJ
4471 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4472 "BM_%d : Unsupported bsg command: 0x%x\n",
4473 bsg_req->msgcode);
ffce3e2e
JK
4474 break;
4475 }
4476
4477 return rc;
4478}
4479
99bc5d55
JSJ
4480void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
4481{
4482 /* Set the logging parameter */
4483 beiscsi_log_enable_init(phba, beiscsi_log_enable);
4484}
4485
4d4d1ef8
JSJ
4486/*
4487 * beiscsi_quiesce()- Cleanup Driver resources
4488 * @phba: Instance Priv structure
4489 *
4490 * Free the OS and HW resources held by the driver
4491 **/
25602c97 4492static void beiscsi_quiesce(struct beiscsi_hba *phba)
6733b39a 4493{
bfead3b2
JK
4494 struct hwi_controller *phwi_ctrlr;
4495 struct hwi_context_memory *phwi_context;
4496 struct be_eq_obj *pbe_eq;
4497 unsigned int i, msix_vec;
6733b39a 4498
bfead3b2
JK
4499 phwi_ctrlr = phba->phwi_ctrlr;
4500 phwi_context = phwi_ctrlr->phwi_ctxt;
6733b39a 4501 hwi_disable_intr(phba);
bfead3b2
JK
4502 if (phba->msix_enabled) {
4503 for (i = 0; i <= phba->num_cpus; i++) {
4504 msix_vec = phba->msix_entries[i].vector;
4505 free_irq(msix_vec, &phwi_context->be_eq[i]);
8fcfb210 4506 kfree(phba->msi_name[i]);
bfead3b2
JK
4507 }
4508 } else
4509 if (phba->pcidev->irq)
4510 free_irq(phba->pcidev->irq, phba);
4511 pci_disable_msix(phba->pcidev);
6733b39a
JK
4512 destroy_workqueue(phba->wq);
4513 if (blk_iopoll_enabled)
bfead3b2
JK
4514 for (i = 0; i < phba->num_cpus; i++) {
4515 pbe_eq = &phwi_context->be_eq[i];
4516 blk_iopoll_disable(&pbe_eq->iopoll);
4517 }
6733b39a
JK
4518
4519 beiscsi_clean_port(phba);
4520 beiscsi_free_mem(phba);
e9b91193 4521
6733b39a
JK
4522 beiscsi_unmap_pci_function(phba);
4523 pci_free_consistent(phba->pcidev,
4524 phba->ctrl.mbox_mem_alloced.size,
4525 phba->ctrl.mbox_mem_alloced.va,
4526 phba->ctrl.mbox_mem_alloced.dma);
25602c97
JK
4527}
4528
4529static void beiscsi_remove(struct pci_dev *pcidev)
4530{
4531
4532 struct beiscsi_hba *phba = NULL;
4533
4534 phba = pci_get_drvdata(pcidev);
4535 if (!phba) {
4536 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4537 return;
4538 }
4539
0e43895e 4540 beiscsi_destroy_def_ifaces(phba);
25602c97 4541 beiscsi_quiesce(phba);
9d045163 4542 iscsi_boot_destroy_kset(phba->boot_kset);
6733b39a
JK
4543 iscsi_host_remove(phba->shost);
4544 pci_dev_put(phba->pcidev);
4545 iscsi_host_free(phba->shost);
8dce69ff 4546 pci_disable_device(pcidev);
6733b39a
JK
4547}
4548
25602c97
JK
4549static void beiscsi_shutdown(struct pci_dev *pcidev)
4550{
4551
4552 struct beiscsi_hba *phba = NULL;
4553
4554 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4555 if (!phba) {
4556 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
4557 return;
4558 }
4559
4560 beiscsi_quiesce(phba);
8dce69ff 4561 pci_disable_device(pcidev);
25602c97
JK
4562}
4563
bfead3b2
JK
4564static void beiscsi_msix_enable(struct beiscsi_hba *phba)
4565{
4566 int i, status;
4567
4568 for (i = 0; i <= phba->num_cpus; i++)
4569 phba->msix_entries[i].entry = i;
4570
4571 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
4572 (phba->num_cpus + 1));
4573 if (!status)
4574 phba->msix_enabled = true;
4575
4576 return;
4577}
4578
6733b39a
JK
4579static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4580 const struct pci_device_id *id)
4581{
4582 struct beiscsi_hba *phba = NULL;
bfead3b2
JK
4583 struct hwi_controller *phwi_ctrlr;
4584 struct hwi_context_memory *phwi_context;
4585 struct be_eq_obj *pbe_eq;
107dfcba 4586 int ret, i;
6733b39a
JK
4587
4588 ret = beiscsi_enable_pci(pcidev);
4589 if (ret < 0) {
99bc5d55
JSJ
4590 dev_err(&pcidev->dev,
4591 "beiscsi_dev_probe - Failed to enable pci device\n");
6733b39a
JK
4592 return ret;
4593 }
4594
4595 phba = beiscsi_hba_alloc(pcidev);
4596 if (!phba) {
99bc5d55
JSJ
4597 dev_err(&pcidev->dev,
4598 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
6733b39a
JK
4599 goto disable_pci;
4600 }
4601
99bc5d55
JSJ
4602 /* Initialize Driver configuration Parameters */
4603 beiscsi_hba_attrs_init(phba);
4604
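    /*
     * Map the PCI device ID to the ASIC generation; OC_SKH_ID1 is the
     * Skyhawk-R adapter (BE_GEN4) handled by this change.
     */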
f98c96b0
JK
4605 switch (pcidev->device) {
4606 case BE_DEVICE_ID1:
4607 case OC_DEVICE_ID1:
4608 case OC_DEVICE_ID2:
4609 phba->generation = BE_GEN2;
4610 break;
4611 case BE_DEVICE_ID2:
4612 case OC_DEVICE_ID3:
4613 phba->generation = BE_GEN3;
4614 break;
139a1b1e
JSJ
4615 case OC_SKH_ID1:
4616 phba->generation = BE_GEN4;
 break; /* prevent fall-through into the default case clobbering BE_GEN4 */
f98c96b0
JK
4617 default:
4618 phba->generation = 0;
4619 }
4620
bfead3b2 4621 if (enable_msix)
107dfcba 4622 find_num_cpus(phba);
bfead3b2 4623 else
107dfcba
JSJ
4624 phba->num_cpus = 1;
4625
99bc5d55
JSJ
4626 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4627 "BM_%d : num_cpus = %d\n",
4628 phba->num_cpus);
bfead3b2 4629
b547f2d6 4630 if (enable_msix) {
bfead3b2 4631 beiscsi_msix_enable(phba);
b547f2d6
JK
4632 if (!phba->msix_enabled)
4633 phba->num_cpus = 1;
4634 }
6733b39a
JK
4635 ret = be_ctrl_init(phba, pcidev);
4636 if (ret) {
99bc5d55
JSJ
4637 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4638 "BM_%d : beiscsi_dev_probe - "
4639 "Failed in be_ctrl_init\n");
6733b39a
JK
4640 goto hba_free;
4641 }
4642
4d4d1ef8
JSJ
4643 ret = beiscsi_cmd_reset_function(phba);
4644 if (ret) {
4645 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4646 "BM_%d : Reset Failed. Aborting Crashdump\n");
4647 goto hba_free;
4648 }
4649 ret = be_chk_reset_complete(phba);
4650 if (ret) {
4651 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4652 "BM_%d : Failed to get out of reset. "
4653 "Aborting Crashdump\n");
4654 goto hba_free;
e9b91193
JK
4655 }
4656
6733b39a
JK
4657 spin_lock_init(&phba->io_sgl_lock);
4658 spin_lock_init(&phba->mgmt_sgl_lock);
4659 spin_lock_init(&phba->isr_lock);
7da50879
JK
4660 ret = mgmt_get_fw_config(&phba->ctrl, phba);
4661 if (ret != 0) {
99bc5d55
JSJ
4662 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4663 "BM_%d : Error getting fw config\n");
7da50879
JK
4664 goto free_port;
4665 }
4666 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
6733b39a 4667 beiscsi_get_params(phba);
aa874f07 4668 phba->shost->can_queue = phba->params.ios_per_ctrl;
6733b39a
JK
4669 ret = beiscsi_init_port(phba);
4670 if (ret < 0) {
99bc5d55
JSJ
4671 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4672 "BM_%d : beiscsi_dev_probe - "
4673 "Failed in beiscsi_init_port\n");
6733b39a
JK
4674 goto free_port;
4675 }
4676
756d29c8
JK
4677 for (i = 0; i < MAX_MCC_CMD ; i++) {
4678 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
4679 phba->ctrl.mcc_tag[i] = i + 1;
4680 phba->ctrl.mcc_numtag[i + 1] = 0;
4681 phba->ctrl.mcc_tag_available++;
4682 }
4683
4684 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
4685
72fb46a9 4686 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
6733b39a 4687 phba->shost->host_no);
278274d5 4688 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
6733b39a 4689 if (!phba->wq) {
99bc5d55
JSJ
4690 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4691 "BM_%d : beiscsi_dev_probe - "
4692 "Failed to allocate work queue\n");
6733b39a
JK
4693 goto free_twq;
4694 }
4695
6733b39a 4696
bfead3b2
JK
4697 phwi_ctrlr = phba->phwi_ctrlr;
4698 phwi_context = phwi_ctrlr->phwi_ctxt;
72fb46a9 4699
6733b39a 4700 if (blk_iopoll_enabled) {
bfead3b2
JK
4701 for (i = 0; i < phba->num_cpus; i++) {
4702 pbe_eq = &phwi_context->be_eq[i];
4703 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
4704 be_iopoll);
4705 blk_iopoll_enable(&pbe_eq->iopoll);
4706 }
72fb46a9
JSJ
4707
4708 i = (phba->msix_enabled) ? i : 0;
4709 /* Work item for MCC handling */
4710 pbe_eq = &phwi_context->be_eq[i];
4711 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
4712 } else {
4713 if (phba->msix_enabled) {
4714 for (i = 0; i <= phba->num_cpus; i++) {
4715 pbe_eq = &phwi_context->be_eq[i];
4716 INIT_WORK(&pbe_eq->work_cqs,
4717 beiscsi_process_all_cqs);
4718 }
4719 } else {
4720 pbe_eq = &phwi_context->be_eq[0];
4721 INIT_WORK(&pbe_eq->work_cqs,
4722 beiscsi_process_all_cqs);
4723 }
6733b39a 4724 }
72fb46a9 4725
6733b39a
JK
4726 ret = beiscsi_init_irqs(phba);
4727 if (ret < 0) {
99bc5d55
JSJ
4728 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4729 "BM_%d : beiscsi_dev_probe - "
4730 "Failed in beiscsi_init_irqs\n");
6733b39a
JK
4731 goto free_blkenbld;
4732 }
238f6b72 4733 hwi_enable_intr(phba);
f457a46f
MC
4734
4735 if (beiscsi_setup_boot_info(phba))
4736 /*
4737 * log error but continue, because we may not be using
4738 * iscsi boot.
4739 */
99bc5d55
JSJ
4740 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4741 "BM_%d : Could not set up "
4742 "iSCSI boot info.\n");
f457a46f 4743
0e43895e 4744 beiscsi_create_def_ifaces(phba);
99bc5d55
JSJ
4745 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4746 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
6733b39a
JK
4747 return 0;
4748
6733b39a
JK
4749free_blkenbld:
4750 destroy_workqueue(phba->wq);
4751 if (blk_iopoll_enabled)
bfead3b2
JK
4752 for (i = 0; i < phba->num_cpus; i++) {
4753 pbe_eq = &phwi_context->be_eq[i];
4754 blk_iopoll_disable(&pbe_eq->iopoll);
4755 }
6733b39a
JK
4756free_twq:
4757 beiscsi_clean_port(phba);
4758 beiscsi_free_mem(phba);
4759free_port:
4760 pci_free_consistent(phba->pcidev,
4761 phba->ctrl.mbox_mem_alloced.size,
4762 phba->ctrl.mbox_mem_alloced.va,
4763 phba->ctrl.mbox_mem_alloced.dma);
4764 beiscsi_unmap_pci_function(phba);
4765hba_free:
238f6b72
JK
4766 if (phba->msix_enabled)
4767 pci_disable_msix(phba->pcidev);
6733b39a
JK
4768 iscsi_host_remove(phba->shost);
4769 pci_dev_put(phba->pcidev);
4770 iscsi_host_free(phba->shost);
4771disable_pci:
4772 pci_disable_device(pcidev);
4773 return ret;
4774}
4775
4776struct iscsi_transport beiscsi_iscsi_transport = {
4777 .owner = THIS_MODULE,
4778 .name = DRV_NAME,
9db0fb3a 4779 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
6733b39a 4780 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
6733b39a
JK
4781 .create_session = beiscsi_session_create,
4782 .destroy_session = beiscsi_session_destroy,
4783 .create_conn = beiscsi_conn_create,
4784 .bind_conn = beiscsi_conn_bind,
4785 .destroy_conn = iscsi_conn_teardown,
3128c6c7 4786 .attr_is_visible = be2iscsi_attr_is_visible,
0e43895e
MC
4787 .set_iface_param = be2iscsi_iface_set_param,
4788 .get_iface_param = be2iscsi_iface_get_param,
6733b39a 4789 .set_param = beiscsi_set_param,
c7f7fd5b 4790 .get_conn_param = iscsi_conn_get_param,
6733b39a
JK
4791 .get_session_param = iscsi_session_get_param,
4792 .get_host_param = beiscsi_get_host_param,
4793 .start_conn = beiscsi_conn_start,
fa95d206 4794 .stop_conn = iscsi_conn_stop,
6733b39a
JK
4795 .send_pdu = iscsi_conn_send_pdu,
4796 .xmit_task = beiscsi_task_xmit,
4797 .cleanup_task = beiscsi_cleanup_task,
4798 .alloc_pdu = beiscsi_alloc_pdu,
4799 .parse_pdu_itt = beiscsi_parse_pdu,
4800 .get_stats = beiscsi_conn_get_stats,
c7f7fd5b 4801 .get_ep_param = beiscsi_ep_get_param,
6733b39a
JK
4802 .ep_connect = beiscsi_ep_connect,
4803 .ep_poll = beiscsi_ep_poll,
4804 .ep_disconnect = beiscsi_ep_disconnect,
4805 .session_recovery_timedout = iscsi_session_recovery_timedout,
ffce3e2e 4806 .bsg_request = beiscsi_bsg_request,
6733b39a
JK
4807};
4808
4809static struct pci_driver beiscsi_pci_driver = {
4810 .name = DRV_NAME,
4811 .probe = beiscsi_dev_probe,
4812 .remove = beiscsi_remove,
25602c97 4813 .shutdown = beiscsi_shutdown,
6733b39a
JK
4814 .id_table = beiscsi_pci_id_table
4815};
4816
bfead3b2 4817
6733b39a
JK
4818static int __init beiscsi_module_init(void)
4819{
4820 int ret;
4821
4822 beiscsi_scsi_transport =
4823 iscsi_register_transport(&beiscsi_iscsi_transport);
4824 if (!beiscsi_scsi_transport) {
99bc5d55
JSJ
4825 printk(KERN_ERR
4826 "beiscsi_module_init - Unable to register beiscsi transport.\n");
f55a24f2 4827 return -ENOMEM;
6733b39a 4828 }
99bc5d55
JSJ
4829 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
4830 &beiscsi_iscsi_transport);
6733b39a
JK
4831
4832 ret = pci_register_driver(&beiscsi_pci_driver);
4833 if (ret) {
99bc5d55
JSJ
4834 printk(KERN_ERR
4835 "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
6733b39a
JK
4836 goto unregister_iscsi_transport;
4837 }
4838 return 0;
4839
4840unregister_iscsi_transport:
4841 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4842 return ret;
4843}
4844
4845static void __exit beiscsi_module_exit(void)
4846{
4847 pci_unregister_driver(&beiscsi_pci_driver);
4848 iscsi_unregister_transport(&beiscsi_iscsi_transport);
4849}
4850
4851module_init(beiscsi_module_init);
4852module_exit(beiscsi_module_exit);