/**
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
 *
 * Contact Information:
 * linux-drivers@avagotech.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		 "Maximum Size (In Kilobytes) of physically contiguous "
		 "memory that can be allocated. Range is 16 - 128");

#define beiscsi_disp_param(_name)\
ssize_t \
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	uint32_t param_val = 0; \
	param_val = phba->attr_##_name;\
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			 struct device_attribute *attr, const char *buf,\
			 size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	    beiscsi_##_name##_disp, beiscsi_##_name##_store)

/*
 * When a new log level is added, update the
 * MAX allowed value for log_enable
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events : 0x01\n"
		"\t\t\t\tMailbox Events : 0x02\n"
		"\t\t\t\tMiscellaneous Events : 0x04\n"
		"\t\t\t\tError Handling : 0x08\n"
		"\t\t\t\tIO Path Events : 0x10\n"
		"\t\t\t\tConfiguration Path : 0x20\n"
		"\t\t\t\tiSCSI Protocol : 0x40\n");

DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
	    beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
	    beiscsi_free_session_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	&dev_attr_beiscsi_drvr_ver,
	&dev_attr_beiscsi_adapter_family,
	&dev_attr_beiscsi_fw_ver,
	&dev_attr_beiscsi_active_session_count,
	&dev_attr_beiscsi_free_session_count,
	&dev_attr_beiscsi_phys_port,
	NULL,
};

static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};

static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

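/**
 * beiscsi_eh_abort - eh_abort_handler: abort one outstanding SCSI command
 * @sc: command to be aborted
 *
 * Invalidates the WRB posted for the command, asks the adapter to
 * invalidate the ICD via mgmt_invalidate_icds() and then lets libiscsi
 * finish the abort through iscsi_eh_abort().
 */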
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;
	int rc;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->frwd_lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->frwd_lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->frwd_lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->frwd_lock);
	/* Invalidate WRB Posted for this Task */
	AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
		      aborted_io_task->pwrb_handle->pwrb,
		      1);

	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be "
			    "submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

		return FAILED;
	}

	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
	if (rc != -EBUSY)
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

	return iscsi_eh_abort(sc);
}

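/**
 * beiscsi_eh_device_reset - eh_device_reset_handler: reset one LUN
 * @sc: command that triggered the reset
 *
 * Builds an invalidate table for every active command on the LUN,
 * invalidates their WRBs/ICDs on the adapter and then hands control
 * to iscsi_eh_device_reset().
 */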
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task;
	struct beiscsi_io_task *abrt_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct iscsi_cls_session *cls_session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, i, num_invalidate;
	int rc;

	/* invalidate iocbs */
	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock_bh(&session->frwd_lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->frwd_lock);
		return FAILED;
	}
	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
	num_invalidate = 0;
	for (i = 0; i < conn->session->cmds_max; i++) {
		abrt_task = conn->session->cmds[i];
		abrt_io_task = abrt_task->dd_data;
		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
			continue;

		if (sc->device->lun != abrt_task->sc->device->lun)
			continue;

		/* Invalidate WRB Posted for this Task */
		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
			      abrt_io_task->pwrb_handle->pwrb,
			      1);

		inv_tbl->cid = cid;
		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
		num_invalidate++;
		inv_tbl++;
	}
	spin_unlock_bh(&session->frwd_lock);
	inv_tbl = phba->inv_tbl;

	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be"
			    " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	}

	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
	if (rc != -EBUSY)
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_device_reset(sc);
}

static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	struct mgmt_session_info *boot_sess = &phba->boot_sess;
	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
		rc = sprintf(buf, "%.*s\n",
			     (int)strlen(boot_sess->target_name),
			     (char *)&boot_sess->target_name);
		break;
	case ISCSI_BOOT_TGT_IP_ADDR:
		if (boot_conn->dest_ipaddr.ip_type == 0x1)
			rc = sprintf(buf, "%pI4\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
		else
			rc = sprintf(str, "%pI6\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
		break;
	case ISCSI_BOOT_TGT_PORT:
		rc = sprintf(str, "%d\n", boot_conn->dest_port);
		break;

	case ISCSI_BOOT_TGT_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_chap_name);
		break;
	case ISCSI_BOOT_TGT_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_secret);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_chap_name);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_secret);
		break;
	case ISCSI_BOOT_TGT_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_TGT_NIC_ASSOC:
		rc = sprintf(str, "0\n");
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_ETH_INDEX:
		rc = sprintf(str, "0\n");
		break;
	case ISCSI_BOOT_ETH_MAC:
		rc = beiscsi_get_macaddr(str, phba);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}


static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
	case ISCSI_BOOT_TGT_IP_ADDR:
	case ISCSI_BOOT_TGT_PORT:
	case ISCSI_BOOT_TGT_CHAP_NAME:
	case ISCSI_BOOT_TGT_CHAP_SECRET:
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
	case ISCSI_BOOT_TGT_NIC_ASSOC:
	case ISCSI_BOOT_TGT_FLAGS:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}


static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
	case ISCSI_BOOT_ETH_MAC:
	case ISCSI_BOOT_ETH_INDEX:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static const struct pci_device_id beiscsi_pci_id_table[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);


static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
	.track_queue_depth = 1,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	return phba;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - request region failed\n");
		goto pci_dev_disable;
	}

	pci_set_master(pcidev);
	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
	if (ret) {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		} else {
			ret = pci_set_consistent_dma_mask(pcidev,
							  DMA_BIT_MASK(32));
		}
	} else {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		}
	}
	return 0;

pci_region_release:
	pci_release_regions(pcidev);
pci_dev_disable:
	pci_disable_device(pcidev);

	return ret;
}

static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	mutex_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}

/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr to device priv structure
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint32_t align_mask = 0;
		uint32_t icd_post_per_page = 0;
		uint32_t icd_count_unavailable = 0;
		uint32_t icd_start = 0, icd_count = 0;
		uint32_t icd_start_align = 0, icd_count_align = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];

			/* Get ICD count that can be posted on each page */
			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
					     sizeof(struct iscsi_sge)));
			align_mask = (icd_post_per_page - 1);

			/* Check if icd_start is aligned ICD per page posting */
			if (icd_start % icd_post_per_page) {
				icd_start_align = ((icd_start +
						    icd_post_per_page) &
						    ~(align_mask));
				phba->fw_config.
					iscsi_icd_start[ulp_num] =
					icd_start_align;
			}

			icd_count_align = (icd_count & ~align_mask);

			/* ICD discarded in the process of alignment */
			if (icd_start_align)
				icd_count_unavailable = ((icd_start_align -
							  icd_start) +
							 (icd_count -
							  icd_count_align));

			/* Updated ICD count available */
			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
					icd_count_unavailable);

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					"BM_%d : Aligned ICD values\n"
					"\t ICD Start : %d\n"
					"\t ICD Count : %d\n"
					"\t ICD Discarded : %d\n",
					phba->fw_config.
					iscsi_icd_start[ulp_num],
					phba->fw_config.
					iscsi_icd_count[ulp_num],
					icd_count_unavailable);
			break;
		}
	}

	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	phba->params.ios_per_ctrl = (total_icd_count -
				    (total_cid_count +
				     BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.asyncpdus_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}

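/*
 * hwi_ring_eq_db - ring the event queue doorbell: acknowledges
 * num_processed entries on EQ "id" and optionally clears the interrupt
 * and re-arms the EQ.
 */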
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;

	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	/* Setting lower order EQ_ID Bits */
	val |= (id & DB_EQ_RING_ID_LOW_MASK);

	/* Setting Higher order EQ_ID Bits */
	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
		  DB_EQ_RING_ID_HIGH_MASK)
		  << DB_EQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	num_eq_processed = 0;

	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			pbe_eq->todo_mcc_cq = true;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (pbe_eq->todo_mcc_cq)
		queue_work(phba->wq, &pbe_eq->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}

/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		irq_poll_sched(&pbe_eq->iopoll);

		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}

	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

	return IRQ_HANDLED;
}

/**
 * be_isr - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			pbe_eq->todo_mcc_cq = true;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			num_mcceq_processed++;
		} else {
			irq_poll_sched(&pbe_eq->iopoll);
			num_ioeq_processed++;
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
	if (num_ioeq_processed || num_mcceq_processed) {
		if (pbe_eq->todo_mcc_cq)
			queue_work(phba->wq, &pbe_eq->work_cqs);

		if ((num_mcceq_processed) && (!num_ioeq_processed))
			hwi_ring_eq_db(phba, eq->id, 0,
				       (num_ioeq_processed +
					num_mcceq_processed), 1, 1);
		else
			hwi_ring_eq_db(phba, eq->id, 0,
				       (num_ioeq_processed +
					num_mcceq_processed), 0, 1);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
						    GFP_KERNEL);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
				phba->shost->host_no, i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0,
					  phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : beiscsi_init_irqs-Failed to "
					    "register msix for i = %d\n",
					    i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
			phba->shost->host_no);
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
				  &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register irq\n");
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		kfree(phba->msi_name[j]);
		msix_vec = phba->msix_entries[j].vector;
		free_irq(msix_vec, &phwi_context->be_eq[j]);
	}
	return ret;
}

void hwi_ring_cq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;

	/* Setting lower order CQ_ID Bits */
	val |= (id & DB_CQ_RING_ID_LOW_MASK);

	/* Setting Higher order CQ_ID Bits */
	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
		  DB_CQ_RING_ID_HIGH_MASK)
		  << DB_CQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

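/*
 * beiscsi_process_async_pdu - hand an unsolicited PDU (nop-in, async
 * event, reject, login/text response) up to libiscsi via
 * __iscsi_complete_pdu(), fixing up the ITT for login/text responses.
 */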
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
		PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : Unrecognized opcode 0x%x in async msg\n",
			    (ppdu->
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
			     & PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->back_lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->back_lock);
	return 0;
}

static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->
						io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
						 ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, "
			    "value there=%p\n", phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base
			    [phba->io_sgl_free_index]);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @pwrb_context: ptr to ptr to wrb context
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    struct hwi_wrb_context **pcontext)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		if (pwrb_context->alloc_index ==
						(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;

		/* Return the context address */
		*pcontext = pwrb_context;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;

	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle,"
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL ,"
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
}

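/*
 * be_complete_io - complete a solicited SCSI I/O: copy status, residual
 * and sense data from the completion entry into the scsi_cmnd and hand
 * the task back to libiscsi.
 */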
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task,
		struct common_sol_cqe *csol_cqe)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = csol_cqe->exp_cmdsn;
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	if (!task->sc) {
		if (io_task->scsi_cmnd) {
			scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
unmap:
	if (io_task->scsi_cmnd) {
		scsi_dma_unmap(io_task->scsi_cmnd);
		io_task->scsi_cmnd = NULL;
	}
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		    struct iscsi_task *task,
		    struct common_sol_cqe *csol_cqe)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		 struct iscsi_task *task,
		 struct common_sol_cqe *csol_cqe)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	uint16_t wrb_index, cid, cri_index;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (is_chip_be2_be3r(phba)) {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
				    cid, psol);
	} else {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
				    cid, psol);
	}

	cri_index = BE_GET_CRI_FROM_CID(cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
	iscsi_put_task(task);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task,
		       struct common_sol_cqe *csol_cqe)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = csol_cqe->i_flags;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

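/*
 * adapter_get_sol_cqe - copy the fields of a solicited completion entry
 * into a chip-independent struct common_sol_cqe, handling the different
 * CQE layouts of BE2/BE3 and the later (v2) adapters.
 */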
static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
				struct sol_cqe *psol,
				struct common_sol_cqe *csol_cqe)
{
	if (is_chip_be2_be3r(phba)) {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_res_cnt, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_cmd_wnd, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						 hw_sts, psol);
		csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
						 i_resp, psol);
		csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						i_sts, psol);
		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_flags, psol);
	} else {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_res_cnt, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						 hw_sts, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_cmd_wnd, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  cmd_cmpl, psol))
			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							i_sts, psol);
		else
			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							 i_sts, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  u, psol))
			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;

		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  o, psol))
			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
	}
}


static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct common_sol_cqe csol_cqe = {0};
	uint16_t cri_index = 0;

	phwi_ctrlr = phba->phwi_ctrlr;

	/* Copy the elements to a common structure */
	adapter_get_sol_cqe(phba, psol, &csol_cqe);

	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	pwrb_handle = pwrb_context->pwrb_handle_basestd[
		      csol_cqe.wrb_index];

	task = pwrb_handle->pio_handle;
	pwrb = pwrb_handle->pwrb;
	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;

	spin_lock_bh(&session->back_lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_io(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_tmf(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGIN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
			    " hwi_complete_cmd- Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In hwi_complete_cmd, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", type,
			    csol_cqe.wrb_index,
			    csol_cqe.cid);
		break;
	}

	spin_unlock_bh(&session->back_lock);
}

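/*
 * hwi_get_async_busy_list / hwi_get_async_handle - map a default PDU
 * completion back to the async_pdu_handle that owns the posted buffer,
 * using the DMA address carried in the CQE.
 */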
static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
					  *pasync_ctx, unsigned int is_header,
					  unsigned int host_write_ptr)
{
	if (is_header)
		return &pasync_ctx->async_entry[host_write_ptr].
		header_busy_list;
	else
		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
}

static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned char is_header = 0;
	unsigned int index, dpl;

	if (is_chip_be2_be3r(phba)) {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				    dpl, pdpdu_cqe);
		index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				      index, pdpdu_cqe);
	} else {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				    dpl, pdpdu_cqe);
		index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				      index, pdpdu_cqe);
	}

	phys_addr.u.a32.address_lo =
		(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					db_addr_lo) / 32] - dpl);
	phys_addr.u.a32.address_hi =
		pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
				       db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx,
						     is_header, index);
		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx,
						     is_header, index);
		break;
	default:
		pbusy_list = NULL;
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : Unexpected code=%d\n",
			    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					  code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
			break;
	}

	WARN_ON(!pasync_handle);

	pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
			     beiscsi_conn->beiscsi_conn_cid);
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = dpl;
	*pcq_index = index;

	return pasync_handle;
}

static unsigned int
hwi_update_async_writables(struct beiscsi_hba *phba,
			   struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;

	num_entries = pasync_ctx->num_entries;
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
	}

	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : Duplicate notification received - index 0x%x!!\n",
			    cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}

9728d8d0 1740static void hwi_free_async_msg(struct beiscsi_hba *phba,
8a86e833
JK
1741 struct hwi_async_pdu_context *pasync_ctx,
1742 unsigned int cri)
6733b39a 1743{
6733b39a
JK
1744 struct async_pdu_handle *pasync_handle, *tmp_handle;
1745 struct list_head *plist;
6733b39a 1746
6733b39a 1747 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
6733b39a
JK
1748 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1749 list_del(&pasync_handle->link);
1750
9728d8d0 1751 if (pasync_handle->is_header) {
6733b39a
JK
1752 list_add_tail(&pasync_handle->link,
1753 &pasync_ctx->async_header.free_list);
1754 pasync_ctx->async_header.free_entries++;
6733b39a
JK
1755 } else {
1756 list_add_tail(&pasync_handle->link,
1757 &pasync_ctx->async_data.free_list);
1758 pasync_ctx->async_data.free_entries++;
6733b39a
JK
1759 }
1760 }
1761
1762 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1763 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1764 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
6733b39a
JK
1765}
1766
1767static struct phys_addr *
1768hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1769 unsigned int is_header, unsigned int host_write_ptr)
1770{
1771 struct phys_addr *pasync_sge = NULL;
1772
1773 if (is_header)
1774 pasync_sge = pasync_ctx->async_header.ring_base;
1775 else
1776 pasync_sge = pasync_ctx->async_data.ring_base;
1777
1778 return pasync_sge + host_write_ptr;
1779}
1780
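/*
 * Replenish the default PDU header or data ring of the given ULP:
 * handles are moved from the free list onto the per-index busy lists
 * and their bus addresses written into the ring. Postings are rounded
 * down to a multiple of 8 entries before the doorbell is rung, which
 * in essence is composed as
 *
 *	doorbell = ring_id | (1 << REARM) | (nposted << CQPROC);
 */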
1781static void hwi_post_async_buffers(struct beiscsi_hba *phba,
8a86e833 1782 unsigned int is_header, uint8_t ulp_num)
6733b39a
JK
1783{
1784 struct hwi_controller *phwi_ctrlr;
1785 struct hwi_async_pdu_context *pasync_ctx;
1786 struct async_pdu_handle *pasync_handle;
1787 struct list_head *pfree_link, *pbusy_list;
1788 struct phys_addr *pasync_sge;
1789 unsigned int ring_id, num_entries;
8a86e833 1790 unsigned int host_write_num, doorbell_offset;
6733b39a
JK
1791 unsigned int writables;
1792 unsigned int i = 0;
1793 u32 doorbell = 0;
1794
1795 phwi_ctrlr = phba->phwi_ctrlr;
8a86e833 1796 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
dc63aac6 1797 num_entries = pasync_ctx->num_entries;
6733b39a
JK
1798
1799 if (is_header) {
6733b39a
JK
1800 writables = min(pasync_ctx->async_header.writables,
1801 pasync_ctx->async_header.free_entries);
1802 pfree_link = pasync_ctx->async_header.free_list.next;
1803 host_write_num = pasync_ctx->async_header.host_write_ptr;
8a86e833
JK
1804 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
1805 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
1806 doorbell_offset;
6733b39a 1807 } else {
6733b39a
JK
1808 writables = min(pasync_ctx->async_data.writables,
1809 pasync_ctx->async_data.free_entries);
1810 pfree_link = pasync_ctx->async_data.free_list.next;
1811 host_write_num = pasync_ctx->async_data.host_write_ptr;
8a86e833
JK
1812 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
1813 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
1814 doorbell_offset;
6733b39a
JK
1815 }
1816
1817 writables = (writables / 8) * 8;
1818 if (writables) {
1819 for (i = 0; i < writables; i++) {
1820 pbusy_list =
1821 hwi_get_async_busy_list(pasync_ctx, is_header,
1822 host_write_num);
1823 pasync_handle =
1824 list_entry(pfree_link, struct async_pdu_handle,
1825 link);
1826 WARN_ON(!pasync_handle);
1827 pasync_handle->consumed = 0;
1828
1829 pfree_link = pfree_link->next;
1830
1831 pasync_sge = hwi_get_ring_address(pasync_ctx,
1832 is_header, host_write_num);
1833
1834 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1835 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1836
1837 list_move(&pasync_handle->link, pbusy_list);
1838
1839 host_write_num++;
1840 host_write_num = host_write_num % num_entries;
1841 }
1842
1843 if (is_header) {
1844 pasync_ctx->async_header.host_write_ptr =
1845 host_write_num;
1846 pasync_ctx->async_header.free_entries -= writables;
1847 pasync_ctx->async_header.writables -= writables;
1848 pasync_ctx->async_header.busy_entries += writables;
1849 } else {
1850 pasync_ctx->async_data.host_write_ptr = host_write_num;
1851 pasync_ctx->async_data.free_entries -= writables;
1852 pasync_ctx->async_data.writables -= writables;
1853 pasync_ctx->async_data.busy_entries += writables;
1854 }
1855
1856 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1857 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1858 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1859 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1860 << DB_DEF_PDU_CQPROC_SHIFT;
1861
8a86e833 1862 iowrite32(doorbell, phba->db_va + doorbell_offset);
6733b39a
JK
1863 }
1864}
1865
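/*
 * Discard a default PDU data buffer that cannot be used (called from
 * the UNSOL_DATA_DIGEST_ERROR_NOTIFY path): look up the handle for this
 * CQE, reclaim the ring entry if it was not already consumed, free all
 * buffers queued on the connection's CRI and repost fresh buffers.
 */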
1866static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1867 struct beiscsi_conn *beiscsi_conn,
1868 struct i_t_dpdu_cqe *pdpdu_cqe)
1869{
1870 struct hwi_controller *phwi_ctrlr;
1871 struct hwi_async_pdu_context *pasync_ctx;
1872 struct async_pdu_handle *pasync_handle = NULL;
1873 unsigned int cq_index = -1;
8a86e833
JK
1874 uint16_t cri_index = BE_GET_CRI_FROM_CID(
1875 beiscsi_conn->beiscsi_conn_cid);
6733b39a
JK
1876
1877 phwi_ctrlr = phba->phwi_ctrlr;
8a86e833
JK
1878 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1879 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1880 cri_index));
6733b39a
JK
1881
1882 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1883 pdpdu_cqe, &cq_index);
1884 BUG_ON(pasync_handle->is_header != 0);
1885 if (pasync_handle->consumed == 0)
99bc5d55
JSJ
1886 hwi_update_async_writables(phba, pasync_ctx,
1887 pasync_handle->is_header, cq_index);
6733b39a 1888
8a86e833
JK
1889 hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
1890 hwi_post_async_buffers(phba, pasync_handle->is_header,
1891 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1892 cri_index));
6733b39a
JK
1893}
1894
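/*
 * Reassemble one unsolicited PDU for the given CRI: the first handle on
 * the wait queue carries the header, the remaining data buffers are
 * copied back to back into the first data buffer, and the result is
 * passed to beiscsi_process_async_pdu() before the handles are freed.
 */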
1895static unsigned int
1896hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1897 struct beiscsi_hba *phba,
1898 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1899{
1900 struct list_head *plist;
1901 struct async_pdu_handle *pasync_handle;
1902 void *phdr = NULL;
1903 unsigned int hdr_len = 0, buf_len = 0;
1904 unsigned int status, index = 0, offset = 0;
1905 void *pfirst_buffer = NULL;
1906 unsigned int num_buf = 0;
1907
1908 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1909
1910 list_for_each_entry(pasync_handle, plist, link) {
1911 if (index == 0) {
1912 phdr = pasync_handle->pbuffer;
1913 hdr_len = pasync_handle->buffer_len;
1914 } else {
1915 buf_len = pasync_handle->buffer_len;
1916 if (!num_buf) {
1917 pfirst_buffer = pasync_handle->pbuffer;
1918 num_buf++;
1919 }
1920 memcpy(pfirst_buffer + offset,
1921 pasync_handle->pbuffer, buf_len);
f2ba02b8 1922 offset += buf_len;
6733b39a
JK
1923 }
1924 index++;
1925 }
1926
1927 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
7da50879 1928 phdr, hdr_len, pfirst_buffer,
f2ba02b8 1929 offset);
6733b39a 1930
8a86e833 1931 hwi_free_async_msg(phba, pasync_ctx, cri);
6733b39a
JK
1932 return 0;
1933}
1934
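/*
 * Account a newly arrived header or data fragment against the per-CRI
 * wait queue. A header records the expected payload length taken from
 * the PDU's data_len fields; data fragments accumulate bytes_received,
 * and the complete PDU is forwarded once the expected number of bytes
 * (possibly zero) has been gathered.
 */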
1935static unsigned int
1936hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1937 struct beiscsi_hba *phba,
1938 struct async_pdu_handle *pasync_handle)
1939{
1940 struct hwi_async_pdu_context *pasync_ctx;
1941 struct hwi_controller *phwi_ctrlr;
1942 unsigned int bytes_needed = 0, status = 0;
1943 unsigned short cri = pasync_handle->cri;
1944 struct pdu_base *ppdu;
1945
1946 phwi_ctrlr = phba->phwi_ctrlr;
8a86e833
JK
1947 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1948 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1949 BE_GET_CRI_FROM_CID(beiscsi_conn->
1950 beiscsi_conn_cid)));
6733b39a
JK
1951
1952 list_del(&pasync_handle->link);
1953 if (pasync_handle->is_header) {
1954 pasync_ctx->async_header.busy_entries--;
1955 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
8a86e833 1956 hwi_free_async_msg(phba, pasync_ctx, cri);
6733b39a
JK
1957 BUG();
1958 }
1959
1960 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1961 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1962 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1963 (unsigned short)pasync_handle->buffer_len;
1964 list_add_tail(&pasync_handle->link,
1965 &pasync_ctx->async_entry[cri].wait_queue.list);
1966
1967 ppdu = pasync_handle->pbuffer;
1968 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1969 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1970 0xFFFF0000) | ((be16_to_cpu((ppdu->
1971 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1972 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1973
1974 if (status == 0) {
1975 pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1976 bytes_needed;
1977
1978 if (bytes_needed == 0)
1979 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1980 pasync_ctx, cri);
1981 }
1982 } else {
1983 pasync_ctx->async_data.busy_entries--;
1984 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1985 list_add_tail(&pasync_handle->link,
1986 &pasync_ctx->async_entry[cri].wait_queue.
1987 list);
1988 pasync_ctx->async_entry[cri].wait_queue.
1989 bytes_received +=
1990 (unsigned short)pasync_handle->buffer_len;
1991
1992 if (pasync_ctx->async_entry[cri].wait_queue.
1993 bytes_received >=
1994 pasync_ctx->async_entry[cri].wait_queue.
1995 bytes_needed)
1996 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1997 pasync_ctx, cri);
1998 }
1999 }
2000 return status;
2001}
2002
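/*
 * Normal completion path for UNSOL_HDR/UNSOL_DATA CQEs: resolve the
 * async PDU context for this connection's ULP, map the CQE back to its
 * buffer handle, update the ring accounting, gather the fragment and
 * repost buffers to the ring that was consumed.
 */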
2003static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
2004 struct beiscsi_hba *phba,
2005 struct i_t_dpdu_cqe *pdpdu_cqe)
2006{
2007 struct hwi_controller *phwi_ctrlr;
2008 struct hwi_async_pdu_context *pasync_ctx;
2009 struct async_pdu_handle *pasync_handle = NULL;
2010 unsigned int cq_index = -1;
8a86e833
JK
2011 uint16_t cri_index = BE_GET_CRI_FROM_CID(
2012 beiscsi_conn->beiscsi_conn_cid);
6733b39a
JK
2013
2014 phwi_ctrlr = phba->phwi_ctrlr;
8a86e833
JK
2015 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
2016 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
2017 cri_index));
2018
6733b39a
JK
2019 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
2020 pdpdu_cqe, &cq_index);
2021
2022 if (pasync_handle->consumed == 0)
99bc5d55
JSJ
2023 hwi_update_async_writables(phba, pasync_ctx,
2024 pasync_handle->is_header, cq_index);
2025
6733b39a 2026 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
8a86e833
JK
2027 hwi_post_async_buffers(phba, pasync_handle->is_header,
2028 BEISCSI_GET_ULP_FROM_CRI(
2029 phwi_ctrlr, cri_index));
6733b39a
JK
2030}
2031
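/*
 * Drain the MCC completion queue: asynchronous events are handed to
 * beiscsi_process_async_event() and command completions to
 * be_mcc_compl_process_isr(). The CQ doorbell is rung without rearm
 * every 32 entries and once more, with rearm, when the queue is empty.
 */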
756d29c8
JK
2032static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
2033{
2034 struct be_queue_info *mcc_cq;
2035 struct be_mcc_compl *mcc_compl;
2036 unsigned int num_processed = 0;
2037
2038 mcc_cq = &phba->ctrl.mcc_obj.cq;
2039 mcc_compl = queue_tail_node(mcc_cq);
2040 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
2041 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
2042
2043 if (num_processed >= 32) {
2044 hwi_ring_cq_db(phba, mcc_cq->id,
2045 num_processed, 0, 0);
2046 num_processed = 0;
2047 }
2048 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
53aefe25 2049 beiscsi_process_async_event(phba, mcc_compl);
756d29c8
JK
2050 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
2051 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
2052 atomic_dec(&phba->ctrl.mcc_obj.q.used);
2053 }
2054
2055 mcc_compl->flags = 0;
2056 queue_tail_inc(mcc_cq);
2057 mcc_compl = queue_tail_node(mcc_cq);
2058 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
2059 num_processed++;
2060 }
2061
2062 if (num_processed > 0)
2063 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
2064
2065}
bfead3b2 2066
6763daae
JSJ
2067/**
 2068 * beiscsi_process_cq() - Process the Completion Queue
 2069 * @pbe_eq: event queue on which the completion arrived
 2070 *
 2071 * Return:
 2072 * Number of Completion Entries processed.
2073 **/
b7ab35b1 2074unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
6733b39a 2075{
6733b39a
JK
2076 struct be_queue_info *cq;
2077 struct sol_cqe *sol;
2078 struct dmsg_cqe *dmsg;
2079 unsigned int num_processed = 0;
2080 unsigned int tot_nump = 0;
0a513dd8 2081 unsigned short code = 0, cid = 0;
a7909b39 2082 uint16_t cri_index = 0;
6733b39a 2083 struct beiscsi_conn *beiscsi_conn;
c2462288
JK
2084 struct beiscsi_endpoint *beiscsi_ep;
2085 struct iscsi_endpoint *ep;
bfead3b2 2086 struct beiscsi_hba *phba;
6733b39a 2087
bfead3b2 2088 cq = pbe_eq->cq;
6733b39a 2089 sol = queue_tail_node(cq);
bfead3b2 2090 phba = pbe_eq->phba;
6733b39a
JK
2091
2092 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
2093 CQE_VALID_MASK) {
2094 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
2095
73133261
JSJ
2096 code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
2097 32] & CQE_CODE_MASK);
2098
2099 /* Get the CID */
2c9dfd36
JK
2100 if (is_chip_be2_be3r(phba)) {
2101 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
2102 } else {
73133261
JSJ
2103 if ((code == DRIVERMSG_NOTIFY) ||
2104 (code == UNSOL_HDR_NOTIFY) ||
2105 (code == UNSOL_DATA_NOTIFY))
2106 cid = AMAP_GET_BITS(
2107 struct amap_i_t_dpdu_cqe_v2,
2108 cid, sol);
2109 else
2110 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
2111 cid, sol);
2c9dfd36 2112 }
32951dd8 2113
a7909b39
JK
2114 cri_index = BE_GET_CRI_FROM_CID(cid);
2115 ep = phba->ep_array[cri_index];
b7ab35b1
JK
2116
2117 if (ep == NULL) {
2118 /* connection has already been freed
2119 * just move on to next one
2120 */
2121 beiscsi_log(phba, KERN_WARNING,
2122 BEISCSI_LOG_INIT,
2123 "BM_%d : proc cqe of disconn ep: cid %d\n",
2124 cid);
2125 goto proc_next_cqe;
2126 }
2127
c2462288
JK
2128 beiscsi_ep = ep->dd_data;
2129 beiscsi_conn = beiscsi_ep->conn;
756d29c8 2130
6733b39a 2131 if (num_processed >= 32) {
bfead3b2 2132 hwi_ring_cq_db(phba, cq->id,
6733b39a
JK
2133 num_processed, 0, 0);
2134 tot_nump += num_processed;
2135 num_processed = 0;
2136 }
2137
0a513dd8 2138 switch (code) {
6733b39a
JK
2139 case SOL_CMD_COMPLETE:
2140 hwi_complete_cmd(beiscsi_conn, phba, sol);
2141 break;
2142 case DRIVERMSG_NOTIFY:
99bc5d55
JSJ
2143 beiscsi_log(phba, KERN_INFO,
2144 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2145 "BM_%d : Received %s[%d] on CID : %d\n",
2146 cqe_desc[code], code, cid);
99bc5d55 2147
6733b39a
JK
2148 dmsg = (struct dmsg_cqe *)sol;
2149 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
2150 break;
2151 case UNSOL_HDR_NOTIFY:
99bc5d55
JSJ
2152 beiscsi_log(phba, KERN_INFO,
2153 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2154 "BM_%d : Received %s[%d] on CID : %d\n",
2155 cqe_desc[code], code, cid);
99bc5d55 2156
8f09a3b9 2157 spin_lock_bh(&phba->async_pdu_lock);
bfead3b2
JK
2158 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2159 (struct i_t_dpdu_cqe *)sol);
8f09a3b9 2160 spin_unlock_bh(&phba->async_pdu_lock);
bfead3b2 2161 break;
6733b39a 2162 case UNSOL_DATA_NOTIFY:
99bc5d55
JSJ
2163 beiscsi_log(phba, KERN_INFO,
2164 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
6763daae
JSJ
2165 "BM_%d : Received %s[%d] on CID : %d\n",
2166 cqe_desc[code], code, cid);
99bc5d55 2167
8f09a3b9 2168 spin_lock_bh(&phba->async_pdu_lock);
6733b39a
JK
2169 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2170 (struct i_t_dpdu_cqe *)sol);
8f09a3b9 2171 spin_unlock_bh(&phba->async_pdu_lock);
6733b39a
JK
2172 break;
2173 case CXN_INVALIDATE_INDEX_NOTIFY:
2174 case CMD_INVALIDATED_NOTIFY:
2175 case CXN_INVALIDATE_NOTIFY:
99bc5d55
JSJ
2176 beiscsi_log(phba, KERN_ERR,
2177 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2178 "BM_%d : Ignoring %s[%d] on CID : %d\n",
2179 cqe_desc[code], code, cid);
6733b39a
JK
2180 break;
2181 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
2182 case CMD_KILLED_INVALID_STATSN_RCVD:
2183 case CMD_KILLED_INVALID_R2T_RCVD:
2184 case CMD_CXN_KILLED_LUN_INVALID:
2185 case CMD_CXN_KILLED_ICD_INVALID:
2186 case CMD_CXN_KILLED_ITT_INVALID:
2187 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
2188 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
99bc5d55
JSJ
2189 beiscsi_log(phba, KERN_ERR,
2190 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
6763daae
JSJ
2191 "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
2192 cqe_desc[code], code, cid);
6733b39a
JK
2193 break;
2194 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
99bc5d55
JSJ
2195 beiscsi_log(phba, KERN_ERR,
2196 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2197 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
2198 cqe_desc[code], code, cid);
8f09a3b9 2199 spin_lock_bh(&phba->async_pdu_lock);
6733b39a
JK
2200 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
2201 (struct i_t_dpdu_cqe *) sol);
8f09a3b9 2202 spin_unlock_bh(&phba->async_pdu_lock);
6733b39a
JK
2203 break;
2204 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2205 case CXN_KILLED_BURST_LEN_MISMATCH:
2206 case CXN_KILLED_AHS_RCVD:
2207 case CXN_KILLED_HDR_DIGEST_ERR:
2208 case CXN_KILLED_UNKNOWN_HDR:
2209 case CXN_KILLED_STALE_ITT_TTT_RCVD:
2210 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
2211 case CXN_KILLED_TIMED_OUT:
2212 case CXN_KILLED_FIN_RCVD:
6763daae
JSJ
2213 case CXN_KILLED_RST_SENT:
2214 case CXN_KILLED_RST_RCVD:
6733b39a
JK
2215 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2216 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2217 case CXN_KILLED_OVER_RUN_RESIDUAL:
2218 case CXN_KILLED_UNDER_RUN_RESIDUAL:
2219 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
99bc5d55
JSJ
2220 beiscsi_log(phba, KERN_ERR,
2221 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2222 "BM_%d : Event %s[%d] received on CID : %d\n",
2223 cqe_desc[code], code, cid);
0a513dd8
JSJ
2224 if (beiscsi_conn)
2225 iscsi_conn_failure(beiscsi_conn->conn,
2226 ISCSI_ERR_CONN_FAILED);
6733b39a
JK
2227 break;
2228 default:
99bc5d55
JSJ
2229 beiscsi_log(phba, KERN_ERR,
2230 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2231 "BM_%d : Invalid CQE Event Received Code : %d"
2232 "CID 0x%x...\n",
0a513dd8 2233 code, cid);
6733b39a
JK
2234 break;
2235 }
2236
b7ab35b1 2237proc_next_cqe:
6733b39a
JK
2238 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2239 queue_tail_inc(cq);
2240 sol = queue_tail_node(cq);
2241 num_processed++;
2242 }
2243
2244 if (num_processed > 0) {
2245 tot_nump += num_processed;
bfead3b2 2246 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
6733b39a
JK
2247 }
2248 return tot_nump;
2249}
2250
756d29c8 2251void beiscsi_process_all_cqs(struct work_struct *work)
6733b39a
JK
2252{
2253 unsigned long flags;
bfead3b2
JK
2254 struct hwi_controller *phwi_ctrlr;
2255 struct hwi_context_memory *phwi_context;
72fb46a9
JSJ
2256 struct beiscsi_hba *phba;
2257 struct be_eq_obj *pbe_eq =
2258 container_of(work, struct be_eq_obj, work_cqs);
6733b39a 2259
72fb46a9 2260 phba = pbe_eq->phba;
bfead3b2
JK
2261 phwi_ctrlr = phba->phwi_ctrlr;
2262 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2 2263
72fb46a9 2264 if (pbe_eq->todo_mcc_cq) {
6733b39a 2265 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 2266 pbe_eq->todo_mcc_cq = false;
6733b39a 2267 spin_unlock_irqrestore(&phba->isr_lock, flags);
756d29c8 2268 beiscsi_process_mcc_isr(phba);
6733b39a
JK
2269 }
2270
72fb46a9 2271 if (pbe_eq->todo_cq) {
6733b39a 2272 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 2273 pbe_eq->todo_cq = false;
6733b39a 2274 spin_unlock_irqrestore(&phba->isr_lock, flags);
bfead3b2 2275 beiscsi_process_cq(pbe_eq);
6733b39a 2276 }
72fb46a9
JSJ
2277
2278 /* rearm EQ for further interrupts */
2279 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
6733b39a
JK
2280}
2281
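/*
 * irq_poll callback: process completions on this EQ's CQ and, when
 * fewer entries than the budget were handled, complete the poll and
 * rearm the event queue so further interrupts are delivered.
 */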
511cbce2 2282static int be_iopoll(struct irq_poll *iop, int budget)
6733b39a 2283{
ad3f428e 2284 unsigned int ret;
6733b39a 2285 struct beiscsi_hba *phba;
bfead3b2 2286 struct be_eq_obj *pbe_eq;
6733b39a 2287
bfead3b2
JK
2288 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2289 ret = beiscsi_process_cq(pbe_eq);
73af08e1 2290 pbe_eq->cq_count += ret;
6733b39a 2291 if (ret < budget) {
bfead3b2 2292 phba = pbe_eq->phba;
511cbce2 2293 irq_poll_complete(iop);
99bc5d55
JSJ
2294 beiscsi_log(phba, KERN_INFO,
2295 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2296 "BM_%d : rearm pbe_eq->q.id =%d\n",
2297 pbe_eq->q.id);
bfead3b2 2298 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
6733b39a
JK
2299 }
2300 return ret;
2301}
2302
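/*
 * Build the I/O WRB and SGL in the v2 WRB format: the BHS address and
 * the first two scatterlist elements are embedded in the WRB itself,
 * while the ICD SGL fragment describes the BHS and then every
 * scatterlist element, with the final entry flagged as last_sge.
 */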
09a1093a
JSJ
2303static void
2304hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2305 unsigned int num_sg, struct beiscsi_io_task *io_task)
2306{
2307 struct iscsi_sge *psgl;
2308 unsigned int sg_len, index;
2309 unsigned int sge_len = 0;
2310 unsigned long long addr;
2311 struct scatterlist *l_sg;
2312 unsigned int offset;
2313
2314 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
2315 io_task->bhs_pa.u.a32.address_lo);
2316 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
2317 io_task->bhs_pa.u.a32.address_hi);
2318
2319 l_sg = sg;
2320 for (index = 0; (index < num_sg) && (index < 2); index++,
2321 sg = sg_next(sg)) {
2322 if (index == 0) {
2323 sg_len = sg_dma_len(sg);
2324 addr = (u64) sg_dma_address(sg);
2325 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2326 sge0_addr_lo, pwrb,
2327 lower_32_bits(addr));
2328 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2329 sge0_addr_hi, pwrb,
2330 upper_32_bits(addr));
2331 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2332 sge0_len, pwrb,
2333 sg_len);
2334 sge_len = sg_len;
2335 } else {
2336 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
2337 pwrb, sge_len);
2338 sg_len = sg_dma_len(sg);
2339 addr = (u64) sg_dma_address(sg);
2340 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2341 sge1_addr_lo, pwrb,
2342 lower_32_bits(addr));
2343 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2344 sge1_addr_hi, pwrb,
2345 upper_32_bits(addr));
2346 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2347 sge1_len, pwrb,
2348 sg_len);
2349 }
2350 }
2351 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2352 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2353
2354 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2355
2356 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2357 io_task->bhs_pa.u.a32.address_hi);
2358 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2359 io_task->bhs_pa.u.a32.address_lo);
2360
2361 if (num_sg == 1) {
2362 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2363 1);
2364 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2365 0);
2366 } else if (num_sg == 2) {
2367 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2368 0);
2369 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2370 1);
2371 } else {
2372 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2373 0);
2374 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2375 0);
2376 }
2377
2378 sg = l_sg;
2379 psgl++;
2380 psgl++;
2381 offset = 0;
2382 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2383 sg_len = sg_dma_len(sg);
2384 addr = (u64) sg_dma_address(sg);
2385 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2386 lower_32_bits(addr));
2387 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2388 upper_32_bits(addr));
2389 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2390 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2391 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2392 offset += sg_len;
2393 }
2394 psgl--;
2395 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2396}
2397
6733b39a
JK
2398static void
2399hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2400 unsigned int num_sg, struct beiscsi_io_task *io_task)
2401{
2402 struct iscsi_sge *psgl;
58ff4bd0 2403 unsigned int sg_len, index;
6733b39a
JK
2404 unsigned int sge_len = 0;
2405 unsigned long long addr;
2406 struct scatterlist *l_sg;
2407 unsigned int offset;
2408
2409 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2410 io_task->bhs_pa.u.a32.address_lo);
2411 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2412 io_task->bhs_pa.u.a32.address_hi);
2413
2414 l_sg = sg;
48bd86cf
JK
2415 for (index = 0; (index < num_sg) && (index < 2); index++,
2416 sg = sg_next(sg)) {
6733b39a
JK
2417 if (index == 0) {
2418 sg_len = sg_dma_len(sg);
2419 addr = (u64) sg_dma_address(sg);
2420 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
457ff3b7 2421 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2422 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
457ff3b7 2423 ((u32)(addr >> 32)));
6733b39a
JK
2424 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2425 sg_len);
2426 sge_len = sg_len;
6733b39a 2427 } else {
6733b39a
JK
2428 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2429 pwrb, sge_len);
2430 sg_len = sg_dma_len(sg);
2431 addr = (u64) sg_dma_address(sg);
2432 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
457ff3b7 2433 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2434 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
457ff3b7 2435 ((u32)(addr >> 32)));
6733b39a
JK
2436 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2437 sg_len);
2438 }
2439 }
2440 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2441 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2442
2443 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2444
2445 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2446 io_task->bhs_pa.u.a32.address_hi);
2447 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2448 io_task->bhs_pa.u.a32.address_lo);
2449
caf818f1
JK
2450 if (num_sg == 1) {
2451 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2452 1);
2453 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2454 0);
2455 } else if (num_sg == 2) {
2456 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2457 0);
2458 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2459 1);
2460 } else {
2461 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2462 0);
2463 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2464 0);
2465 }
6733b39a
JK
2466 sg = l_sg;
2467 psgl++;
2468 psgl++;
2469 offset = 0;
48bd86cf 2470 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
6733b39a
JK
2471 sg_len = sg_dma_len(sg);
2472 addr = (u64) sg_dma_address(sg);
2473 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2474 (addr & 0xFFFFFFFF));
2475 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2476 (addr >> 32));
2477 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2478 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2479 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2480 offset += sg_len;
2481 }
2482 psgl--;
2483 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2484}
2485
d629c471
JSJ
2486/**
 2488 * hwi_write_buffer() - Populate the WRB with task info
 2489 * @pwrb: pointer to the WRB entry
 2490 * @task: iSCSI task which is to be executed
2490 **/
6733b39a
JK
2491static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2492{
2493 struct iscsi_sge *psgl;
6733b39a
JK
2494 struct beiscsi_io_task *io_task = task->dd_data;
2495 struct beiscsi_conn *beiscsi_conn = io_task->conn;
2496 struct beiscsi_hba *phba = beiscsi_conn->phba;
09a1093a 2497 uint8_t dsp_value = 0;
6733b39a
JK
2498
2499 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2500 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2501 io_task->bhs_pa.u.a32.address_lo);
2502 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2503 io_task->bhs_pa.u.a32.address_hi);
2504
2505 if (task->data) {
09a1093a
JSJ
2506
2507 /* Check for the data_count */
2508 dsp_value = (task->data_count) ? 1 : 0;
2509
2c9dfd36
JK
2510 if (is_chip_be2_be3r(phba))
2511 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
09a1093a
JSJ
2512 pwrb, dsp_value);
2513 else
2c9dfd36 2514 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
09a1093a
JSJ
2515 pwrb, dsp_value);
2516
2517 /* Map addr only if there is data_count */
2518 if (dsp_value) {
d629c471
JSJ
2519 io_task->mtask_addr = pci_map_single(phba->pcidev,
2520 task->data,
2521 task->data_count,
2522 PCI_DMA_TODEVICE);
d629c471 2523 io_task->mtask_data_count = task->data_count;
09a1093a 2524 } else
d629c471 2525 io_task->mtask_addr = 0;
09a1093a 2526
6733b39a 2527 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
d629c471 2528 lower_32_bits(io_task->mtask_addr));
6733b39a 2529 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
d629c471 2530 upper_32_bits(io_task->mtask_addr));
6733b39a
JK
2531 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2532 task->data_count);
2533
2534 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2535 } else {
2536 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
d629c471 2537 io_task->mtask_addr = 0;
6733b39a
JK
2538 }
2539
2540 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2541
2542 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2543
2544 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2545 io_task->bhs_pa.u.a32.address_hi);
2546 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2547 io_task->bhs_pa.u.a32.address_lo);
2548 if (task->data) {
2549 psgl++;
2550 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2551 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2552 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2553 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2554 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2555 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2556
2557 psgl++;
2558 if (task->data) {
2559 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
d629c471 2560 lower_32_bits(io_task->mtask_addr));
6733b39a 2561 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
d629c471 2562 upper_32_bits(io_task->mtask_addr));
6733b39a
JK
2563 }
2564 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2565 }
2566 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2567}
2568
843ae752
JK
2569/**
 2570 * beiscsi_find_mem_req() - Find the memory needed by the driver
 2571 * @phba: pointer to the HBA struct
2572 **/
6733b39a
JK
2573static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2574{
8a86e833 2575 uint8_t mem_descr_index, ulp_num;
bfead3b2 2576 unsigned int num_cq_pages, num_async_pdu_buf_pages;
6733b39a
JK
2577 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2578 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2579
2580 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2581 sizeof(struct sol_cqe));
6733b39a
JK
2582
2583 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2584
2585 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2586 BE_ISCSI_PDU_HEADER_SIZE;
2587 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2588 sizeof(struct hwi_context_memory);
2589
6733b39a
JK
2590
2591 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2592 * (phba->params.wrbs_per_cxn)
2593 * phba->params.cxns_per_ctrl;
2594 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2595 (phba->params.wrbs_per_cxn);
2596 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2597 phba->params.cxns_per_ctrl);
2598
2599 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2600 phba->params.icds_per_ctrl;
2601 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2602 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
8a86e833
JK
2603 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2604 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
6733b39a 2605
8a86e833
JK
2606 num_async_pdu_buf_sgl_pages =
2607 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2608 phba, ulp_num) *
2609 sizeof(struct phys_addr));
2610
2611 num_async_pdu_buf_pages =
2612 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2613 phba, ulp_num) *
2614 phba->params.defpdu_hdr_sz);
2615
2616 num_async_pdu_data_pages =
2617 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2618 phba, ulp_num) *
2619 phba->params.defpdu_data_sz);
2620
2621 num_async_pdu_data_sgl_pages =
2622 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2623 phba, ulp_num) *
2624 sizeof(struct phys_addr));
2625
a129d92f
JK
2626 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
2627 (ulp_num * MEM_DESCR_OFFSET));
2628 phba->mem_req[mem_descr_index] =
2629 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2630 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
2631
8a86e833
JK
2632 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2633 (ulp_num * MEM_DESCR_OFFSET));
2634 phba->mem_req[mem_descr_index] =
2635 num_async_pdu_buf_pages *
2636 PAGE_SIZE;
2637
2638 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2639 (ulp_num * MEM_DESCR_OFFSET));
2640 phba->mem_req[mem_descr_index] =
2641 num_async_pdu_data_pages *
2642 PAGE_SIZE;
2643
2644 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2645 (ulp_num * MEM_DESCR_OFFSET));
2646 phba->mem_req[mem_descr_index] =
2647 num_async_pdu_buf_sgl_pages *
2648 PAGE_SIZE;
2649
2650 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
2651 (ulp_num * MEM_DESCR_OFFSET));
2652 phba->mem_req[mem_descr_index] =
2653 num_async_pdu_data_sgl_pages *
2654 PAGE_SIZE;
2655
2656 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2657 (ulp_num * MEM_DESCR_OFFSET));
2658 phba->mem_req[mem_descr_index] =
2659 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2660 sizeof(struct async_pdu_handle);
2661
2662 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2663 (ulp_num * MEM_DESCR_OFFSET));
2664 phba->mem_req[mem_descr_index] =
2665 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2666 sizeof(struct async_pdu_handle);
2667
2668 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2669 (ulp_num * MEM_DESCR_OFFSET));
2670 phba->mem_req[mem_descr_index] =
2671 sizeof(struct hwi_async_pdu_context) +
2672 (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2673 sizeof(struct hwi_async_entry));
2674 }
2675 }
6733b39a
JK
2676}
2677
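/*
 * Allocate every region listed in phba->mem_req as one or more
 * DMA-coherent fragments, shrinking the request size on allocation
 * failure (down to BE_MIN_MEM_SIZE) and recording the fragments in
 * phba->init_mem. Everything allocated so far is freed on error.
 */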
2678static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2679{
6733b39a 2680 dma_addr_t bus_add;
a7909b39
JK
2681 struct hwi_controller *phwi_ctrlr;
2682 struct be_mem_descriptor *mem_descr;
6733b39a
JK
2683 struct mem_array *mem_arr, *mem_arr_orig;
2684 unsigned int i, j, alloc_size, curr_alloc_size;
2685
3ec78271 2686 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
6733b39a
JK
2687 if (!phba->phwi_ctrlr)
2688 return -ENOMEM;
2689
a7909b39
JK
2690 /* Allocate memory for wrb_context */
2691 phwi_ctrlr = phba->phwi_ctrlr;
2692 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
2693 phba->params.cxns_per_ctrl,
2694 GFP_KERNEL);
2695 if (!phwi_ctrlr->wrb_context)
2696 return -ENOMEM;
2697
6733b39a
JK
2698 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2699 GFP_KERNEL);
2700 if (!phba->init_mem) {
a7909b39 2701 kfree(phwi_ctrlr->wrb_context);
6733b39a
JK
2702 kfree(phba->phwi_ctrlr);
2703 return -ENOMEM;
2704 }
2705
2706 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2707 GFP_KERNEL);
2708 if (!mem_arr_orig) {
2709 kfree(phba->init_mem);
a7909b39 2710 kfree(phwi_ctrlr->wrb_context);
6733b39a
JK
2711 kfree(phba->phwi_ctrlr);
2712 return -ENOMEM;
2713 }
2714
2715 mem_descr = phba->init_mem;
2716 for (i = 0; i < SE_MEM_MAX; i++) {
8a86e833
JK
2717 if (!phba->mem_req[i]) {
2718 mem_descr->mem_array = NULL;
2719 mem_descr++;
2720 continue;
2721 }
2722
6733b39a
JK
2723 j = 0;
2724 mem_arr = mem_arr_orig;
2725 alloc_size = phba->mem_req[i];
2726 memset(mem_arr, 0, sizeof(struct mem_array) *
2727 BEISCSI_MAX_FRAGS_INIT);
2728 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2729 do {
2730 mem_arr->virtual_address = pci_alloc_consistent(
2731 phba->pcidev,
2732 curr_alloc_size,
2733 &bus_add);
2734 if (!mem_arr->virtual_address) {
2735 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2736 goto free_mem;
2737 if (curr_alloc_size -
2738 rounddown_pow_of_two(curr_alloc_size))
2739 curr_alloc_size = rounddown_pow_of_two
2740 (curr_alloc_size);
2741 else
2742 curr_alloc_size = curr_alloc_size / 2;
2743 } else {
2744 mem_arr->bus_address.u.
2745 a64.address = (__u64) bus_add;
2746 mem_arr->size = curr_alloc_size;
2747 alloc_size -= curr_alloc_size;
2748 curr_alloc_size = min(be_max_phys_size *
2749 1024, alloc_size);
2750 j++;
2751 mem_arr++;
2752 }
2753 } while (alloc_size);
2754 mem_descr->num_elements = j;
2755 mem_descr->size_in_bytes = phba->mem_req[i];
2756 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2757 GFP_KERNEL);
2758 if (!mem_descr->mem_array)
2759 goto free_mem;
2760
2761 memcpy(mem_descr->mem_array, mem_arr_orig,
2762 sizeof(struct mem_array) * j);
2763 mem_descr++;
2764 }
2765 kfree(mem_arr_orig);
2766 return 0;
2767free_mem:
2768 mem_descr->num_elements = j;
2769 while ((i) || (j)) {
2770 for (j = mem_descr->num_elements; j > 0; j--) {
2771 pci_free_consistent(phba->pcidev,
2772 mem_descr->mem_array[j - 1].size,
2773 mem_descr->mem_array[j - 1].
2774 virtual_address,
457ff3b7
JK
2775 (unsigned long)mem_descr->
2776 mem_array[j - 1].
6733b39a
JK
2777 bus_address.u.a64.address);
2778 }
2779 if (i) {
2780 i--;
2781 kfree(mem_descr->mem_array);
2782 mem_descr--;
2783 }
2784 }
2785 kfree(mem_arr_orig);
2786 kfree(phba->init_mem);
a7909b39 2787 kfree(phba->phwi_ctrlr->wrb_context);
6733b39a
JK
2788 kfree(phba->phwi_ctrlr);
2789 return -ENOMEM;
2790}
2791
2792static int beiscsi_get_memory(struct beiscsi_hba *phba)
2793{
2794 beiscsi_find_mem_req(phba);
2795 return beiscsi_alloc_mem(phba);
2796}
2797
2798static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2799{
2800 struct pdu_data_out *pdata_out;
2801 struct pdu_nop_out *pnop_out;
2802 struct be_mem_descriptor *mem_descr;
2803
2804 mem_descr = phba->init_mem;
2805 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2806 pdata_out =
2807 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2808 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2809
2810 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2811 IIOC_SCSI_DATA);
2812
2813 pnop_out =
2814 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2815 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2816
2817 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2818 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2819 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2820 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2821}
2822
3ec78271 2823static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
6733b39a
JK
2824{
2825 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
a7909b39 2826 struct hwi_context_memory *phwi_ctxt;
3ec78271 2827 struct wrb_handle *pwrb_handle = NULL;
6733b39a
JK
2828 struct hwi_controller *phwi_ctrlr;
2829 struct hwi_wrb_context *pwrb_context;
3ec78271
JK
2830 struct iscsi_wrb *pwrb = NULL;
2831 unsigned int num_cxn_wrbh = 0;
2832 unsigned int num_cxn_wrb = 0, j, idx = 0, index;
6733b39a
JK
2833
2834 mem_descr_wrbh = phba->init_mem;
2835 mem_descr_wrbh += HWI_MEM_WRBH;
2836
2837 mem_descr_wrb = phba->init_mem;
2838 mem_descr_wrb += HWI_MEM_WRB;
6733b39a
JK
2839 phwi_ctrlr = phba->phwi_ctrlr;
2840
a7909b39
JK
2841 /* Allocate memory for WRBQ */
2842 phwi_ctxt = phwi_ctrlr->phwi_ctxt;
2843 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
843ae752 2844 phba->params.cxns_per_ctrl,
a7909b39
JK
2845 GFP_KERNEL);
2846 if (!phwi_ctxt->be_wrbq) {
2847 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2848 "BM_%d : WRBQ Mem Alloc Failed\n");
2849 return -ENOMEM;
2850 }
2851
2852 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
6733b39a 2853 pwrb_context = &phwi_ctrlr->wrb_context[index];
6733b39a
JK
2854 pwrb_context->pwrb_handle_base =
2855 kzalloc(sizeof(struct wrb_handle *) *
2856 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271 2857 if (!pwrb_context->pwrb_handle_base) {
99bc5d55
JSJ
2858 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2859 "BM_%d : Mem Alloc Failed. Failing to load\n");
3ec78271
JK
2860 goto init_wrb_hndl_failed;
2861 }
6733b39a
JK
2862 pwrb_context->pwrb_handle_basestd =
2863 kzalloc(sizeof(struct wrb_handle *) *
2864 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271 2865 if (!pwrb_context->pwrb_handle_basestd) {
99bc5d55
JSJ
2866 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2867 "BM_%d : Mem Alloc Failed. Failing to load\n");
3ec78271
JK
2868 goto init_wrb_hndl_failed;
2869 }
2870 if (!num_cxn_wrbh) {
2871 pwrb_handle =
2872 mem_descr_wrbh->mem_array[idx].virtual_address;
2873 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2874 ((sizeof(struct wrb_handle)) *
2875 phba->params.wrbs_per_cxn));
2876 idx++;
2877 }
2878 pwrb_context->alloc_index = 0;
2879 pwrb_context->wrb_handles_available = 0;
2880 pwrb_context->free_index = 0;
2881
6733b39a 2882 if (num_cxn_wrbh) {
6733b39a
JK
2883 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2884 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2885 pwrb_context->pwrb_handle_basestd[j] =
2886 pwrb_handle;
2887 pwrb_context->wrb_handles_available++;
bfead3b2 2888 pwrb_handle->wrb_index = j;
6733b39a
JK
2889 pwrb_handle++;
2890 }
6733b39a
JK
2891 num_cxn_wrbh--;
2892 }
2893 }
2894 idx = 0;
a7909b39 2895 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
6733b39a 2896 pwrb_context = &phwi_ctrlr->wrb_context[index];
3ec78271 2897 if (!num_cxn_wrb) {
6733b39a 2898 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
7c56533c 2899 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
3ec78271
JK
2900 ((sizeof(struct iscsi_wrb) *
2901 phba->params.wrbs_per_cxn));
2902 idx++;
2903 }
2904
2905 if (num_cxn_wrb) {
6733b39a
JK
2906 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2907 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2908 pwrb_handle->pwrb = pwrb;
2909 pwrb++;
2910 }
2911 num_cxn_wrb--;
2912 }
2913 }
3ec78271
JK
2914 return 0;
2915init_wrb_hndl_failed:
2916 for (j = index; j > 0; j--) {
2917 pwrb_context = &phwi_ctrlr->wrb_context[j];
2918 kfree(pwrb_context->pwrb_handle_base);
2919 kfree(pwrb_context->pwrb_handle_basestd);
2920 }
2921 return -ENOMEM;
6733b39a
JK
2922}
2923
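/*
 * Set up the async PDU context of each supported ULP from the memory
 * descriptors reserved in beiscsi_find_mem_req(): the context and its
 * entry array are carved out of the HWI_MEM_ASYNC_PDU_CONTEXT_ULP0
 * descriptor (offset by ULP number), and every default PDU header and
 * data buffer is placed on its free list with the virtual and bus
 * addresses filled in.
 */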
a7909b39 2924static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
6733b39a 2925{
8a86e833 2926 uint8_t ulp_num;
6733b39a
JK
2927 struct hwi_controller *phwi_ctrlr;
2928 struct hba_parameters *p = &phba->params;
2929 struct hwi_async_pdu_context *pasync_ctx;
2930 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
dc63aac6 2931 unsigned int index, idx, num_per_mem, num_async_data;
6733b39a
JK
2932 struct be_mem_descriptor *mem_descr;
2933
8a86e833
JK
2934 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2935 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
6733b39a 2936
8a86e833
JK
2937 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2938 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2939 (ulp_num * MEM_DESCR_OFFSET));
2940
2941 phwi_ctrlr = phba->phwi_ctrlr;
2942 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
2943 (struct hwi_async_pdu_context *)
2944 mem_descr->mem_array[0].virtual_address;
2945
2946 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
2947 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2948
2949 pasync_ctx->async_entry =
2950 (struct hwi_async_entry *)
2951 ((long unsigned int)pasync_ctx +
2952 sizeof(struct hwi_async_pdu_context));
2953
2954 pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
2955 ulp_num);
2956 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
2957
2958 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2959 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2960 (ulp_num * MEM_DESCR_OFFSET);
2961 if (mem_descr->mem_array[0].virtual_address) {
2962 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2963 "BM_%d : hwi_init_async_pdu_ctx"
2964 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
2965 ulp_num,
2966 mem_descr->mem_array[0].
2967 virtual_address);
2968 } else
2969 beiscsi_log(phba, KERN_WARNING,
2970 BEISCSI_LOG_INIT,
2971 "BM_%d : No Virtual address for ULP : %d\n",
2972 ulp_num);
2973
2974 pasync_ctx->async_header.va_base =
6733b39a 2975 mem_descr->mem_array[0].virtual_address;
6733b39a 2976
8a86e833
JK
2977 pasync_ctx->async_header.pa_base.u.a64.address =
2978 mem_descr->mem_array[0].
2979 bus_address.u.a64.address;
6733b39a 2980
8a86e833
JK
2981 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2982 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2983 (ulp_num * MEM_DESCR_OFFSET);
2984 if (mem_descr->mem_array[0].virtual_address) {
2985 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2986 "BM_%d : hwi_init_async_pdu_ctx"
2987 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
2988 ulp_num,
2989 mem_descr->mem_array[0].
2990 virtual_address);
2991 } else
2992 beiscsi_log(phba, KERN_WARNING,
2993 BEISCSI_LOG_INIT,
2994 "BM_%d : No Virtual address for ULP : %d\n",
2995 ulp_num);
2996
2997 pasync_ctx->async_header.ring_base =
2998 mem_descr->mem_array[0].virtual_address;
6733b39a 2999
8a86e833
JK
3000 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3001 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
3002 (ulp_num * MEM_DESCR_OFFSET);
3003 if (mem_descr->mem_array[0].virtual_address) {
3004 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3005 "BM_%d : hwi_init_async_pdu_ctx"
3006 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
3007 ulp_num,
3008 mem_descr->mem_array[0].
3009 virtual_address);
3010 } else
3011 beiscsi_log(phba, KERN_WARNING,
3012 BEISCSI_LOG_INIT,
3013 "BM_%d : No Virtual address for ULP : %d\n",
3014 ulp_num);
3015
3016 pasync_ctx->async_header.handle_base =
3017 mem_descr->mem_array[0].virtual_address;
3018 pasync_ctx->async_header.writables = 0;
3019 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
3020
3021 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3022 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3023 (ulp_num * MEM_DESCR_OFFSET);
3024 if (mem_descr->mem_array[0].virtual_address) {
3025 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3026 "BM_%d : hwi_init_async_pdu_ctx"
3027 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
3028 ulp_num,
3029 mem_descr->mem_array[0].
3030 virtual_address);
3031 } else
3032 beiscsi_log(phba, KERN_WARNING,
3033 BEISCSI_LOG_INIT,
3034 "BM_%d : No Virtual address for ULP : %d\n",
3035 ulp_num);
3036
3037 pasync_ctx->async_data.ring_base =
3038 mem_descr->mem_array[0].virtual_address;
6733b39a 3039
8a86e833
JK
3040 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3041 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
3042 (ulp_num * MEM_DESCR_OFFSET);
3043 if (!mem_descr->mem_array[0].virtual_address)
3044 beiscsi_log(phba, KERN_WARNING,
3045 BEISCSI_LOG_INIT,
3046 "BM_%d : No Virtual address for ULP : %d\n",
3047 ulp_num);
99bc5d55 3048
8a86e833
JK
3049 pasync_ctx->async_data.handle_base =
3050 mem_descr->mem_array[0].virtual_address;
3051 pasync_ctx->async_data.writables = 0;
3052 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
3053
3054 pasync_header_h =
3055 (struct async_pdu_handle *)
3056 pasync_ctx->async_header.handle_base;
3057 pasync_data_h =
3058 (struct async_pdu_handle *)
3059 pasync_ctx->async_data.handle_base;
3060
3061 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3062 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
3063 (ulp_num * MEM_DESCR_OFFSET);
3064 if (mem_descr->mem_array[0].virtual_address) {
3065 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3066 "BM_%d : hwi_init_async_pdu_ctx"
3067 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
3068 ulp_num,
3069 mem_descr->mem_array[0].
3070 virtual_address);
3071 } else
3072 beiscsi_log(phba, KERN_WARNING,
3073 BEISCSI_LOG_INIT,
3074 "BM_%d : No Virtual address for ULP : %d\n",
3075 ulp_num);
3076
3077 idx = 0;
dc63aac6
JK
3078 pasync_ctx->async_data.va_base =
3079 mem_descr->mem_array[idx].virtual_address;
3080 pasync_ctx->async_data.pa_base.u.a64.address =
3081 mem_descr->mem_array[idx].
3082 bus_address.u.a64.address;
3083
3084 num_async_data = ((mem_descr->mem_array[idx].size) /
3085 phba->params.defpdu_data_sz);
8a86e833 3086 num_per_mem = 0;
6733b39a 3087
8a86e833
JK
3088 for (index = 0; index < BEISCSI_GET_CID_COUNT
3089 (phba, ulp_num); index++) {
3090 pasync_header_h->cri = -1;
3091 pasync_header_h->index = (char)index;
3092 INIT_LIST_HEAD(&pasync_header_h->link);
3093 pasync_header_h->pbuffer =
3094 (void *)((unsigned long)
3095 (pasync_ctx->
3096 async_header.va_base) +
3097 (p->defpdu_hdr_sz * index));
3098
3099 pasync_header_h->pa.u.a64.address =
3100 pasync_ctx->async_header.pa_base.u.a64.
3101 address + (p->defpdu_hdr_sz * index);
3102
3103 list_add_tail(&pasync_header_h->link,
3104 &pasync_ctx->async_header.
3105 free_list);
3106 pasync_header_h++;
3107 pasync_ctx->async_header.free_entries++;
3108 pasync_ctx->async_header.writables++;
3109
3110 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3111 wait_queue.list);
3112 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3113 header_busy_list);
3114 pasync_data_h->cri = -1;
3115 pasync_data_h->index = (char)index;
3116 INIT_LIST_HEAD(&pasync_data_h->link);
3117
3118 if (!num_async_data) {
3119 num_per_mem = 0;
3120 idx++;
3121 pasync_ctx->async_data.va_base =
3122 mem_descr->mem_array[idx].
3123 virtual_address;
3124 pasync_ctx->async_data.pa_base.u.
3125 a64.address =
3126 mem_descr->mem_array[idx].
3127 bus_address.u.a64.address;
3128 num_async_data =
3129 ((mem_descr->mem_array[idx].
3130 size) /
3131 phba->params.defpdu_data_sz);
3132 }
3133 pasync_data_h->pbuffer =
3134 (void *)((unsigned long)
3135 (pasync_ctx->async_data.va_base) +
3136 (p->defpdu_data_sz * num_per_mem));
3137
3138 pasync_data_h->pa.u.a64.address =
3139 pasync_ctx->async_data.pa_base.u.a64.
3140 address + (p->defpdu_data_sz *
3141 num_per_mem);
3142 num_per_mem++;
3143 num_async_data--;
3144
3145 list_add_tail(&pasync_data_h->link,
3146 &pasync_ctx->async_data.
3147 free_list);
3148 pasync_data_h++;
3149 pasync_ctx->async_data.free_entries++;
3150 pasync_ctx->async_data.writables++;
3151
3152 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3153 data_busy_list);
3154 }
6733b39a 3155
8a86e833
JK
3156 pasync_ctx->async_header.host_write_ptr = 0;
3157 pasync_ctx->async_header.ep_read_ptr = -1;
3158 pasync_ctx->async_data.host_write_ptr = 0;
3159 pasync_ctx->async_data.ep_read_ptr = -1;
3160 }
6733b39a
JK
3161 }
3162
a7909b39 3163 return 0;
6733b39a
JK
3164}
3165
3166static int
3167be_sgl_create_contiguous(void *virtual_address,
3168 u64 physical_address, u32 length,
3169 struct be_dma_mem *sgl)
3170{
3171 WARN_ON(!virtual_address);
3172 WARN_ON(!physical_address);
dd29dae0 3173 WARN_ON(!length);
6733b39a
JK
3174 WARN_ON(!sgl);
3175
3176 sgl->va = virtual_address;
457ff3b7 3177 sgl->dma = (unsigned long)physical_address;
6733b39a
JK
3178 sgl->size = length;
3179
3180 return 0;
3181}
3182
3183static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
3184{
3185 memset(sgl, 0, sizeof(*sgl));
3186}
3187
3188static void
3189hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
3190 struct mem_array *pmem, struct be_dma_mem *sgl)
3191{
3192 if (sgl->va)
3193 be_sgl_destroy_contiguous(sgl);
3194
3195 be_sgl_create_contiguous(pmem->virtual_address,
3196 pmem->bus_address.u.a64.address,
3197 pmem->size, sgl);
3198}
3199
3200static void
3201hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
3202 struct mem_array *pmem, struct be_dma_mem *sgl)
3203{
3204 if (sgl->va)
3205 be_sgl_destroy_contiguous(sgl);
3206
3207 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
3208 pmem->bus_address.u.a64.address,
3209 pmem->size, sgl);
3210}
3211
3212static int be_fill_queue(struct be_queue_info *q,
3213 u16 len, u16 entry_size, void *vaddress)
3214{
3215 struct be_dma_mem *mem = &q->dma_mem;
3216
3217 memset(q, 0, sizeof(*q));
3218 q->len = len;
3219 q->entry_size = entry_size;
3220 mem->size = len * entry_size;
3221 mem->va = vaddress;
3222 if (!mem->va)
3223 return -ENOMEM;
3224 memset(mem->va, 0, mem->size);
3225 return 0;
3226}
3227
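/*
 * Create one event queue per CPU (plus one for MCC when MSI-X is
 * enabled), each backed by DMA-coherent pages sized for
 * num_eq_entries; the DMA memory of queues set up so far is freed if
 * an allocation or the EQ create command fails.
 */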
bfead3b2 3228static int beiscsi_create_eqs(struct beiscsi_hba *phba,
6733b39a
JK
3229 struct hwi_context_memory *phwi_context)
3230{
bfead3b2 3231 unsigned int i, num_eq_pages;
99bc5d55 3232 int ret = 0, eq_for_mcc;
6733b39a
JK
3233 struct be_queue_info *eq;
3234 struct be_dma_mem *mem;
6733b39a 3235 void *eq_vaddress;
bfead3b2 3236 dma_addr_t paddr;
6733b39a 3237
bfead3b2
JK
3238 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
3239 sizeof(struct be_eq_entry));
6733b39a 3240
bfead3b2
JK
3241 if (phba->msix_enabled)
3242 eq_for_mcc = 1;
3243 else
3244 eq_for_mcc = 0;
3245 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3246 eq = &phwi_context->be_eq[i].q;
3247 mem = &eq->dma_mem;
3248 phwi_context->be_eq[i].phba = phba;
3249 eq_vaddress = pci_alloc_consistent(phba->pcidev,
3250 num_eq_pages * PAGE_SIZE,
3251 &paddr);
3252 if (!eq_vaddress)
3253 goto create_eq_error;
3254
3255 mem->va = eq_vaddress;
3256 ret = be_fill_queue(eq, phba->params.num_eq_entries,
3257 sizeof(struct be_eq_entry), eq_vaddress);
3258 if (ret) {
99bc5d55
JSJ
3259 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3260 "BM_%d : be_fill_queue Failed for EQ\n");
bfead3b2
JK
3261 goto create_eq_error;
3262 }
6733b39a 3263
bfead3b2
JK
3264 mem->dma = paddr;
3265 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
3266 phwi_context->cur_eqd);
3267 if (ret) {
99bc5d55
JSJ
3268 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3269 "BM_%d : beiscsi_cmd_eq_create"
3270 "Failed for EQ\n");
bfead3b2
JK
3271 goto create_eq_error;
3272 }
99bc5d55
JSJ
3273
3274 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3275 "BM_%d : eqid = %d\n",
3276 phwi_context->be_eq[i].q.id);
6733b39a 3277 }
6733b39a 3278 return 0;
bfead3b2 3279create_eq_error:
107dfcba 3280 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
bfead3b2
JK
3281 eq = &phwi_context->be_eq[i].q;
3282 mem = &eq->dma_mem;
3283 if (mem->va)
3284 pci_free_consistent(phba->pcidev, num_eq_pages
3285 * PAGE_SIZE,
3286 mem->va, mem->dma);
3287 }
3288 return ret;
6733b39a
JK
3289}
3290
bfead3b2 3291static int beiscsi_create_cqs(struct beiscsi_hba *phba,
6733b39a
JK
3292 struct hwi_context_memory *phwi_context)
3293{
bfead3b2 3294 unsigned int i, num_cq_pages;
99bc5d55 3295 int ret = 0;
6733b39a
JK
3296 struct be_queue_info *cq, *eq;
3297 struct be_dma_mem *mem;
bfead3b2 3298 struct be_eq_obj *pbe_eq;
6733b39a 3299 void *cq_vaddress;
bfead3b2 3300 dma_addr_t paddr;
6733b39a 3301
bfead3b2
JK
3302 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
3303 sizeof(struct sol_cqe));
6733b39a 3304
bfead3b2
JK
3305 for (i = 0; i < phba->num_cpus; i++) {
3306 cq = &phwi_context->be_cq[i];
3307 eq = &phwi_context->be_eq[i].q;
3308 pbe_eq = &phwi_context->be_eq[i];
3309 pbe_eq->cq = cq;
3310 pbe_eq->phba = phba;
3311 mem = &cq->dma_mem;
3312 cq_vaddress = pci_alloc_consistent(phba->pcidev,
3313 num_cq_pages * PAGE_SIZE,
3314 &paddr);
3315 if (!cq_vaddress)
3316 goto create_cq_error;
7da50879 3317 ret = be_fill_queue(cq, phba->params.num_cq_entries,
bfead3b2
JK
3318 sizeof(struct sol_cqe), cq_vaddress);
3319 if (ret) {
99bc5d55
JSJ
3320 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3321 "BM_%d : be_fill_queue Failed "
3322 "for ISCSI CQ\n");
bfead3b2
JK
3323 goto create_cq_error;
3324 }
3325
3326 mem->dma = paddr;
3327 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
3328 false, 0);
3329 if (ret) {
99bc5d55
JSJ
3330 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3331 "BM_%d : beiscsi_cmd_eq_create"
3332 "Failed for ISCSI CQ\n");
bfead3b2
JK
3333 goto create_cq_error;
3334 }
99bc5d55
JSJ
3335 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3336 "BM_%d : iscsi cq_id is %d for eq_id %d\n"
3337 "iSCSI CQ CREATED\n", cq->id, eq->id);
6733b39a 3338 }
6733b39a 3339 return 0;
bfead3b2
JK
3340
3341create_cq_error:
3342 for (i = 0; i < phba->num_cpus; i++) {
3343 cq = &phwi_context->be_cq[i];
3344 mem = &cq->dma_mem;
3345 if (mem->va)
3346 pci_free_consistent(phba->pcidev, num_cq_pages
3347 * PAGE_SIZE,
3348 mem->va, mem->dma);
3349 }
3350 return ret;
3351
6733b39a
JK
3352}
3353
3354static int
3355beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3356 struct hwi_context_memory *phwi_context,
3357 struct hwi_controller *phwi_ctrlr,
8a86e833 3358 unsigned int def_pdu_ring_sz, uint8_t ulp_num)
6733b39a
JK
3359{
3360 unsigned int idx;
3361 int ret;
3362 struct be_queue_info *dq, *cq;
3363 struct be_dma_mem *mem;
3364 struct be_mem_descriptor *mem_descr;
3365 void *dq_vaddress;
3366
3367 idx = 0;
8a86e833 3368 dq = &phwi_context->be_def_hdrq[ulp_num];
bfead3b2 3369 cq = &phwi_context->be_cq[0];
6733b39a
JK
3370 mem = &dq->dma_mem;
3371 mem_descr = phba->init_mem;
8a86e833
JK
3372 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
3373 (ulp_num * MEM_DESCR_OFFSET);
6733b39a
JK
3374 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3375 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
3376 sizeof(struct phys_addr),
3377 sizeof(struct phys_addr), dq_vaddress);
3378 if (ret) {
99bc5d55 3379 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
8a86e833
JK
3380 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
3381 ulp_num);
3382
6733b39a
JK
3383 return ret;
3384 }
457ff3b7
JK
3385 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3386 bus_address.u.a64.address;
6733b39a
JK
3387 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
3388 def_pdu_ring_sz,
8a86e833
JK
3389 phba->params.defpdu_hdr_sz,
3390 BEISCSI_DEFQ_HDR, ulp_num);
6733b39a 3391 if (ret) {
99bc5d55 3392 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
8a86e833
JK
3393 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
3394 ulp_num);
3395
6733b39a
JK
3396 return ret;
3397 }
99bc5d55 3398
8a86e833
JK
3399 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3400 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
3401 ulp_num,
3402 phwi_context->be_def_hdrq[ulp_num].id);
3403 hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
6733b39a
JK
3404 return 0;
3405}
3406
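/**
 * beiscsi_create_def_data()- Create the default PDU data ring
 * @phba: ptr to the priv structure
 * @phwi_context: ptr to hwi context memory
 * @phwi_ctrlr: ptr to hwi controller
 * @def_pdu_ring_sz: size of the default PDU ring
 * @ulp_num: ULP number the ring belongs to
 *
 * Same as the header ring, but for the default PDU data queue.
 **/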
3407static int
3408beiscsi_create_def_data(struct beiscsi_hba *phba,
3409 struct hwi_context_memory *phwi_context,
3410 struct hwi_controller *phwi_ctrlr,
8a86e833 3411 unsigned int def_pdu_ring_sz, uint8_t ulp_num)
6733b39a
JK
3412{
3413 unsigned int idx;
3414 int ret;
3415 struct be_queue_info *dataq, *cq;
3416 struct be_dma_mem *mem;
3417 struct be_mem_descriptor *mem_descr;
3418 void *dq_vaddress;
3419
3420 idx = 0;
8a86e833 3421 dataq = &phwi_context->be_def_dataq[ulp_num];
bfead3b2 3422 cq = &phwi_context->be_cq[0];
6733b39a
JK
3423 mem = &dataq->dma_mem;
3424 mem_descr = phba->init_mem;
8a86e833
JK
3425 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3426 (ulp_num * MEM_DESCR_OFFSET);
6733b39a
JK
3427 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3428 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3429 sizeof(struct phys_addr),
3430 sizeof(struct phys_addr), dq_vaddress);
3431 if (ret) {
99bc5d55 3432 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
8a86e833
JK
3433 "BM_%d : be_fill_queue Failed for DEF PDU "
3434 "DATA on ULP : %d\n",
3435 ulp_num);
3436
6733b39a
JK
3437 return ret;
3438 }
457ff3b7
JK
3439 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3440 bus_address.u.a64.address;
6733b39a
JK
3441 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3442 def_pdu_ring_sz,
8a86e833
JK
3443 phba->params.defpdu_data_sz,
3444 BEISCSI_DEFQ_DATA, ulp_num);
6733b39a 3445 if (ret) {
99bc5d55
JSJ
3446 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3447 			    "BM_%d : be_cmd_create_default_pdu_queue"
8a86e833
JK
3448 " Failed for DEF PDU DATA on ULP : %d\n",
3449 ulp_num);
6733b39a
JK
3450 return ret;
3451 }
8a86e833 3452
99bc5d55 3453 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
8a86e833
JK
3454 "BM_%d : iscsi def data id on ULP : %d is %d\n",
3455 ulp_num,
3456 phwi_context->be_def_dataq[ulp_num].id);
99bc5d55 3457
8a86e833 3458 hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
99bc5d55 3459 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
8a86e833
JK
3460 		    "BM_%d : DEFAULT PDU DATA RING CREATED "
3461 "on ULP : %d\n", ulp_num);
99bc5d55 3462
6733b39a
JK
3463 return 0;
3464}
3465
15a90fe0
JK
3466
3467static int
3468beiscsi_post_template_hdr(struct beiscsi_hba *phba)
3469{
3470 struct be_mem_descriptor *mem_descr;
3471 struct mem_array *pm_arr;
3472 struct be_dma_mem sgl;
a129d92f 3473 int status, ulp_num;
15a90fe0 3474
a129d92f
JK
3475 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3476 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3477 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3478 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
3479 (ulp_num * MEM_DESCR_OFFSET);
3480 pm_arr = mem_descr->mem_array;
15a90fe0 3481
a129d92f
JK
3482 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3483 status = be_cmd_iscsi_post_template_hdr(
3484 &phba->ctrl, &sgl);
15a90fe0 3485
a129d92f
JK
3486 if (status != 0) {
3487 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3488 					    "BM_%d : Post Template HDR Failed for "
3489 "ULP_%d\n", ulp_num);
3490 return status;
3491 }
3492
3493 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3494 				    "BM_%d : Template HDR Pages Posted for "
3495 "ULP_%d\n", ulp_num);
15a90fe0
JK
3496 }
3497 }
15a90fe0
JK
3498 return 0;
3499}
3500
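/**
 * beiscsi_post_pages()- Post SGL pages to the adapter
 * @phba: ptr to the priv structure
 *
 * Posts the HWI_MEM_SGE memory regions to the firmware, page by
 * page, starting at the ICD offset of the first supported ULP.
 **/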
6733b39a
JK
3501static int
3502beiscsi_post_pages(struct beiscsi_hba *phba)
3503{
3504 struct be_mem_descriptor *mem_descr;
3505 struct mem_array *pm_arr;
3506 unsigned int page_offset, i;
3507 struct be_dma_mem sgl;
843ae752 3508 int status, ulp_num = 0;
6733b39a
JK
3509
3510 mem_descr = phba->init_mem;
3511 mem_descr += HWI_MEM_SGE;
3512 pm_arr = mem_descr->mem_array;
3513
90622db3
JK
3514 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3515 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3516 break;
3517
6733b39a 3518 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
843ae752 3519 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
6733b39a
JK
3520 for (i = 0; i < mem_descr->num_elements; i++) {
3521 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3522 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3523 page_offset,
3524 (pm_arr->size / PAGE_SIZE));
3525 page_offset += pm_arr->size / PAGE_SIZE;
3526 if (status != 0) {
99bc5d55
JSJ
3527 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3528 "BM_%d : post sgl failed.\n");
6733b39a
JK
3529 return status;
3530 }
3531 pm_arr++;
3532 }
99bc5d55
JSJ
3533 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3534 "BM_%d : POSTED PAGES\n");
6733b39a
JK
3535 return 0;
3536}
3537
bfead3b2
JK
3538static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3539{
3540 struct be_dma_mem *mem = &q->dma_mem;
c8b25598 3541 if (mem->va) {
bfead3b2
JK
3542 pci_free_consistent(phba->pcidev, mem->size,
3543 mem->va, mem->dma);
c8b25598
JK
3544 mem->va = NULL;
3545 }
bfead3b2
JK
3546}
3547
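/**
 * be_queue_alloc()- Allocate DMA memory for a queue
 * @phba: ptr to the priv structure
 * @q: queue to initialize
 * @len: number of entries
 * @entry_size: size of each entry
 *
 * Zeroes the queue descriptor and allocates len * entry_size
 * bytes of coherent DMA memory for it.
 **/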
3548static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3549 u16 len, u16 entry_size)
3550{
3551 struct be_dma_mem *mem = &q->dma_mem;
3552
3553 memset(q, 0, sizeof(*q));
3554 q->len = len;
3555 q->entry_size = entry_size;
3556 mem->size = len * entry_size;
7c845eb5 3557 mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
bfead3b2 3558 if (!mem->va)
d3ad2bb3 3559 return -ENOMEM;
bfead3b2
JK
3560 return 0;
3561}
3562
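/**
 * beiscsi_create_wrb_rings()- Create WRB rings for all connections
 * @phba: ptr to the priv structure
 * @phwi_context: ptr to hwi context memory
 * @phwi_ctrlr: ptr to hwi controller
 *
 * Carves the HWI_MEM_WRB region into one WRB ring per connection,
 * distributes the rings across the supported ULPs and issues
 * be_cmd_wrbq_create() for each of them.
 **/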
6733b39a
JK
3563static int
3564beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3565 struct hwi_context_memory *phwi_context,
3566 struct hwi_controller *phwi_ctrlr)
3567{
3568 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
3569 u64 pa_addr_lo;
4eea99d5 3570 unsigned int idx, num, i, ulp_num;
6733b39a
JK
3571 struct mem_array *pwrb_arr;
3572 void *wrb_vaddr;
3573 struct be_dma_mem sgl;
3574 struct be_mem_descriptor *mem_descr;
a7909b39 3575 struct hwi_wrb_context *pwrb_context;
6733b39a 3576 int status;
4eea99d5
JK
3577 uint8_t ulp_count = 0, ulp_base_num = 0;
3578 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
6733b39a
JK
3579
3580 idx = 0;
3581 mem_descr = phba->init_mem;
3582 mem_descr += HWI_MEM_WRB;
3583 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
3584 GFP_KERNEL);
3585 if (!pwrb_arr) {
99bc5d55
JSJ
3586 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3587 "BM_%d : Memory alloc failed in create wrb ring.\n");
6733b39a
JK
3588 return -ENOMEM;
3589 }
3590 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3591 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3592 num_wrb_rings = mem_descr->mem_array[idx].size /
3593 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
3594
3595 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3596 if (num_wrb_rings) {
3597 pwrb_arr[num].virtual_address = wrb_vaddr;
3598 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3599 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3600 sizeof(struct iscsi_wrb);
3601 wrb_vaddr += pwrb_arr[num].size;
3602 pa_addr_lo += pwrb_arr[num].size;
3603 num_wrb_rings--;
3604 } else {
3605 idx++;
3606 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3607 pa_addr_lo = mem_descr->mem_array[idx].\
3608 bus_address.u.a64.address;
3609 num_wrb_rings = mem_descr->mem_array[idx].size /
3610 (phba->params.wrbs_per_cxn *
3611 sizeof(struct iscsi_wrb));
3612 pwrb_arr[num].virtual_address = wrb_vaddr;
3613 pwrb_arr[num].bus_address.u.a64.address\
3614 = pa_addr_lo;
3615 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3616 sizeof(struct iscsi_wrb);
3617 wrb_vaddr += pwrb_arr[num].size;
3618 pa_addr_lo += pwrb_arr[num].size;
3619 num_wrb_rings--;
3620 }
3621 }
4eea99d5
JK
3622
3623 /* Get the ULP Count */
3624 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3625 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3626 ulp_count++;
3627 ulp_base_num = ulp_num;
3628 cid_count_ulp[ulp_num] =
3629 BEISCSI_GET_CID_COUNT(phba, ulp_num);
3630 }
3631
6733b39a
JK
3632 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3633 wrb_mem_index = 0;
3634 offset = 0;
3635 size = 0;
3636
4eea99d5
JK
3637 if (ulp_count > 1) {
3638 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
3639
3640 if (!cid_count_ulp[ulp_base_num])
3641 ulp_base_num = (ulp_base_num + 1) %
3642 BEISCSI_ULP_COUNT;
3643
3644 cid_count_ulp[ulp_base_num]--;
3645 }
3646
3647
6733b39a
JK
3648 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3649 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
4eea99d5
JK
3650 &phwi_context->be_wrbq[i],
3651 &phwi_ctrlr->wrb_context[i],
3652 ulp_base_num);
6733b39a 3653 if (status != 0) {
99bc5d55
JSJ
3654 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3655 				    "BM_%d : wrbq create failed.\n");
1462b8ff 3656 kfree(pwrb_arr);
6733b39a
JK
3657 return status;
3658 }
a7909b39 3659 pwrb_context = &phwi_ctrlr->wrb_context[i];
a7909b39 3660 BE_SET_CID_TO_CRI(i, pwrb_context->cid);
6733b39a
JK
3661 }
3662 kfree(pwrb_arr);
3663 return 0;
3664}
3665
3666static void free_wrb_handles(struct beiscsi_hba *phba)
3667{
3668 unsigned int index;
3669 struct hwi_controller *phwi_ctrlr;
3670 struct hwi_wrb_context *pwrb_context;
3671
3672 phwi_ctrlr = phba->phwi_ctrlr;
a7909b39 3673 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
6733b39a
JK
3674 pwrb_context = &phwi_ctrlr->wrb_context[index];
3675 kfree(pwrb_context->pwrb_handle_base);
3676 kfree(pwrb_context->pwrb_handle_basestd);
3677 }
3678}
3679
bfead3b2
JK
3680static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3681{
3682 struct be_queue_info *q;
3683 struct be_ctrl_info *ctrl = &phba->ctrl;
3684
3685 q = &phba->ctrl.mcc_obj.q;
4e2bdf7a 3686 if (q->created) {
bfead3b2 3687 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
4e2bdf7a
JSJ
3688 be_queue_free(phba, q);
3689 }
bfead3b2
JK
3690
3691 q = &phba->ctrl.mcc_obj.cq;
4e2bdf7a 3692 if (q->created) {
bfead3b2 3693 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
4e2bdf7a
JSJ
3694 be_queue_free(phba, q);
3695 }
bfead3b2
JK
3696}
3697
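/**
 * hwi_cleanup()- Destroy all queues created for the adapter
 * @phba: ptr to the priv structure
 *
 * Tears down the WRB queues, default PDU queues, SGL, CQs, MCC
 * queues and EQs, frees their DMA memory and tells the firmware
 * to clean up.
 **/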
6733b39a
JK
3698static void hwi_cleanup(struct beiscsi_hba *phba)
3699{
3700 struct be_queue_info *q;
3701 struct be_ctrl_info *ctrl = &phba->ctrl;
3702 struct hwi_controller *phwi_ctrlr;
3703 struct hwi_context_memory *phwi_context;
a7909b39 3704 struct hwi_async_pdu_context *pasync_ctx;
23188167 3705 int i, eq_for_mcc, ulp_num;
6733b39a
JK
3706
3707 phwi_ctrlr = phba->phwi_ctrlr;
3708 phwi_context = phwi_ctrlr->phwi_ctxt;
15a90fe0
JK
3709
3710 be_cmd_iscsi_remove_template_hdr(ctrl);
3711
6733b39a
JK
3712 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3713 q = &phwi_context->be_wrbq[i];
3714 if (q->created)
3715 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3716 }
a7909b39 3717 kfree(phwi_context->be_wrbq);
6733b39a
JK
3718 free_wrb_handles(phba);
3719
8a86e833
JK
3720 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3721 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
6733b39a 3722
8a86e833
JK
3723 q = &phwi_context->be_def_hdrq[ulp_num];
3724 if (q->created)
3725 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3726
3727 q = &phwi_context->be_def_dataq[ulp_num];
3728 if (q->created)
3729 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3730
3731 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
3732 }
3733 }
6733b39a
JK
3734
3735 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3736
bfead3b2
JK
3737 for (i = 0; i < (phba->num_cpus); i++) {
3738 q = &phwi_context->be_cq[i];
4e2bdf7a
JSJ
3739 if (q->created) {
3740 be_queue_free(phba, q);
bfead3b2 3741 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
4e2bdf7a 3742 }
bfead3b2 3743 }
23188167
JK
3744
3745 be_mcc_queues_destroy(phba);
bfead3b2 3746 if (phba->msix_enabled)
23188167 3747 eq_for_mcc = 1;
bfead3b2 3748 else
23188167
JK
3749 eq_for_mcc = 0;
3750 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
bfead3b2 3751 q = &phwi_context->be_eq[i].q;
4e2bdf7a
JSJ
3752 if (q->created) {
3753 be_queue_free(phba, q);
bfead3b2 3754 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
4e2bdf7a 3755 }
bfead3b2 3756 }
0283fbb1 3757 be_cmd_fw_uninit(ctrl);
bfead3b2 3758}
6733b39a 3759
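/**
 * be_mcc_queues_create()- Create the MCC queue and its CQ
 * @phba: ptr to the priv structure
 * @phwi_context: ptr to hwi context memory
 *
 * Allocates and creates the MCC completion queue (bound to the
 * last EQ when MSI-X is enabled, EQ0 otherwise) and then the
 * MCC queue itself.
 **/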
bfead3b2
JK
3760static int be_mcc_queues_create(struct beiscsi_hba *phba,
3761 struct hwi_context_memory *phwi_context)
3762{
3763 struct be_queue_info *q, *cq;
3764 struct be_ctrl_info *ctrl = &phba->ctrl;
3765
3766 /* Alloc MCC compl queue */
3767 cq = &phba->ctrl.mcc_obj.cq;
3768 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3769 sizeof(struct be_mcc_compl)))
3770 goto err;
3771 /* Ask BE to create MCC compl queue; */
3772 if (phba->msix_enabled) {
3773 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3774 [phba->num_cpus].q, false, true, 0))
3775 goto mcc_cq_free;
3776 } else {
3777 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3778 false, true, 0))
3779 goto mcc_cq_free;
3780 }
3781
3782 /* Alloc MCC queue */
3783 q = &phba->ctrl.mcc_obj.q;
3784 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3785 goto mcc_cq_destroy;
3786
3787 /* Ask BE to create MCC queue */
35e66019 3788 if (beiscsi_cmd_mccq_create(phba, q, cq))
bfead3b2
JK
3789 goto mcc_q_free;
3790
3791 return 0;
3792
3793mcc_q_free:
3794 be_queue_free(phba, q);
3795mcc_cq_destroy:
3796 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3797mcc_cq_free:
3798 be_queue_free(phba, cq);
3799err:
d3ad2bb3 3800 return -ENOMEM;
bfead3b2
JK
3801}
3802
107dfcba
JSJ
3803/**
3804 * find_num_cpus()- Get the CPU online count
3805 * @phba: ptr to priv structure
3806 *
3807 * CPU count is used for creating EQ.
3808 **/
3809static void find_num_cpus(struct beiscsi_hba *phba)
bfead3b2
JK
3810{
3811 int num_cpus = 0;
3812
3813 num_cpus = num_online_cpus();
bfead3b2 3814
22abeef0
JSJ
3815 switch (phba->generation) {
3816 case BE_GEN2:
3817 case BE_GEN3:
3818 phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
3819 BEISCSI_MAX_NUM_CPUS : num_cpus;
3820 break;
3821 case BE_GEN4:
68c26a3a
JK
3822 /*
3823 * If eqid_count == 1 fall back to
3824 * INTX mechanism
3825 **/
3826 if (phba->fw_config.eqid_count == 1) {
3827 enable_msix = 0;
3828 phba->num_cpus = 1;
3829 return;
3830 }
3831
3832 phba->num_cpus =
3833 (num_cpus > (phba->fw_config.eqid_count - 1)) ?
3834 (phba->fw_config.eqid_count - 1) : num_cpus;
22abeef0
JSJ
3835 break;
3836 default:
3837 phba->num_cpus = 1;
3838 }
6733b39a
JK
3839}
3840
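/**
 * hwi_init_port()- Create all HW queues needed by the port
 * @phba: ptr to the priv structure
 *
 * Initializes the firmware and creates the EQs, MCC queues, CQs,
 * per-ULP default PDU rings and WRB rings, and posts the SGL
 * pages and template headers. On any failure the queues created
 * so far are destroyed.
 **/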
3841static int hwi_init_port(struct beiscsi_hba *phba)
3842{
3843 struct hwi_controller *phwi_ctrlr;
3844 struct hwi_context_memory *phwi_context;
3845 unsigned int def_pdu_ring_sz;
3846 struct be_ctrl_info *ctrl = &phba->ctrl;
8a86e833 3847 int status, ulp_num;
6733b39a 3848
6733b39a 3849 phwi_ctrlr = phba->phwi_ctrlr;
6733b39a 3850 phwi_context = phwi_ctrlr->phwi_ctxt;
73af08e1 3851 phwi_context->max_eqd = 128;
bfead3b2 3852 phwi_context->min_eqd = 0;
73af08e1 3853 phwi_context->cur_eqd = 0;
6733b39a 3854 be_cmd_fw_initialize(&phba->ctrl);
53aefe25
JB
3855 /* set optic state to unknown */
3856 phba->optic_state = 0xff;
bfead3b2
JK
3857
3858 status = beiscsi_create_eqs(phba, phwi_context);
6733b39a 3859 if (status != 0) {
99bc5d55
JSJ
3860 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3861 "BM_%d : EQ not created\n");
6733b39a
JK
3862 goto error;
3863 }
3864
bfead3b2
JK
3865 status = be_mcc_queues_create(phba, phwi_context);
3866 if (status != 0)
3867 goto error;
3868
3869 status = mgmt_check_supported_fw(ctrl, phba);
6733b39a 3870 if (status != 0) {
99bc5d55
JSJ
3871 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3872 "BM_%d : Unsupported fw version\n");
6733b39a
JK
3873 goto error;
3874 }
3875
bfead3b2 3876 status = beiscsi_create_cqs(phba, phwi_context);
6733b39a 3877 if (status != 0) {
99bc5d55
JSJ
3878 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3879 "BM_%d : CQ not created\n");
6733b39a
JK
3880 goto error;
3881 }
3882
8a86e833
JK
3883 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3884 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
6733b39a 3885
8a86e833
JK
3886 def_pdu_ring_sz =
3887 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
3888 sizeof(struct phys_addr);
3889
3890 status = beiscsi_create_def_hdr(phba, phwi_context,
3891 phwi_ctrlr,
3892 def_pdu_ring_sz,
3893 ulp_num);
3894 if (status != 0) {
3895 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3896 "BM_%d : Default Header not created for ULP : %d\n",
3897 ulp_num);
3898 goto error;
3899 }
3900
3901 status = beiscsi_create_def_data(phba, phwi_context,
3902 phwi_ctrlr,
3903 def_pdu_ring_sz,
3904 ulp_num);
3905 if (status != 0) {
3906 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3907 "BM_%d : Default Data not created for ULP : %d\n",
3908 ulp_num);
3909 goto error;
3910 }
3911 }
6733b39a
JK
3912 }
3913
3914 status = beiscsi_post_pages(phba);
3915 if (status != 0) {
99bc5d55
JSJ
3916 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3917 "BM_%d : Post SGL Pages Failed\n");
6733b39a
JK
3918 goto error;
3919 }
3920
15a90fe0
JK
3921 status = beiscsi_post_template_hdr(phba);
3922 if (status != 0) {
3923 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3924 "BM_%d : Template HDR Posting for CXN Failed\n");
3925 }
3926
6733b39a
JK
3927 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3928 if (status != 0) {
99bc5d55
JSJ
3929 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3930 "BM_%d : WRB Rings not created\n");
6733b39a
JK
3931 goto error;
3932 }
3933
8a86e833
JK
3934 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3935 uint16_t async_arr_idx = 0;
3936
3937 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3938 uint16_t cri = 0;
3939 struct hwi_async_pdu_context *pasync_ctx;
3940
3941 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
3942 phwi_ctrlr, ulp_num);
3943 for (cri = 0; cri <
3944 phba->params.cxns_per_ctrl; cri++) {
3945 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
3946 (phwi_ctrlr, cri))
3947 pasync_ctx->cid_to_async_cri_map[
3948 phwi_ctrlr->wrb_context[cri].cid] =
3949 async_arr_idx++;
3950 }
3951 }
3952 }
3953
99bc5d55
JSJ
3954 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3955 "BM_%d : hwi_init_port success\n");
6733b39a
JK
3956 return 0;
3957
3958error:
99bc5d55
JSJ
3959 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3960 		    "BM_%d : hwi_init_port failed\n");
6733b39a 3961 hwi_cleanup(phba);
a49e06d5 3962 return status;
6733b39a
JK
3963}
3964
6733b39a
JK
3965static int hwi_init_controller(struct beiscsi_hba *phba)
3966{
3967 struct hwi_controller *phwi_ctrlr;
3968
3969 phwi_ctrlr = phba->phwi_ctrlr;
3970 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3971 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3972 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
99bc5d55
JSJ
3973 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3974 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
3975 phwi_ctrlr->phwi_ctxt);
6733b39a 3976 } else {
99bc5d55
JSJ
3977 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3978 "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3979 			    "than one element. Failing to load\n");
6733b39a
JK
3980 return -ENOMEM;
3981 }
3982
3983 iscsi_init_global_templates(phba);
3ec78271
JK
3984 if (beiscsi_init_wrb_handle(phba))
3985 return -ENOMEM;
3986
a7909b39
JK
3987 if (hwi_init_async_pdu_ctx(phba)) {
3988 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3989 "BM_%d : hwi_init_async_pdu_ctx failed\n");
3990 return -ENOMEM;
3991 }
3992
6733b39a 3993 if (hwi_init_port(phba) != 0) {
99bc5d55
JSJ
3994 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3995 "BM_%d : hwi_init_controller failed\n");
3996
6733b39a
JK
3997 return -ENOMEM;
3998 }
3999 return 0;
4000}
4001
4002static void beiscsi_free_mem(struct beiscsi_hba *phba)
4003{
4004 struct be_mem_descriptor *mem_descr;
4005 int i, j;
4006
4007 mem_descr = phba->init_mem;
4008 i = 0;
4009 j = 0;
4010 for (i = 0; i < SE_MEM_MAX; i++) {
4011 for (j = mem_descr->num_elements; j > 0; j--) {
4012 pci_free_consistent(phba->pcidev,
4013 mem_descr->mem_array[j - 1].size,
4014 mem_descr->mem_array[j - 1].virtual_address,
457ff3b7
JK
4015 (unsigned long)mem_descr->mem_array[j - 1].
4016 bus_address.u.a64.address);
6733b39a 4017 }
8a86e833 4018
6733b39a
JK
4019 kfree(mem_descr->mem_array);
4020 mem_descr++;
4021 }
4022 kfree(phba->init_mem);
a7909b39 4023 kfree(phba->phwi_ctrlr->wrb_context);
6733b39a
JK
4024 kfree(phba->phwi_ctrlr);
4025}
4026
4027static int beiscsi_init_controller(struct beiscsi_hba *phba)
4028{
4029 int ret = -ENOMEM;
4030
4031 ret = beiscsi_get_memory(phba);
4032 if (ret < 0) {
99bc5d55
JSJ
4033 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4034 			    "BM_%d : beiscsi_dev_probe - "
4035 "Failed in beiscsi_alloc_memory\n");
6733b39a
JK
4036 return ret;
4037 }
4038
4039 ret = hwi_init_controller(phba);
4040 if (ret)
4041 goto free_init;
99bc5d55
JSJ
4042 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4043 		    "BM_%d : Return success from beiscsi_init_controller\n");
4044
6733b39a
JK
4045 return 0;
4046
4047free_init:
4048 beiscsi_free_mem(phba);
a49e06d5 4049 return ret;
6733b39a
JK
4050}
4051
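/**
 * beiscsi_init_sgl_handle()- Set up the IO and EH SGL handle pools
 * @phba: ptr to the priv structure
 *
 * Builds the io_sgl and eh_sgl handle arrays from the
 * HWI_MEM_SGLH region and points each handle at its SGE
 * fragment in the HWI_MEM_SGE region.
 **/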
4052static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
4053{
4054 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
4055 struct sgl_handle *psgl_handle;
4056 struct iscsi_sge *pfrag;
90622db3
JK
4057 unsigned int arr_index, i, idx;
4058 unsigned int ulp_icd_start, ulp_num = 0;
6733b39a
JK
4059
4060 phba->io_sgl_hndl_avbl = 0;
4061 phba->eh_sgl_hndl_avbl = 0;
bfead3b2 4062
6733b39a
JK
4063 mem_descr_sglh = phba->init_mem;
4064 mem_descr_sglh += HWI_MEM_SGLH;
4065 if (1 == mem_descr_sglh->num_elements) {
4066 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
4067 phba->params.ios_per_ctrl,
4068 GFP_KERNEL);
4069 if (!phba->io_sgl_hndl_base) {
99bc5d55
JSJ
4070 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4071 "BM_%d : Mem Alloc Failed. Failing to load\n");
6733b39a
JK
4072 return -ENOMEM;
4073 }
4074 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
4075 (phba->params.icds_per_ctrl -
4076 phba->params.ios_per_ctrl),
4077 GFP_KERNEL);
4078 if (!phba->eh_sgl_hndl_base) {
4079 kfree(phba->io_sgl_hndl_base);
99bc5d55
JSJ
4080 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4081 "BM_%d : Mem Alloc Failed. Failing to load\n");
6733b39a
JK
4082 return -ENOMEM;
4083 }
4084 } else {
99bc5d55
JSJ
4085 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4086 			    "BM_%d : HWI_MEM_SGLH is more than one element. "
4087 "Failing to load\n");
6733b39a
JK
4088 return -ENOMEM;
4089 }
4090
4091 arr_index = 0;
4092 idx = 0;
4093 while (idx < mem_descr_sglh->num_elements) {
4094 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
4095
4096 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
4097 sizeof(struct sgl_handle)); i++) {
4098 if (arr_index < phba->params.ios_per_ctrl) {
4099 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
4100 phba->io_sgl_hndl_avbl++;
4101 arr_index++;
4102 } else {
4103 phba->eh_sgl_hndl_base[arr_index -
4104 phba->params.ios_per_ctrl] =
4105 psgl_handle;
4106 arr_index++;
4107 phba->eh_sgl_hndl_avbl++;
4108 }
4109 psgl_handle++;
4110 }
4111 idx++;
4112 }
99bc5d55
JSJ
4113 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4114 		    "BM_%d : phba->io_sgl_hndl_avbl=%d "
4115 "phba->eh_sgl_hndl_avbl=%d\n",
4116 phba->io_sgl_hndl_avbl,
4117 phba->eh_sgl_hndl_avbl);
4118
6733b39a
JK
4119 mem_descr_sg = phba->init_mem;
4120 mem_descr_sg += HWI_MEM_SGE;
99bc5d55
JSJ
4121 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4122 "\n BM_%d : mem_descr_sg->num_elements=%d\n",
4123 mem_descr_sg->num_elements);
4124
90622db3
JK
4125 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
4126 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
4127 break;
4128
4129 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
4130
6733b39a
JK
4131 arr_index = 0;
4132 idx = 0;
4133 while (idx < mem_descr_sg->num_elements) {
4134 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
4135
4136 for (i = 0;
4137 i < (mem_descr_sg->mem_array[idx].size) /
4138 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
4139 i++) {
4140 if (arr_index < phba->params.ios_per_ctrl)
4141 psgl_handle = phba->io_sgl_hndl_base[arr_index];
4142 else
4143 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
4144 phba->params.ios_per_ctrl];
4145 psgl_handle->pfrag = pfrag;
4146 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
4147 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
4148 pfrag += phba->params.num_sge_per_io;
90622db3 4149 psgl_handle->sgl_index = ulp_icd_start + arr_index++;
6733b39a
JK
4150 }
4151 idx++;
4152 }
4153 phba->io_sgl_free_index = 0;
4154 phba->io_sgl_alloc_index = 0;
4155 phba->eh_sgl_free_index = 0;
4156 phba->eh_sgl_alloc_index = 0;
4157 return 0;
4158}
4159
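/**
 * hba_setup_cid_tbls()- Build the per-ULP CID tables
 * @phba: ptr to the priv structure
 *
 * Allocates the CID array for every supported ULP along with the
 * endpoint and connection lookup tables, and fills the CID arrays
 * from the WRB contexts.
 **/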
4160static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
4161{
0a3db7c0
JK
4162 int ret;
4163 uint16_t i, ulp_num;
4164 struct ulp_cid_info *ptr_cid_info = NULL;
6733b39a 4165
0a3db7c0
JK
4166 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4167 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4168 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
4169 GFP_KERNEL);
4170
4171 if (!ptr_cid_info) {
4172 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4173 					    "BM_%d : Failed to allocate memory "
4174 "for ULP_CID_INFO for ULP : %d\n",
4175 ulp_num);
4176 ret = -ENOMEM;
4177 goto free_memory;
4178
4179 }
4180
4181 /* Allocate memory for CID array */
4182 ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
4183 BEISCSI_GET_CID_COUNT(phba,
4184 ulp_num), GFP_KERNEL);
4185 if (!ptr_cid_info->cid_array) {
4186 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4187 					    "BM_%d : Failed to allocate memory "
4188 "for CID_ARRAY for ULP : %d\n",
4189 ulp_num);
4190 kfree(ptr_cid_info);
4191 ptr_cid_info = NULL;
4192 ret = -ENOMEM;
4193
4194 goto free_memory;
4195 }
4196 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
4197 phba, ulp_num);
4198
4199 /* Save the cid_info_array ptr */
4200 phba->cid_array_info[ulp_num] = ptr_cid_info;
4201 }
6733b39a 4202 }
c2462288 4203 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
a7909b39 4204 phba->params.cxns_per_ctrl, GFP_KERNEL);
6733b39a 4205 if (!phba->ep_array) {
99bc5d55
JSJ
4206 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4207 "BM_%d : Failed to allocate memory in "
4208 "hba_setup_cid_tbls\n");
0a3db7c0
JK
4209 ret = -ENOMEM;
4210
4211 goto free_memory;
6733b39a 4212 }
a7909b39
JK
4213
4214 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
4215 phba->params.cxns_per_ctrl, GFP_KERNEL);
4216 if (!phba->conn_table) {
4217 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4218 			    "BM_%d : Failed to allocate memory in "
4219 "hba_setup_cid_tbls\n");
4220
a7909b39 4221 kfree(phba->ep_array);
a7909b39 4222 phba->ep_array = NULL;
0a3db7c0 4223 ret = -ENOMEM;
5f2d25ef
TH
4224
4225 goto free_memory;
6733b39a 4226 }
a7909b39 4227
0a3db7c0
JK
4228 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
4229 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
4230
4231 ptr_cid_info = phba->cid_array_info[ulp_num];
4232 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
4233 phba->phwi_ctrlr->wrb_context[i].cid;
4234
4235 }
4236
4237 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4238 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4239 ptr_cid_info = phba->cid_array_info[ulp_num];
a7909b39 4240
0a3db7c0
JK
4241 ptr_cid_info->cid_alloc = 0;
4242 ptr_cid_info->cid_free = 0;
4243 }
4244 }
6733b39a 4245 return 0;
0a3db7c0
JK
4246
4247free_memory:
4248 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4249 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4250 ptr_cid_info = phba->cid_array_info[ulp_num];
4251
4252 if (ptr_cid_info) {
4253 kfree(ptr_cid_info->cid_array);
4254 kfree(ptr_cid_info);
4255 phba->cid_array_info[ulp_num] = NULL;
4256 }
4257 }
4258 }
4259
4260 return ret;
6733b39a
JK
4261}
4262
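/**
 * hwi_enable_intr()- Enable adapter interrupts
 * @phba: ptr to the priv structure
 *
 * Sets the host interrupt bit in the PCI config BAR and rearms
 * the event queue doorbells.
 **/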
238f6b72 4263static void hwi_enable_intr(struct beiscsi_hba *phba)
6733b39a
JK
4264{
4265 struct be_ctrl_info *ctrl = &phba->ctrl;
4266 struct hwi_controller *phwi_ctrlr;
4267 struct hwi_context_memory *phwi_context;
4268 struct be_queue_info *eq;
4269 u8 __iomem *addr;
bfead3b2 4270 u32 reg, i;
6733b39a
JK
4271 u32 enabled;
4272
4273 phwi_ctrlr = phba->phwi_ctrlr;
4274 phwi_context = phwi_ctrlr->phwi_ctxt;
4275
6733b39a
JK
4276 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
4277 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
4278 reg = ioread32(addr);
6733b39a
JK
4279
4280 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4281 if (!enabled) {
4282 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
99bc5d55
JSJ
4283 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4284 "BM_%d : reg =x%08x addr=%p\n", reg, addr);
6733b39a 4285 iowrite32(reg, addr);
665d6d94
JK
4286 }
4287
4288 if (!phba->msix_enabled) {
4289 eq = &phwi_context->be_eq[0].q;
99bc5d55
JSJ
4290 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4291 "BM_%d : eq->id=%d\n", eq->id);
4292
665d6d94
JK
4293 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4294 } else {
4295 for (i = 0; i <= phba->num_cpus; i++) {
4296 eq = &phwi_context->be_eq[i].q;
99bc5d55
JSJ
4297 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4298 "BM_%d : eq->id=%d\n", eq->id);
bfead3b2
JK
4299 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4300 }
c03af1ae 4301 }
6733b39a
JK
4302}
4303
4304static void hwi_disable_intr(struct beiscsi_hba *phba)
4305{
4306 struct be_ctrl_info *ctrl = &phba->ctrl;
4307
4308 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
4309 u32 reg = ioread32(addr);
4310
4311 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4312 if (enabled) {
4313 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4314 iowrite32(reg, addr);
4315 } else
99bc5d55
JSJ
4316 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
4317 "BM_%d : In hwi_disable_intr, Already Disabled\n");
6733b39a
JK
4318}
4319
9aef4200
JSJ
4320/**
4321 * beiscsi_get_boot_info()- Get the boot session info
4322 * @phba: The device priv structure instance
4323 *
4324 * Get the boot target info and store in driver priv structure
4325 *
4326 * return values
4327 * Success: 0
4328 * Failure: Non-Zero Value
4329 **/
c7acc5b8
JK
4330static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
4331{
0e43895e 4332 struct be_cmd_get_session_resp *session_resp;
c7acc5b8 4333 struct be_dma_mem nonemb_cmd;
e175defe 4334 unsigned int tag;
9aef4200 4335 unsigned int s_handle;
f457a46f 4336 int ret = -ENOMEM;
c7acc5b8 4337
9aef4200
JSJ
4338 /* Get the session handle of the boot target */
4339 ret = be_mgmt_get_boot_shandle(phba, &s_handle);
4340 if (ret) {
99bc5d55
JSJ
4341 beiscsi_log(phba, KERN_ERR,
4342 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4343 "BM_%d : No boot session\n");
3efde862
JSJ
4344
4345 if (ret == -ENXIO)
4346 phba->get_boot = 0;
4347
4348
9aef4200 4349 return ret;
c7acc5b8 4350 }
3efde862 4351 phba->get_boot = 0;
7c845eb5
JP
4352 nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
4353 sizeof(*session_resp),
4354 &nonemb_cmd.dma);
c7acc5b8 4355 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
4356 beiscsi_log(phba, KERN_ERR,
4357 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4358 			    "BM_%d : Failed to allocate memory for "
4359 "beiscsi_get_session_info\n");
4360
c7acc5b8
JK
4361 return -ENOMEM;
4362 }
4363
9aef4200 4364 tag = mgmt_get_session_info(phba, s_handle,
0e43895e 4365 &nonemb_cmd);
c7acc5b8 4366 if (!tag) {
99bc5d55
JSJ
4367 beiscsi_log(phba, KERN_ERR,
4368 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4369 "BM_%d : beiscsi_get_session_info"
4370 " Failed\n");
4371
c7acc5b8 4372 goto boot_freemem;
e175defe 4373 }
c7acc5b8 4374
1957aa7f 4375 ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
e175defe 4376 if (ret) {
99bc5d55
JSJ
4377 beiscsi_log(phba, KERN_ERR,
4378 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
e175defe 4379 			    "BM_%d : beiscsi_get_session_info Failed\n");
1957aa7f
JK
4380
4381 if (ret != -EBUSY)
4382 goto boot_freemem;
4383 else
4384 return ret;
c7acc5b8 4385 }
e175defe 4386
c7acc5b8 4387 session_resp = nonemb_cmd.va ;
f457a46f 4388
c7acc5b8
JK
4389 memcpy(&phba->boot_sess, &session_resp->session_info,
4390 sizeof(struct mgmt_session_info));
3f4134c1
JSJ
4391
4392 beiscsi_logout_fw_sess(phba,
4393 phba->boot_sess.session_handle);
f457a46f
MC
4394 ret = 0;
4395
c7acc5b8
JK
4396boot_freemem:
4397 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4398 nonemb_cmd.va, nonemb_cmd.dma);
f457a46f
MC
4399 return ret;
4400}
4401
4402static void beiscsi_boot_release(void *data)
4403{
4404 struct beiscsi_hba *phba = data;
4405
4406 scsi_host_put(phba->shost);
4407}
4408
4409static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
4410{
4411 struct iscsi_boot_kobj *boot_kobj;
4412
a3d313ea
JK
4413 /* it has been created previously */
4414 if (phba->boot_kset)
4415 return 0;
4416
f457a46f
MC
4417 /* get boot info using mgmt cmd */
4418 if (beiscsi_get_boot_info(phba))
4419 /* Try to see if we can carry on without this */
4420 return 0;
4421
4422 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
4423 if (!phba->boot_kset)
4424 return -ENOMEM;
4425
4426 /* get a ref because the show function will ref the phba */
4427 if (!scsi_host_get(phba->shost))
4428 goto free_kset;
4429 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
4430 beiscsi_show_boot_tgt_info,
4431 beiscsi_tgt_get_attr_visibility,
4432 beiscsi_boot_release);
4433 if (!boot_kobj)
4434 goto put_shost;
4435
4436 if (!scsi_host_get(phba->shost))
4437 goto free_kset;
4438 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
4439 beiscsi_show_boot_ini_info,
4440 beiscsi_ini_get_attr_visibility,
4441 beiscsi_boot_release);
4442 if (!boot_kobj)
4443 goto put_shost;
4444
4445 if (!scsi_host_get(phba->shost))
4446 goto free_kset;
4447 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
4448 beiscsi_show_boot_eth_info,
4449 beiscsi_eth_get_attr_visibility,
4450 beiscsi_boot_release);
4451 if (!boot_kobj)
4452 goto put_shost;
4453 return 0;
4454
4455put_shost:
4456 scsi_host_put(phba->shost);
4457free_kset:
4458 iscsi_boot_destroy_kset(phba->boot_kset);
c7acc5b8
JK
4459 return -ENOMEM;
4460}
4461
6733b39a
JK
4462static int beiscsi_init_port(struct beiscsi_hba *phba)
4463{
4464 int ret;
4465
4466 ret = beiscsi_init_controller(phba);
4467 if (ret < 0) {
99bc5d55
JSJ
4468 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4469 			    "BM_%d : beiscsi_dev_probe - Failed in "
4470 "beiscsi_init_controller\n");
6733b39a
JK
4471 return ret;
4472 }
4473 ret = beiscsi_init_sgl_handle(phba);
4474 if (ret < 0) {
99bc5d55
JSJ
4475 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4476 			    "BM_%d : beiscsi_dev_probe - Failed in "
4477 "beiscsi_init_sgl_handle\n");
6733b39a
JK
4478 goto do_cleanup_ctrlr;
4479 }
4480
4481 if (hba_setup_cid_tbls(phba)) {
99bc5d55
JSJ
4482 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4483 "BM_%d : Failed in hba_setup_cid_tbls\n");
6733b39a
JK
4484 kfree(phba->io_sgl_hndl_base);
4485 kfree(phba->eh_sgl_hndl_base);
4486 goto do_cleanup_ctrlr;
4487 }
4488
4489 return ret;
4490
4491do_cleanup_ctrlr:
4492 hwi_cleanup(phba);
4493 return ret;
4494}
4495
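/**
 * hwi_purge_eq()- Drain all pending entries from the EQs
 * @phba: ptr to the priv structure
 *
 * Walks every event queue, invalidates the valid entries and
 * rings the doorbell to acknowledge them.
 **/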
4496static void hwi_purge_eq(struct beiscsi_hba *phba)
4497{
4498 struct hwi_controller *phwi_ctrlr;
4499 struct hwi_context_memory *phwi_context;
4500 struct be_queue_info *eq;
4501 struct be_eq_entry *eqe = NULL;
bfead3b2 4502 int i, eq_msix;
756d29c8 4503 unsigned int num_processed;
6733b39a
JK
4504
4505 phwi_ctrlr = phba->phwi_ctrlr;
4506 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
4507 if (phba->msix_enabled)
4508 eq_msix = 1;
4509 else
4510 eq_msix = 0;
6733b39a 4511
bfead3b2
JK
4512 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
4513 eq = &phwi_context->be_eq[i].q;
6733b39a 4514 eqe = queue_tail_node(eq);
756d29c8 4515 num_processed = 0;
bfead3b2
JK
4516 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
4517 & EQE_VALID_MASK) {
4518 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
4519 queue_tail_inc(eq);
4520 eqe = queue_tail_node(eq);
756d29c8 4521 num_processed++;
bfead3b2 4522 }
756d29c8
JK
4523
4524 if (num_processed)
4525 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
6733b39a
JK
4526 }
4527}
4528
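/**
 * beiscsi_clean_port()- Release port resources
 * @phba: ptr to the priv structure
 *
 * Asks the firmware to clean up each supported ULP, purges the
 * EQs, destroys the HW queues and frees the SGL handle, endpoint,
 * connection and CID tables.
 **/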
4529static void beiscsi_clean_port(struct beiscsi_hba *phba)
4530{
0a3db7c0
JK
4531 int mgmt_status, ulp_num;
4532 struct ulp_cid_info *ptr_cid_info = NULL;
6733b39a 4533
bd41c2bd
JK
4534 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4535 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4536 mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
4537 if (mgmt_status)
4538 beiscsi_log(phba, KERN_WARNING,
4539 BEISCSI_LOG_INIT,
4540 "BM_%d : mgmt_epfw_cleanup FAILED"
4541 " for ULP_%d\n", ulp_num);
4542 }
4543 }
756d29c8 4544
6733b39a 4545 hwi_purge_eq(phba);
756d29c8 4546 hwi_cleanup(phba);
6733b39a
JK
4547 kfree(phba->io_sgl_hndl_base);
4548 kfree(phba->eh_sgl_hndl_base);
6733b39a 4549 kfree(phba->ep_array);
a7909b39 4550 kfree(phba->conn_table);
0a3db7c0
JK
4551
4552 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4553 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4554 ptr_cid_info = phba->cid_array_info[ulp_num];
4555
4556 if (ptr_cid_info) {
4557 kfree(ptr_cid_info->cid_array);
4558 kfree(ptr_cid_info);
4559 phba->cid_array_info[ulp_num] = NULL;
4560 }
4561 }
4562 }
4563
6733b39a
JK
4564}
4565
43f388b0
JK
4566/**
4567 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
4568 * @beiscsi_conn: ptr to the conn to be cleaned up
4a4a11b9 4569 * @task: ptr to iscsi_task resource to be freed.
43f388b0
JK
4570 *
4571 * Free driver mgmt resources bound to the CXN.
4572 **/
4573void
4a4a11b9
JK
4574beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4575 struct iscsi_task *task)
43f388b0
JK
4576{
4577 struct beiscsi_io_task *io_task;
4578 struct beiscsi_hba *phba = beiscsi_conn->phba;
4579 struct hwi_wrb_context *pwrb_context;
4580 struct hwi_controller *phwi_ctrlr;
a7909b39
JK
4581 uint16_t cri_index = BE_GET_CRI_FROM_CID(
4582 beiscsi_conn->beiscsi_conn_cid);
43f388b0
JK
4583
4584 phwi_ctrlr = phba->phwi_ctrlr;
a7909b39
JK
4585 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4586
4a4a11b9 4587 io_task = task->dd_data;
43f388b0
JK
4588
4589 if (io_task->pwrb_handle) {
4590 memset(io_task->pwrb_handle->pwrb, 0,
4591 sizeof(struct iscsi_wrb));
4592 free_wrb_handle(phba, pwrb_context,
4593 io_task->pwrb_handle);
4594 io_task->pwrb_handle = NULL;
4595 }
4596
4597 if (io_task->psgl_handle) {
4598 spin_lock_bh(&phba->mgmt_sgl_lock);
4599 free_mgmt_sgl_handle(phba,
4600 io_task->psgl_handle);
43f388b0 4601 io_task->psgl_handle = NULL;
4a4a11b9 4602 spin_unlock_bh(&phba->mgmt_sgl_lock);
43f388b0
JK
4603 }
4604
eb1c4692 4605 if (io_task->mtask_addr) {
43f388b0
JK
4606 pci_unmap_single(phba->pcidev,
4607 io_task->mtask_addr,
4608 io_task->mtask_data_count,
4609 PCI_DMA_TODEVICE);
eb1c4692
JSJ
4610 io_task->mtask_addr = 0;
4611 }
43f388b0
JK
4612}
4613
d629c471
JSJ
4614/**
4615 * beiscsi_cleanup_task()- Free driver resources of the task
4616 * @task: ptr to the iscsi task
4617 *
4618 **/
1282ab76
MC
4619static void beiscsi_cleanup_task(struct iscsi_task *task)
4620{
4621 struct beiscsi_io_task *io_task = task->dd_data;
4622 struct iscsi_conn *conn = task->conn;
4623 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4624 struct beiscsi_hba *phba = beiscsi_conn->phba;
4625 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4626 struct hwi_wrb_context *pwrb_context;
4627 struct hwi_controller *phwi_ctrlr;
a7909b39
JK
4628 uint16_t cri_index = BE_GET_CRI_FROM_CID(
4629 beiscsi_conn->beiscsi_conn_cid);
1282ab76
MC
4630
4631 phwi_ctrlr = phba->phwi_ctrlr;
a7909b39 4632 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1282ab76
MC
4633
4634 if (io_task->cmd_bhs) {
4635 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4636 io_task->bhs_pa.u.a64.address);
4637 io_task->cmd_bhs = NULL;
4638 }
4639
4640 if (task->sc) {
4641 if (io_task->pwrb_handle) {
4642 free_wrb_handle(phba, pwrb_context,
4643 io_task->pwrb_handle);
4644 io_task->pwrb_handle = NULL;
4645 }
4646
4647 if (io_task->psgl_handle) {
4648 spin_lock(&phba->io_sgl_lock);
4649 free_io_sgl_handle(phba, io_task->psgl_handle);
4650 spin_unlock(&phba->io_sgl_lock);
4651 io_task->psgl_handle = NULL;
4652 }
da334977
JK
4653
4654 if (io_task->scsi_cmnd) {
4655 scsi_dma_unmap(io_task->scsi_cmnd);
4656 io_task->scsi_cmnd = NULL;
4657 }
1282ab76 4658 } else {
43f388b0 4659 if (!beiscsi_conn->login_in_progress)
4a4a11b9 4660 beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
1282ab76
MC
4661 }
4662}
4663
6733b39a
JK
4664void
4665beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
4666 struct beiscsi_offload_params *params)
4667{
4668 struct wrb_handle *pwrb_handle;
340c99e9 4669 struct hwi_wrb_context *pwrb_context = NULL;
6733b39a 4670 struct beiscsi_hba *phba = beiscsi_conn->phba;
1282ab76
MC
4671 struct iscsi_task *task = beiscsi_conn->task;
4672 struct iscsi_session *session = task->conn->session;
6733b39a
JK
4673 u32 doorbell = 0;
4674
4675 /*
4676 * We can always use 0 here because it is reserved by libiscsi for
4677 * login/startup related tasks.
4678 */
1282ab76 4679 beiscsi_conn->login_in_progress = 0;
659743b0 4680 spin_lock_bh(&session->back_lock);
1282ab76 4681 beiscsi_cleanup_task(task);
659743b0 4682 spin_unlock_bh(&session->back_lock);
1282ab76 4683
340c99e9
JSJ
4684 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid,
4685 &pwrb_context);
6733b39a 4686
acb9693c 4687 /* Check for the adapter family */
2c9dfd36 4688 if (is_chip_be2_be3r(phba))
acb9693c 4689 beiscsi_offload_cxn_v0(params, pwrb_handle,
340c99e9
JSJ
4690 phba->init_mem,
4691 pwrb_context);
2c9dfd36 4692 else
340c99e9
JSJ
4693 beiscsi_offload_cxn_v2(params, pwrb_handle,
4694 pwrb_context);
6733b39a 4695
acb9693c
JSJ
4696 be_dws_le_to_cpu(pwrb_handle->pwrb,
4697 sizeof(struct iscsi_target_context_update_wrb));
6733b39a
JK
4698
4699 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
32951dd8 4700 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
bfead3b2 4701 << DB_DEF_PDU_WRB_INDEX_SHIFT;
6733b39a 4702 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
1e4be6ff
JK
4703 iowrite32(doorbell, phba->db_va +
4704 beiscsi_conn->doorbell_offset);
6733b39a
JK
4705}
4706
4707static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4708 int *index, int *age)
4709{
bfead3b2 4710 *index = (int)itt;
6733b39a
JK
4711 if (age)
4712 *age = conn->session->age;
4713}
4714
4715/**
4716 * beiscsi_alloc_pdu - allocates pdu and related resources
4717 * @task: libiscsi task
4718 * @opcode: opcode of pdu for task
4719 *
4720 * This is called with the session lock held. It will allocate
4721 * the wrb and sgl if needed for the command. And it will prep
4722 * the pdu's itt. beiscsi_parse_pdu will later translate
4723 * the pdu itt to the libiscsi task itt.
4724 */
4725static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4726{
4727 struct beiscsi_io_task *io_task = task->dd_data;
4728 struct iscsi_conn *conn = task->conn;
4729 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4730 struct beiscsi_hba *phba = beiscsi_conn->phba;
4731 struct hwi_wrb_context *pwrb_context;
4732 struct hwi_controller *phwi_ctrlr;
4733 itt_t itt;
a7909b39 4734 uint16_t cri_index = 0;
2afc95bf
JK
4735 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4736 dma_addr_t paddr;
6733b39a 4737
2afc95bf 4738 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
bc7accec 4739 GFP_ATOMIC, &paddr);
2afc95bf
JK
4740 if (!io_task->cmd_bhs)
4741 return -ENOMEM;
2afc95bf 4742 io_task->bhs_pa.u.a64.address = paddr;
bfead3b2 4743 io_task->libiscsi_itt = (itt_t)task->itt;
6733b39a
JK
4744 io_task->conn = beiscsi_conn;
4745
4746 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4747 task->hdr_max = sizeof(struct be_cmd_bhs);
d2cecf0d 4748 io_task->psgl_handle = NULL;
3ec78271 4749 io_task->pwrb_handle = NULL;
6733b39a
JK
4750
4751 if (task->sc) {
4752 spin_lock(&phba->io_sgl_lock);
4753 io_task->psgl_handle = alloc_io_sgl_handle(phba);
4754 spin_unlock(&phba->io_sgl_lock);
8359c79b
JSJ
4755 if (!io_task->psgl_handle) {
4756 beiscsi_log(phba, KERN_ERR,
4757 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4758 				    "BM_%d : Alloc of IO_SGL_ICD Failed "
4759 "for the CID : %d\n",
4760 beiscsi_conn->beiscsi_conn_cid);
2afc95bf 4761 goto free_hndls;
8359c79b 4762 }
d2cecf0d 4763 io_task->pwrb_handle = alloc_wrb_handle(phba,
340c99e9
JSJ
4764 beiscsi_conn->beiscsi_conn_cid,
4765 &io_task->pwrb_context);
8359c79b
JSJ
4766 if (!io_task->pwrb_handle) {
4767 beiscsi_log(phba, KERN_ERR,
4768 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4769 				    "BM_%d : Alloc of WRB_HANDLE Failed "
4770 "for the CID : %d\n",
4771 beiscsi_conn->beiscsi_conn_cid);
d2cecf0d 4772 goto free_io_hndls;
8359c79b 4773 }
6733b39a
JK
4774 } else {
4775 io_task->scsi_cmnd = NULL;
d7aea67b 4776 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
43f388b0 4777 beiscsi_conn->task = task;
6733b39a
JK
4778 if (!beiscsi_conn->login_in_progress) {
4779 spin_lock(&phba->mgmt_sgl_lock);
4780 io_task->psgl_handle = (struct sgl_handle *)
4781 alloc_mgmt_sgl_handle(phba);
4782 spin_unlock(&phba->mgmt_sgl_lock);
8359c79b
JSJ
4783 if (!io_task->psgl_handle) {
4784 beiscsi_log(phba, KERN_ERR,
4785 BEISCSI_LOG_IO |
4786 BEISCSI_LOG_CONFIG,
4787 						    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
4788 "for the CID : %d\n",
4789 beiscsi_conn->
4790 beiscsi_conn_cid);
2afc95bf 4791 goto free_hndls;
8359c79b 4792 }
2afc95bf 4793
6733b39a
JK
4794 beiscsi_conn->login_in_progress = 1;
4795 beiscsi_conn->plogin_sgl_handle =
4796 io_task->psgl_handle;
d2cecf0d
JK
4797 io_task->pwrb_handle =
4798 alloc_wrb_handle(phba,
340c99e9
JSJ
4799 beiscsi_conn->beiscsi_conn_cid,
4800 &io_task->pwrb_context);
8359c79b
JSJ
4801 if (!io_task->pwrb_handle) {
4802 beiscsi_log(phba, KERN_ERR,
4803 BEISCSI_LOG_IO |
4804 BEISCSI_LOG_CONFIG,
4805 						    "BM_%d : Alloc of WRB_HANDLE Failed "
4806 "for the CID : %d\n",
4807 beiscsi_conn->
4808 beiscsi_conn_cid);
4809 goto free_mgmt_hndls;
4810 }
d2cecf0d
JK
4811 beiscsi_conn->plogin_wrb_handle =
4812 io_task->pwrb_handle;
4813
6733b39a
JK
4814 } else {
4815 io_task->psgl_handle =
4816 beiscsi_conn->plogin_sgl_handle;
d2cecf0d
JK
4817 io_task->pwrb_handle =
4818 beiscsi_conn->plogin_wrb_handle;
6733b39a
JK
4819 }
4820 } else {
4821 spin_lock(&phba->mgmt_sgl_lock);
4822 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4823 spin_unlock(&phba->mgmt_sgl_lock);
8359c79b
JSJ
4824 if (!io_task->psgl_handle) {
4825 beiscsi_log(phba, KERN_ERR,
4826 BEISCSI_LOG_IO |
4827 BEISCSI_LOG_CONFIG,
4828 					    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
4829 "for the CID : %d\n",
4830 beiscsi_conn->
4831 beiscsi_conn_cid);
2afc95bf 4832 goto free_hndls;
8359c79b 4833 }
d2cecf0d
JK
4834 io_task->pwrb_handle =
4835 alloc_wrb_handle(phba,
340c99e9
JSJ
4836 beiscsi_conn->beiscsi_conn_cid,
4837 &io_task->pwrb_context);
8359c79b
JSJ
4838 if (!io_task->pwrb_handle) {
4839 beiscsi_log(phba, KERN_ERR,
4840 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4841 					    "BM_%d : Alloc of WRB_HANDLE Failed "
4842 "for the CID : %d\n",
4843 beiscsi_conn->beiscsi_conn_cid);
d2cecf0d 4844 goto free_mgmt_hndls;
8359c79b 4845 }
d2cecf0d 4846
6733b39a
JK
4847 }
4848 }
bfead3b2
JK
4849 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4850 wrb_index << 16) | (unsigned int)
4851 (io_task->psgl_handle->sgl_index));
32951dd8 4852 io_task->pwrb_handle->pio_handle = task;
bfead3b2 4853
6733b39a
JK
4854 io_task->cmd_bhs->iscsi_hdr.itt = itt;
4855 return 0;
2afc95bf 4856
d2cecf0d
JK
4857free_io_hndls:
4858 spin_lock(&phba->io_sgl_lock);
4859 free_io_sgl_handle(phba, io_task->psgl_handle);
4860 spin_unlock(&phba->io_sgl_lock);
4861 goto free_hndls;
4862free_mgmt_hndls:
4863 spin_lock(&phba->mgmt_sgl_lock);
4864 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
a7909b39 4865 io_task->psgl_handle = NULL;
d2cecf0d 4866 spin_unlock(&phba->mgmt_sgl_lock);
2afc95bf
JK
4867free_hndls:
4868 phwi_ctrlr = phba->phwi_ctrlr;
a7909b39
JK
4869 cri_index = BE_GET_CRI_FROM_CID(
4870 beiscsi_conn->beiscsi_conn_cid);
4871 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
d2cecf0d
JK
4872 if (io_task->pwrb_handle)
4873 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
2afc95bf
JK
4874 io_task->pwrb_handle = NULL;
4875 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4876 io_task->bhs_pa.u.a64.address);
1282ab76 4877 io_task->cmd_bhs = NULL;
2afc95bf 4878 return -ENOMEM;
6733b39a 4879}
09a1093a
JSJ
4880int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
4881 unsigned int num_sg, unsigned int xferlen,
4882 unsigned int writedir)
4883{
4884
4885 struct beiscsi_io_task *io_task = task->dd_data;
4886 struct iscsi_conn *conn = task->conn;
4887 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4888 struct beiscsi_hba *phba = beiscsi_conn->phba;
4889 struct iscsi_wrb *pwrb = NULL;
4890 unsigned int doorbell = 0;
4891
4892 pwrb = io_task->pwrb_handle->pwrb;
09a1093a
JSJ
4893
4894 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4895 io_task->bhs_len = sizeof(struct be_cmd_bhs);
4896
4897 if (writedir) {
4898 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4899 INI_WR_CMD);
4900 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
4901 } else {
4902 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4903 INI_RD_CMD);
4904 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
4905 }
4906
4907 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
4908 type, pwrb);
4909
4910 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
4911 cpu_to_be16(*(unsigned short *)
4912 &io_task->cmd_bhs->iscsi_hdr.lun));
4913 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
4914 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4915 io_task->pwrb_handle->wrb_index);
4916 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4917 be32_to_cpu(task->cmdsn));
4918 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4919 io_task->psgl_handle->sgl_index);
4920
4921 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
4922 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
340c99e9
JSJ
4923 io_task->pwrb_handle->wrb_index);
4924 if (io_task->pwrb_context->plast_wrb)
4925 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
4926 io_task->pwrb_context->plast_wrb,
4927 io_task->pwrb_handle->wrb_index);
4928 io_task->pwrb_context->plast_wrb = pwrb;
09a1093a
JSJ
4929
4930 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4931
4932 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4933 doorbell |= (io_task->pwrb_handle->wrb_index &
4934 DB_DEF_PDU_WRB_INDEX_MASK) <<
4935 DB_DEF_PDU_WRB_INDEX_SHIFT;
4936 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
1e4be6ff
JK
4937 iowrite32(doorbell, phba->db_va +
4938 beiscsi_conn->doorbell_offset);
09a1093a
JSJ
4939 return 0;
4940}
6733b39a 4941
6733b39a
JK
4942static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4943 unsigned int num_sg, unsigned int xferlen,
4944 unsigned int writedir)
4945{
4946
4947 struct beiscsi_io_task *io_task = task->dd_data;
4948 struct iscsi_conn *conn = task->conn;
4949 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4950 struct beiscsi_hba *phba = beiscsi_conn->phba;
4951 struct iscsi_wrb *pwrb = NULL;
4952 unsigned int doorbell = 0;
4953
4954 pwrb = io_task->pwrb_handle->pwrb;
6733b39a
JK
4955 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4956 io_task->bhs_len = sizeof(struct be_cmd_bhs);
4957
4958 if (writedir) {
32951dd8
JK
4959 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4960 INI_WR_CMD);
6733b39a 4961 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
6733b39a 4962 } else {
32951dd8
JK
4963 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4964 INI_RD_CMD);
6733b39a
JK
4965 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4966 }
6733b39a 4967
09a1093a
JSJ
4968 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
4969 type, pwrb);
4970
6733b39a 4971 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
dc63aac6
JK
4972 cpu_to_be16(*(unsigned short *)
4973 &io_task->cmd_bhs->iscsi_hdr.lun));
6733b39a
JK
4974 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4975 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4976 io_task->pwrb_handle->wrb_index);
4977 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4978 be32_to_cpu(task->cmdsn));
4979 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4980 io_task->psgl_handle->sgl_index);
4981
4982 hwi_write_sgl(pwrb, sg, num_sg, io_task);
4983
4984 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
340c99e9
JSJ
4985 io_task->pwrb_handle->wrb_index);
4986 if (io_task->pwrb_context->plast_wrb)
4987 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
4988 io_task->pwrb_context->plast_wrb,
4989 io_task->pwrb_handle->wrb_index);
4990 io_task->pwrb_context->plast_wrb = pwrb;
4991
6733b39a
JK
4992 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4993
4994 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
32951dd8 4995 doorbell |= (io_task->pwrb_handle->wrb_index &
6733b39a
JK
4996 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4997 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4998
1e4be6ff
JK
4999 iowrite32(doorbell, phba->db_va +
5000 beiscsi_conn->doorbell_offset);
6733b39a
JK
5001 return 0;
5002}
5003
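/**
 * beiscsi_mtask()- Post a management task to the adapter
 * @task: libiscsi task carrying the mgmt PDU
 *
 * Builds the WRB for login, nop-out, text, TMF and logout PDUs
 * (in v0 or v2 layout depending on the chip family) and rings
 * the doorbell.
 **/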
5004static int beiscsi_mtask(struct iscsi_task *task)
5005{
dafab8e0 5006 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
5007 struct iscsi_conn *conn = task->conn;
5008 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
5009 struct beiscsi_hba *phba = beiscsi_conn->phba;
5010 struct iscsi_wrb *pwrb = NULL;
5011 unsigned int doorbell = 0;
dafab8e0 5012 unsigned int cid;
09a1093a 5013 unsigned int pwrb_typeoffset = 0;
6733b39a 5014
bfead3b2 5015 cid = beiscsi_conn->beiscsi_conn_cid;
6733b39a 5016 pwrb = io_task->pwrb_handle->pwrb;
caf818f1 5017 memset(pwrb, 0, sizeof(*pwrb));
09a1093a 5018
2c9dfd36 5019 if (is_chip_be2_be3r(phba)) {
09a1093a
JSJ
5020 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
5021 be32_to_cpu(task->cmdsn));
5022 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
5023 io_task->pwrb_handle->wrb_index);
5024 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
5025 io_task->psgl_handle->sgl_index);
5026 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
5027 task->data_count);
5028 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
340c99e9
JSJ
5029 io_task->pwrb_handle->wrb_index);
5030 if (io_task->pwrb_context->plast_wrb)
5031 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
5032 io_task->pwrb_context->plast_wrb,
5033 io_task->pwrb_handle->wrb_index);
5034 io_task->pwrb_context->plast_wrb = pwrb;
5035
09a1093a 5036 pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
2c9dfd36
JK
5037 } else {
5038 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
5039 be32_to_cpu(task->cmdsn));
5040 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
5041 io_task->pwrb_handle->wrb_index);
5042 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
5043 io_task->psgl_handle->sgl_index);
5044 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
5045 task->data_count);
5046 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
340c99e9
JSJ
5047 io_task->pwrb_handle->wrb_index);
5048 if (io_task->pwrb_context->plast_wrb)
5049 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
5050 io_task->pwrb_context->plast_wrb,
5051 io_task->pwrb_handle->wrb_index);
5052 io_task->pwrb_context->plast_wrb = pwrb;
5053
2c9dfd36 5054 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
09a1093a
JSJ
5055 }
5056
dafab8e0 5057
6733b39a
JK
5058 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
5059 case ISCSI_OP_LOGIN:
6733b39a 5060 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
09a1093a 5061 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
6733b39a
JK
5062 hwi_write_buffer(pwrb, task);
5063 break;
5064 case ISCSI_OP_NOOP_OUT:
1390b01b 5065 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
09a1093a 5066 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
2c9dfd36
JK
5067 if (is_chip_be2_be3r(phba))
5068 AMAP_SET_BITS(struct amap_iscsi_wrb,
09a1093a
JSJ
5069 dmsg, pwrb, 1);
5070 else
2c9dfd36 5071 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
09a1093a 5072 dmsg, pwrb, 1);
1390b01b 5073 } else {
09a1093a 5074 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
2c9dfd36
JK
5075 if (is_chip_be2_be3r(phba))
5076 AMAP_SET_BITS(struct amap_iscsi_wrb,
09a1093a
JSJ
5077 dmsg, pwrb, 0);
5078 else
2c9dfd36 5079 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
09a1093a 5080 dmsg, pwrb, 0);
1390b01b 5081 }
6733b39a
JK
5082 hwi_write_buffer(pwrb, task);
5083 break;
5084 case ISCSI_OP_TEXT:
09a1093a 5085 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
6733b39a
JK
5086 hwi_write_buffer(pwrb, task);
5087 break;
5088 case ISCSI_OP_SCSI_TMFUNC:
09a1093a 5089 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
6733b39a
JK
5090 hwi_write_buffer(pwrb, task);
5091 break;
5092 case ISCSI_OP_LOGOUT:
09a1093a 5093 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
6733b39a
JK
5094 hwi_write_buffer(pwrb, task);
5095 break;
5096
5097 default:
99bc5d55
JSJ
5098 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5099 "BM_%d : opcode =%d Not supported\n",
5100 task->hdr->opcode & ISCSI_OPCODE_MASK);
5101
6733b39a
JK
5102 return -EINVAL;
5103 }
5104
09a1093a 5105 /* Set the task type */
2c9dfd36
JK
5106 io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
5107 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
5108 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
6733b39a 5109
bfead3b2 5110 doorbell |= cid & DB_WRB_POST_CID_MASK;
32951dd8 5111 doorbell |= (io_task->pwrb_handle->wrb_index &
6733b39a
JK
5112 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
5113 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
1e4be6ff
JK
5114 iowrite32(doorbell, phba->db_va +
5115 beiscsi_conn->doorbell_offset);
6733b39a
JK
5116 return 0;
5117}
5118
5119static int beiscsi_task_xmit(struct iscsi_task *task)
5120{
6733b39a
JK
5121 struct beiscsi_io_task *io_task = task->dd_data;
5122 struct scsi_cmnd *sc = task->sc;
09a1093a 5123 struct beiscsi_hba *phba = NULL;
6733b39a
JK
5124 struct scatterlist *sg;
5125 int num_sg;
5126 unsigned int writedir = 0, xferlen = 0;
5127
09a1093a
JSJ
5128 phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
5129
6733b39a
JK
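/* A task with no SCSI command attached (login, NOP-Out, text, TMF,
 * logout) is a management task and takes the beiscsi_mtask() path.
 */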
5130 if (!sc)
5131 return beiscsi_mtask(task);
5132
5133 io_task->scsi_cmnd = sc;
5134 num_sg = scsi_dma_map(sc);
5135 if (num_sg < 0) {
99bc5d55
JSJ
5136 struct iscsi_conn *conn = task->conn;
5137 struct beiscsi_hba *phba = NULL;
5138
5139 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
afb96058
JK
5140 beiscsi_log(phba, KERN_ERR,
5141 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
5142 "BM_%d : scsi_dma_map Failed "
5143 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
5144 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
5145 io_task->libiscsi_itt, scsi_bufflen(sc));
99bc5d55 5146
6733b39a
JK
5147 return num_sg;
5148 }
6733b39a
JK
5149 xferlen = scsi_bufflen(sc);
5150 sg = scsi_sglist(sc);
99bc5d55 5151 if (sc->sc_data_direction == DMA_TO_DEVICE)
6733b39a 5152 writedir = 1;
99bc5d55 5153 else
6733b39a 5154 writedir = 0;
99bc5d55 5155
09a1093a 5156 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
6733b39a
JK
5157}
5158
ffce3e2e
JK
5159/**
5160 * beiscsi_bsg_request - handle bsg request from ISCSI transport
5161 * @job: job to handle
5162 */
5163static int beiscsi_bsg_request(struct bsg_job *job)
5164{
5165 struct Scsi_Host *shost;
5166 struct beiscsi_hba *phba;
5167 struct iscsi_bsg_request *bsg_req = job->request;
5168 int rc = -EINVAL;
5169 unsigned int tag;
5170 struct be_dma_mem nonemb_cmd;
5171 struct be_cmd_resp_hdr *resp;
5172 struct iscsi_bsg_reply *bsg_reply = job->reply;
5173 unsigned short status, extd_status;
5174
5175 shost = iscsi_job_to_shost(job);
5176 phba = iscsi_host_priv(shost);
5177
5178 switch (bsg_req->msgcode) {
5179 case ISCSI_BSG_HST_VENDOR:
5180 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
5181 job->request_payload.payload_len,
5182 &nonemb_cmd.dma);
5183 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
5184 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5185 "BM_%d : Failed to allocate memory for "
5186 "beiscsi_bsg_request\n");
8359c79b 5187 return -ENOMEM;
ffce3e2e
JK
5188 }
5189 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
5190 &nonemb_cmd);
5191 if (!tag) {
99bc5d55 5192 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
8359c79b 5193 "BM_%d : MBX Tag Allocation Failed\n");
99bc5d55 5194
ffce3e2e
JK
5195 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
5196 nonemb_cmd.va, nonemb_cmd.dma);
5197 return -EAGAIN;
e175defe
JSJ
5198 }
5199
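/*
 * Wait for the MCC command tracked by 'tag' to complete (or time
 * out); the completion code is packed into mcc_numtag[tag] with
 * extd_status in bits 15:8 and status in bits 7:0.
 */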
5200 rc = wait_event_interruptible_timeout(
5201 phba->ctrl.mcc_wait[tag],
5202 phba->ctrl.mcc_numtag[tag],
5203 msecs_to_jiffies(
5204 BEISCSI_HOST_MBX_TIMEOUT));
ffce3e2e
JK
5205 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
5206 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
5207 free_mcc_tag(&phba->ctrl, tag);
5208 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
5209 sg_copy_from_buffer(job->reply_payload.sg_list,
5210 job->reply_payload.sg_cnt,
5211 nonemb_cmd.va, (resp->response_length
5212 + sizeof(*resp)));
5213 bsg_reply->reply_payload_rcv_len = resp->response_length;
5214 bsg_reply->result = status;
5215 bsg_job_done(job, bsg_reply->result,
5216 bsg_reply->reply_payload_rcv_len);
5217 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
5218 nonemb_cmd.va, nonemb_cmd.dma);
5219 if (status || extd_status) {
99bc5d55 5220 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
8359c79b 5221 "BM_%d : MBX Cmd Failed"
99bc5d55
JSJ
5222 " status = %d extd_status = %d\n",
5223 status, extd_status);
5224
ffce3e2e 5225 return -EIO;
8359c79b
JSJ
5226 } else {
5227 rc = 0;
ffce3e2e
JK
5228 }
5229 break;
5230
5231 default:
99bc5d55
JSJ
5232 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5233 "BM_%d : Unsupported bsg command: 0x%x\n",
5234 bsg_req->msgcode);
ffce3e2e
JK
5235 break;
5236 }
5237
5238 return rc;
5239}
5240
99bc5d55
JSJ
5241void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
5242{
5243 /* Set the logging parameter */
5244 beiscsi_log_enable_init(phba, beiscsi_log_enable);
5245}
5246
4d4d1ef8
JSJ
5247/*
5248 * beiscsi_quiesce()- Cleanup Driver resources
5249 * @phba: Instance Priv structure
3567f36a 5250 * @unload_state: Clean or EEH unload state
4d4d1ef8
JSJ
5251 *
5252 * Free the OS and HW resources held by the driver
5253 **/
3567f36a
JK
5254static void beiscsi_quiesce(struct beiscsi_hba *phba,
5255 uint32_t unload_state)
6733b39a 5256{
bfead3b2
JK
5257 struct hwi_controller *phwi_ctrlr;
5258 struct hwi_context_memory *phwi_context;
5259 struct be_eq_obj *pbe_eq;
5260 unsigned int i, msix_vec;
6733b39a 5261
bfead3b2
JK
5262 phwi_ctrlr = phba->phwi_ctrlr;
5263 phwi_context = phwi_ctrlr->phwi_ctxt;
6733b39a 5264 hwi_disable_intr(phba);
bfead3b2
JK
5265 if (phba->msix_enabled) {
5266 for (i = 0; i <= phba->num_cpus; i++) {
5267 msix_vec = phba->msix_entries[i].vector;
3567f36a 5268 synchronize_irq(msix_vec);
bfead3b2 5269 free_irq(msix_vec, &phwi_context->be_eq[i]);
8fcfb210 5270 kfree(phba->msi_name[i]);
bfead3b2
JK
5271 }
5272 } else
3567f36a
JK
5273 if (phba->pcidev->irq) {
5274 synchronize_irq(phba->pcidev->irq);
bfead3b2 5275 free_irq(phba->pcidev->irq, phba);
3567f36a 5276 }
bfead3b2 5277 pci_disable_msix(phba->pcidev);
53281edb 5278 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
3567f36a 5279
89f8b33c
JA
5280 for (i = 0; i < phba->num_cpus; i++) {
5281 pbe_eq = &phwi_context->be_eq[i];
511cbce2 5282 irq_poll_disable(&pbe_eq->iopoll);
89f8b33c 5283 }
6733b39a 5284
3567f36a
JK
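/*
 * A clean unload releases everything, including driver memory and the
 * mailbox DMA area; an EEH unload only purges and tears down the HW
 * queues so the function can be re-initialized on EEH resume.
 */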
5285 if (unload_state == BEISCSI_CLEAN_UNLOAD) {
5286 destroy_workqueue(phba->wq);
5287 beiscsi_clean_port(phba);
5288 beiscsi_free_mem(phba);
e9b91193 5289
3567f36a
JK
5290 beiscsi_unmap_pci_function(phba);
5291 pci_free_consistent(phba->pcidev,
5292 phba->ctrl.mbox_mem_alloced.size,
5293 phba->ctrl.mbox_mem_alloced.va,
5294 phba->ctrl.mbox_mem_alloced.dma);
5295 } else {
5296 hwi_purge_eq(phba);
5297 hwi_cleanup(phba);
5298 }
7a158003 5299
25602c97
JK
5300}
5301
5302static void beiscsi_remove(struct pci_dev *pcidev)
5303{
25602c97
JK
5304 struct beiscsi_hba *phba = NULL;
5305
5306 phba = pci_get_drvdata(pcidev);
5307 if (!phba) {
5308 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
5309 return;
5310 }
5311
0e43895e 5312 beiscsi_destroy_def_ifaces(phba);
9d045163 5313 iscsi_boot_destroy_kset(phba->boot_kset);
6733b39a 5314 iscsi_host_remove(phba->shost);
cdaa4ded 5315 beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
6733b39a
JK
5316 pci_dev_put(phba->pcidev);
5317 iscsi_host_free(phba->shost);
3567f36a
JK
5318 pci_disable_pcie_error_reporting(pcidev);
5319 pci_set_drvdata(pcidev, NULL);
e307f3ac 5320 pci_release_regions(pcidev);
8dce69ff 5321 pci_disable_device(pcidev);
6733b39a
JK
5322}
5323
bfead3b2
JK
5324static void beiscsi_msix_enable(struct beiscsi_hba *phba)
5325{
5326 int i, status;
5327
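/*
 * Ask for num_cpus + 1 MSI-X vectors: one per CPU for the I/O event
 * queues plus one for the MCC event queue. min == max makes the
 * request all-or-nothing; if it fails, msix_enabled stays false and
 * the driver keeps using the legacy line interrupt.
 */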
5328 for (i = 0; i <= phba->num_cpus; i++)
5329 phba->msix_entries[i].entry = i;
5330
e149fc13
AG
5331 status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
5332 phba->num_cpus + 1, phba->num_cpus + 1);
5333 if (status > 0)
bfead3b2
JK
5334 phba->msix_enabled = true;
5335
5336 return;
5337}
5338
73af08e1
JK
5339static void be_eqd_update(struct beiscsi_hba *phba)
5340{
5341 struct be_set_eqd set_eqd[MAX_CPUS];
5342 struct be_aic_obj *aic;
5343 struct be_eq_obj *pbe_eq;
5344 struct hwi_controller *phwi_ctrlr;
5345 struct hwi_context_memory *phwi_context;
5346 int eqd, i, num = 0;
5347 ulong now;
5348 u32 pps, delta;
5349 unsigned int tag;
5350
5351 phwi_ctrlr = phba->phwi_ctrlr;
5352 phwi_context = phwi_ctrlr->phwi_ctxt;
5353
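/*
 * Adaptive interrupt coalescing: turn each EQ's completion count since
 * the last pass into an events-per-second rate, derive a new EQ delay
 * clamped to [min_eqd, max_eqd], and batch every changed value into a
 * single modify-EQ-delay firmware command.
 */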
5354 for (i = 0; i <= phba->num_cpus; i++) {
5355 aic = &phba->aic_obj[i];
5356 pbe_eq = &phwi_context->be_eq[i];
5357 now = jiffies;
5358 if (!aic->jiffs || time_before(now, aic->jiffs) ||
5359 pbe_eq->cq_count < aic->eq_prev) {
5360 aic->jiffs = now;
5361 aic->eq_prev = pbe_eq->cq_count;
5362 continue;
5363 }
5364 delta = jiffies_to_msecs(now - aic->jiffs);
5365 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
5366 eqd = (pps / 1500) << 2;
5367
5368 if (eqd < 8)
5369 eqd = 0;
5370 eqd = min_t(u32, eqd, phwi_context->max_eqd);
5371 eqd = max_t(u32, eqd, phwi_context->min_eqd);
5372
5373 aic->jiffs = now;
5374 aic->eq_prev = pbe_eq->cq_count;
5375
5376 if (eqd != aic->prev_eqd) {
5377 set_eqd[num].delay_multiplier = (eqd * 65)/100;
5378 set_eqd[num].eq_id = pbe_eq->q.id;
5379 aic->prev_eqd = eqd;
5380 num++;
5381 }
5382 }
5383 if (num) {
5384 tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
5385 if (tag)
5386 beiscsi_mccq_compl(phba, tag, NULL, NULL);
5387 }
5388}
5389
a3d313ea
JK
5390static void be_check_boot_session(struct beiscsi_hba *phba)
5391{
5392 if (beiscsi_setup_boot_info(phba))
5393 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5394 "BM_%d : Could not set up "
5395 "iSCSI boot info on async event.\n");
5396}
5397
7a158003
JSJ
5398/*
5399 * beiscsi_hw_health_check()- Check adapter health
5400 * @work: work item to check HW health
5401 *
5402 * Check whether the adapter is in an unrecoverable state.
5403 **/
5404static void
5405beiscsi_hw_health_check(struct work_struct *work)
5406{
5407 struct beiscsi_hba *phba =
5408 container_of(work, struct beiscsi_hba,
5409 beiscsi_hw_check_task.work);
5410
73af08e1
JK
5411 be_eqd_update(phba);
5412
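/*
 * If the boot-check flag was set (typically from an async event), keep
 * retrying the boot-session fetch every BE_GET_BOOT_TO passes until it
 * succeeds or the retry budget in phba->get_boot is exhausted.
 */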
a3d313ea 5413 if (phba->state & BE_ADAPTER_CHECK_BOOT) {
3efde862
JSJ
5414 if ((phba->get_boot > 0) && (!phba->boot_kset)) {
5415 phba->get_boot--;
5416 if (!(phba->get_boot % BE_GET_BOOT_TO))
5417 be_check_boot_session(phba);
5418 } else {
5419 phba->state &= ~BE_ADAPTER_CHECK_BOOT;
5420 phba->get_boot = 0;
5421 }
a3d313ea
JK
5422 }
5423
7a158003
JSJ
5424 beiscsi_ue_detect(phba);
5425
5426 schedule_delayed_work(&phba->beiscsi_hw_check_task,
5427 msecs_to_jiffies(1000));
5428}
5429
3567f36a
JK
5430
5431static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
5432 pci_channel_state_t state)
5433{
5434 struct beiscsi_hba *phba = NULL;
5435
5436 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5437 phba->state |= BE_ADAPTER_PCI_ERR;
5438
5439 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5440 "BM_%d : EEH error detected\n");
5441
5442 beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
5443
5444 if (state == pci_channel_io_perm_failure) {
5445 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5446 "BM_%d : EEH : State PERM Failure");
5447 return PCI_ERS_RESULT_DISCONNECT;
5448 }
5449
5450 pci_disable_device(pdev);
5451
5452 /* The error could cause the FW to trigger a flash debug dump.
5453 * Resetting the card while flash dump is in progress
5454 * can cause it not to recover; wait for it to finish.
5455 * Wait only for first function as it is needed only once per
5456 * adapter.
5457 **/
5458 if (pdev->devfn == 0)
5459 ssleep(30);
5460
5461 return PCI_ERS_RESULT_NEED_RESET;
5462}
5463
5464static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
5465{
5466 struct beiscsi_hba *phba = NULL;
5467 int status = 0;
5468
5469 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5470
5471 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5472 "BM_%d : EEH Reset\n");
5473
5474 status = pci_enable_device(pdev);
5475 if (status)
5476 return PCI_ERS_RESULT_DISCONNECT;
5477
5478 pci_set_master(pdev);
5479 pci_set_power_state(pdev, PCI_D0);
5480 pci_restore_state(pdev);
5481
5482 /* Wait for the CHIP Reset to complete */
5483 status = be_chk_reset_complete(phba);
5484 if (!status) {
5485 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5486 "BM_%d : EEH Reset Completed\n");
5487 } else {
5488 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5489 "BM_%d : EEH Reset Completion Failure\n");
5490 return PCI_ERS_RESULT_DISCONNECT;
5491 }
5492
5493 pci_cleanup_aer_uncorrect_error_status(pdev);
5494 return PCI_ERS_RESULT_RECOVERED;
5495}
5496
5497static void beiscsi_eeh_resume(struct pci_dev *pdev)
5498{
5499 int ret = 0, i;
5500 struct be_eq_obj *pbe_eq;
5501 struct beiscsi_hba *phba = NULL;
5502 struct hwi_controller *phwi_ctrlr;
5503 struct hwi_context_memory *phwi_context;
5504
5505 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5506 pci_save_state(pdev);
5507
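/*
 * Recover the function after a successful slot reset: redo the MSI-X
 * setup, reset the function and wait for the chip reset to complete,
 * re-initialize the controller, MCC tag pool and EQ polling contexts,
 * then re-request IRQs and re-enable interrupts.
 */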
5508 if (enable_msix)
5509 find_num_cpus(phba);
5510 else
5511 phba->num_cpus = 1;
5512
5513 if (enable_msix) {
5514 beiscsi_msix_enable(phba);
5515 if (!phba->msix_enabled)
5516 phba->num_cpus = 1;
5517 }
5518
5519 ret = beiscsi_cmd_reset_function(phba);
5520 if (ret) {
5521 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5522 "BM_%d : Reset Failed\n");
5523 goto ret_err;
5524 }
5525
5526 ret = be_chk_reset_complete(phba);
5527 if (ret) {
5528 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5529 "BM_%d : Failed to get out of reset.\n");
5530 goto ret_err;
5531 }
5532
5533 beiscsi_get_params(phba);
5534 phba->shost->max_id = phba->params.cxns_per_ctrl;
5535 phba->shost->can_queue = phba->params.ios_per_ctrl;
5536 ret = hwi_init_controller(phba);
5537
5538 for (i = 0; i < MAX_MCC_CMD; i++) {
5539 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5540 phba->ctrl.mcc_tag[i] = i + 1;
5541 phba->ctrl.mcc_numtag[i + 1] = 0;
5542 phba->ctrl.mcc_tag_available++;
5543 }
5544
5545 phwi_ctrlr = phba->phwi_ctrlr;
5546 phwi_context = phwi_ctrlr->phwi_ctxt;
5547
89f8b33c 5548 for (i = 0; i < phba->num_cpus; i++) {
3567f36a 5549 pbe_eq = &phwi_context->be_eq[i];
511cbce2 5550 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
89f8b33c 5551 be_iopoll);
3567f36a
JK
5552 }
5553
89f8b33c
JA
5554 i = (phba->msix_enabled) ? i : 0;
5555 /* Work item for MCC handling */
5556 pbe_eq = &phwi_context->be_eq[i];
5557 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
5558
3567f36a
JK
5559 ret = beiscsi_init_irqs(phba);
5560 if (ret < 0) {
5561 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5562 "BM_%d : beiscsi_eeh_resume - "
5563 "Failed to beiscsi_init_irqs\n");
5564 goto ret_err;
5565 }
5566
5567 hwi_enable_intr(phba);
5568 phba->state &= ~BE_ADAPTER_PCI_ERR;
5569
5570 return;
5571ret_err:
5572 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5573 "BM_%d : AER EEH Resume Failed\n");
5574}
5575
6f039790
GKH
5576static int beiscsi_dev_probe(struct pci_dev *pcidev,
5577 const struct pci_device_id *id)
6733b39a
JK
5578{
5579 struct beiscsi_hba *phba = NULL;
bfead3b2
JK
5580 struct hwi_controller *phwi_ctrlr;
5581 struct hwi_context_memory *phwi_context;
5582 struct be_eq_obj *pbe_eq;
3567f36a 5583 int ret = 0, i;
6733b39a
JK
5584
5585 ret = beiscsi_enable_pci(pcidev);
5586 if (ret < 0) {
99bc5d55
JSJ
5587 dev_err(&pcidev->dev,
5588 "beiscsi_dev_probe - Failed to enable pci device\n");
6733b39a
JK
5589 return ret;
5590 }
5591
5592 phba = beiscsi_hba_alloc(pcidev);
5593 if (!phba) {
99bc5d55
JSJ
5594 dev_err(&pcidev->dev,
5595 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
6733b39a
JK
5596 goto disable_pci;
5597 }
5598
3567f36a
JK
5599 /* Enable EEH reporting */
5600 ret = pci_enable_pcie_error_reporting(pcidev);
5601 if (ret)
5602 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5603 "BM_%d : PCIe Error Reporting "
5604 "Enabling Failed\n");
5605
5606 pci_save_state(pcidev);
5607
99bc5d55
JSJ
5608 /* Initialize Driver configuration Parameters */
5609 beiscsi_hba_attrs_init(phba);
5610
e175defe 5611 phba->fw_timeout = false;
6c83185a 5612 phba->mac_addr_set = false;
e175defe
JSJ
5613
5614
f98c96b0
JK
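/*
 * Select the I/O WRB builder for this ASIC family: BE2/BE3 device IDs
 * use beiscsi_iotask() with the v1 WRB layout, the SKH (GEN4) device
 * uses beiscsi_iotask_v2(); unknown IDs leave the generation at 0.
 */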
5615 switch (pcidev->device) {
5616 case BE_DEVICE_ID1:
5617 case OC_DEVICE_ID1:
5618 case OC_DEVICE_ID2:
5619 phba->generation = BE_GEN2;
09a1093a 5620 phba->iotask_fn = beiscsi_iotask;
f98c96b0
JK
5621 break;
5622 case BE_DEVICE_ID2:
5623 case OC_DEVICE_ID3:
5624 phba->generation = BE_GEN3;
09a1093a 5625 phba->iotask_fn = beiscsi_iotask;
f98c96b0 5626 break;
139a1b1e
JSJ
5627 case OC_SKH_ID1:
5628 phba->generation = BE_GEN4;
09a1093a 5629 phba->iotask_fn = beiscsi_iotask_v2;
bf9131cb 5630 break;
f98c96b0
JK
5631 default:
5632 phba->generation = 0;
5633 }
5634
6733b39a
JK
5635 ret = be_ctrl_init(phba, pcidev);
5636 if (ret) {
99bc5d55
JSJ
5637 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5638 "BM_%d : beiscsi_dev_probe-"
5639 "Failed in be_ctrl_init\n");
6733b39a
JK
5640 goto hba_free;
5641 }
5642
cdaa4ded
JB
5643 /*
5644 * FUNCTION_RESET should clean up any stale info in FW for this fn
5645 */
4d4d1ef8
JSJ
5646 ret = beiscsi_cmd_reset_function(phba);
5647 if (ret) {
5648 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
92665a66 5649 "BM_%d : Reset Failed\n");
4d4d1ef8
JSJ
5650 goto hba_free;
5651 }
5652 ret = be_chk_reset_complete(phba);
5653 if (ret) {
5654 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
92665a66 5655 "BM_%d : Failed to get out of reset.\n");
4d4d1ef8 5656 goto hba_free;
e9b91193
JK
5657 }
5658
6733b39a
JK
5659 spin_lock_init(&phba->io_sgl_lock);
5660 spin_lock_init(&phba->mgmt_sgl_lock);
5661 spin_lock_init(&phba->isr_lock);
8f09a3b9 5662 spin_lock_init(&phba->async_pdu_lock);
7da50879
JK
5663 ret = mgmt_get_fw_config(&phba->ctrl, phba);
5664 if (ret != 0) {
99bc5d55
JSJ
5665 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5666 "BM_%d : Error getting fw config\n");
7da50879
JK
5667 goto free_port;
5668 }
53aefe25 5669 mgmt_get_port_name(&phba->ctrl, phba);
68c26a3a
JK
5670
5671 if (enable_msix)
5672 find_num_cpus(phba);
5673 else
5674 phba->num_cpus = 1;
5675
5676 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5677 "BM_%d : num_cpus = %d\n",
5678 phba->num_cpus);
5679
5680 if (enable_msix) {
5681 beiscsi_msix_enable(phba);
5682 if (!phba->msix_enabled)
5683 phba->num_cpus = 1;
5684 }
5685
843ae752 5686 phba->shost->max_id = phba->params.cxns_per_ctrl;
6733b39a 5687 beiscsi_get_params(phba);
aa874f07 5688 phba->shost->can_queue = phba->params.ios_per_ctrl;
6733b39a
JK
5689 ret = beiscsi_init_port(phba);
5690 if (ret < 0) {
99bc5d55
JSJ
5691 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5692 "BM_%d : beiscsi_dev_probe-"
5693 "Failed in beiscsi_init_port\n");
6733b39a
JK
5694 goto free_port;
5695 }
5696
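/*
 * Build the MCC tag pool: tags 1..MAX_MCC_CMD each get a completion
 * wait queue, are marked available, and start with a cleared tag
 * state; the alloc and free indices begin at zero.
 */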
3567f36a 5697 for (i = 0; i < MAX_MCC_CMD; i++) {
756d29c8
JK
5698 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5699 phba->ctrl.mcc_tag[i] = i + 1;
5700 phba->ctrl.mcc_numtag[i + 1] = 0;
5701 phba->ctrl.mcc_tag_available++;
1957aa7f 5702 memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
8fc01eaa 5703 sizeof(struct be_dma_mem));
756d29c8
JK
5704 }
5705
5706 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
5707
72fb46a9 5708 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
6733b39a 5709 phba->shost->host_no);
d8537548 5710 phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name);
6733b39a 5711 if (!phba->wq) {
99bc5d55
JSJ
5712 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5713 "BM_%d : beiscsi_dev_probe-"
5714 "Failed to allocate work queue\n");
6733b39a
JK
5715 goto free_twq;
5716 }
5717
7a158003
JSJ
5718 INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
5719 beiscsi_hw_health_check);
6733b39a 5720
bfead3b2
JK
5721 phwi_ctrlr = phba->phwi_ctrlr;
5722 phwi_context = phwi_ctrlr->phwi_ctxt;
72fb46a9 5723
89f8b33c 5724 for (i = 0; i < phba->num_cpus; i++) {
72fb46a9 5725 pbe_eq = &phwi_context->be_eq[i];
511cbce2 5726 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
89f8b33c 5727 be_iopoll);
6733b39a 5728 }
72fb46a9 5729
89f8b33c
JA
5730 i = (phba->msix_enabled) ? i : 0;
5731 /* Work item for MCC handling */
5732 pbe_eq = &phwi_context->be_eq[i];
5733 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
5734
6733b39a
JK
5735 ret = beiscsi_init_irqs(phba);
5736 if (ret < 0) {
99bc5d55
JSJ
5737 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5738 "BM_%d : beiscsi_dev_probe-"
5739 "Failed to beiscsi_init_irqs\n");
6733b39a
JK
5740 goto free_blkenbld;
5741 }
238f6b72 5742 hwi_enable_intr(phba);
f457a46f 5743
0598b8af
JK
5744 if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
5745 goto free_blkenbld;
5746
f457a46f
MC
5747 if (beiscsi_setup_boot_info(phba))
5748 /*
5749 * log error but continue, because we may not be using
5750 * iscsi boot.
5751 */
99bc5d55
JSJ
5752 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5753 "BM_%d : Could not set up "
5754 "iSCSI boot info.\n");
f457a46f 5755
0e43895e 5756 beiscsi_create_def_ifaces(phba);
7a158003
JSJ
5757 schedule_delayed_work(&phba->beiscsi_hw_check_task,
5758 msecs_to_jiffies(1000));
5759
99bc5d55
JSJ
5760 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5761 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
6733b39a
JK
5762 return 0;
5763
6733b39a
JK
5764free_blkenbld:
5765 destroy_workqueue(phba->wq);
89f8b33c
JA
5766 for (i = 0; i < phba->num_cpus; i++) {
5767 pbe_eq = &phwi_context->be_eq[i];
511cbce2 5768 irq_poll_disable(&pbe_eq->iopoll);
89f8b33c 5769 }
6733b39a
JK
5770free_twq:
5771 beiscsi_clean_port(phba);
5772 beiscsi_free_mem(phba);
5773free_port:
5774 pci_free_consistent(phba->pcidev,
5775 phba->ctrl.mbox_mem_alloced.size,
5776 phba->ctrl.mbox_mem_alloced.va,
5777 phba->ctrl.mbox_mem_alloced.dma);
5778 beiscsi_unmap_pci_function(phba);
5779hba_free:
238f6b72
JK
5780 if (phba->msix_enabled)
5781 pci_disable_msix(phba->pcidev);
6733b39a
JK
5782 pci_dev_put(phba->pcidev);
5783 iscsi_host_free(phba->shost);
2e7cee02 5784 pci_set_drvdata(pcidev, NULL);
6733b39a 5785disable_pci:
e307f3ac 5786 pci_release_regions(pcidev);
6733b39a
JK
5787 pci_disable_device(pcidev);
5788 return ret;
5789}
5790
3567f36a
JK
5791static struct pci_error_handlers beiscsi_eeh_handlers = {
5792 .error_detected = beiscsi_eeh_err_detected,
5793 .slot_reset = beiscsi_eeh_reset,
5794 .resume = beiscsi_eeh_resume,
5795};
5796
6733b39a
JK
5797struct iscsi_transport beiscsi_iscsi_transport = {
5798 .owner = THIS_MODULE,
5799 .name = DRV_NAME,
9db0fb3a 5800 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
6733b39a 5801 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
6733b39a
JK
5802 .create_session = beiscsi_session_create,
5803 .destroy_session = beiscsi_session_destroy,
5804 .create_conn = beiscsi_conn_create,
5805 .bind_conn = beiscsi_conn_bind,
5806 .destroy_conn = iscsi_conn_teardown,
3128c6c7 5807 .attr_is_visible = be2iscsi_attr_is_visible,
0e43895e
MC
5808 .set_iface_param = be2iscsi_iface_set_param,
5809 .get_iface_param = be2iscsi_iface_get_param,
6733b39a 5810 .set_param = beiscsi_set_param,
c7f7fd5b 5811 .get_conn_param = iscsi_conn_get_param,
6733b39a
JK
5812 .get_session_param = iscsi_session_get_param,
5813 .get_host_param = beiscsi_get_host_param,
5814 .start_conn = beiscsi_conn_start,
fa95d206 5815 .stop_conn = iscsi_conn_stop,
6733b39a
JK
5816 .send_pdu = iscsi_conn_send_pdu,
5817 .xmit_task = beiscsi_task_xmit,
5818 .cleanup_task = beiscsi_cleanup_task,
5819 .alloc_pdu = beiscsi_alloc_pdu,
5820 .parse_pdu_itt = beiscsi_parse_pdu,
5821 .get_stats = beiscsi_conn_get_stats,
c7f7fd5b 5822 .get_ep_param = beiscsi_ep_get_param,
6733b39a
JK
5823 .ep_connect = beiscsi_ep_connect,
5824 .ep_poll = beiscsi_ep_poll,
5825 .ep_disconnect = beiscsi_ep_disconnect,
5826 .session_recovery_timedout = iscsi_session_recovery_timedout,
ffce3e2e 5827 .bsg_request = beiscsi_bsg_request,
6733b39a
JK
5828};
5829
5830static struct pci_driver beiscsi_pci_driver = {
5831 .name = DRV_NAME,
5832 .probe = beiscsi_dev_probe,
5833 .remove = beiscsi_remove,
3567f36a
JK
5834 .id_table = beiscsi_pci_id_table,
5835 .err_handler = &beiscsi_eeh_handlers
6733b39a
JK
5836};
5837
bfead3b2 5838
6733b39a
JK
5839static int __init beiscsi_module_init(void)
5840{
5841 int ret;
5842
5843 beiscsi_scsi_transport =
5844 iscsi_register_transport(&beiscsi_iscsi_transport);
5845 if (!beiscsi_scsi_transport) {
99bc5d55
JSJ
5846 printk(KERN_ERR
5847 "beiscsi_module_init - Unable to register beiscsi transport.\n");
f55a24f2 5848 return -ENOMEM;
6733b39a 5849 }
99bc5d55
JSJ
5850 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
5851 &beiscsi_iscsi_transport);
6733b39a
JK
5852
5853 ret = pci_register_driver(&beiscsi_pci_driver);
5854 if (ret) {
99bc5d55
JSJ
5855 printk(KERN_ERR
5856 "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
6733b39a
JK
5857 goto unregister_iscsi_transport;
5858 }
5859 return 0;
5860
5861unregister_iscsi_transport:
5862 iscsi_unregister_transport(&beiscsi_iscsi_transport);
5863 return ret;
5864}
5865
5866static void __exit beiscsi_module_exit(void)
5867{
5868 pci_unregister_driver(&beiscsi_pci_driver);
5869 iscsi_unregister_transport(&beiscsi_iscsi_transport);
5870}
5871
5872module_init(beiscsi_module_init);
5873module_exit(beiscsi_module_exit);