]>
Commit | Line | Data |
---|---|---|
ace7f46b MR |
1 | /* |
2 | * QLogic iSCSI Offload Driver | |
3 | * Copyright (c) 2016 Cavium Inc. | |
4 | * | |
5 | * This software is available under the terms of the GNU General Public License | |
6 | * (GPL) Version 2, available from the file COPYING in the main directory of | |
7 | * this source tree. | |
8 | */ | |
9 | ||
10 | #include <linux/module.h> | |
11 | #include <linux/pci.h> | |
12 | #include <linux/kernel.h> | |
13 | #include <linux/if_arp.h> | |
14 | #include <scsi/iscsi_if.h> | |
15 | #include <linux/inet.h> | |
16 | #include <net/arp.h> | |
17 | #include <linux/list.h> | |
18 | #include <linux/kthread.h> | |
19 | #include <linux/mm.h> | |
20 | #include <linux/if_vlan.h> | |
21 | #include <linux/cpu.h> | |
22 | ||
23 | #include <scsi/scsi_cmnd.h> | |
24 | #include <scsi/scsi_device.h> | |
25 | #include <scsi/scsi_eh.h> | |
26 | #include <scsi/scsi_host.h> | |
27 | #include <scsi/scsi.h> | |
28 | ||
29 | #include "qedi.h" | |
30 | #include "qedi_gbl.h" | |
31 | #include "qedi_iscsi.h" | |
32 | ||
33 | static uint qedi_fw_debug; | |
34 | module_param(qedi_fw_debug, uint, 0644); | |
35 | MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3"); | |
36 | ||
37 | uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM; | |
38 | module_param(qedi_dbg_log, uint, 0644); | |
39 | MODULE_PARM_DESC(qedi_dbg_log, " Default debug level"); | |
40 | ||
41 | uint qedi_io_tracing; | |
42 | module_param(qedi_io_tracing, uint, 0644); | |
43 | MODULE_PARM_DESC(qedi_io_tracing, | |
44 | " Enable logging of SCSI requests/completions into trace buffer. (default off)."); | |
45 | ||
46 | const struct qed_iscsi_ops *qedi_ops; | |
47 | static struct scsi_transport_template *qedi_scsi_transport; | |
48 | static struct pci_driver qedi_pci_driver; | |
49 | static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu); | |
50 | static LIST_HEAD(qedi_udev_list); | |
51 | /* Static function declaration */ | |
52 | static int qedi_alloc_global_queues(struct qedi_ctx *qedi); | |
53 | static void qedi_free_global_queues(struct qedi_ctx *qedi); | |
54 | static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid); | |
55 | static void qedi_reset_uio_rings(struct qedi_uio_dev *udev); | |
56 | static void qedi_ll2_free_skbs(struct qedi_ctx *qedi); | |
57 | ||
/* Firmware asynchronous event callback registered with the qed core.
 * 'context' is the qedi_ctx, 'fw_handle' an async_data descriptor that
 * carries the connection id (cid) the event belongs to.
 * Returns 0 on success or a negative errno.
 */
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
	struct qedi_ctx *qedi;
	struct qedi_endpoint *qedi_ep;
	struct async_data *data;
	int rval = 0;

	if (!context || !fw_handle) {
		QEDI_ERR(NULL, "Recv event with ctx NULL\n");
		return -EINVAL;
	}

	qedi = (struct qedi_ctx *)context;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);

	data = (struct async_data *)fw_handle;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
		  data->cid, data->itid, data->error_code,
		  data->fw_debug_param);

	qedi_ep = qedi->ep_tbl[data->cid];

	/* The endpoint may already have been torn down by the time the
	 * firmware event is delivered.
	 */
	if (!qedi_ep) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Cannot process event, ep already disconnected, cid=0x%x\n",
			  data->cid);
		WARN_ON(1);
		return -ENODEV;
	}

	switch (fw_event_code) {
	case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
		/* TCP connect finished; wake the offload waiter. */
		if (qedi_ep->state == EP_STATE_OFLDCONN_START)
			qedi_ep->state = EP_STATE_OFLDCONN_COMPL;

		wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
		break;
	case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
		qedi_ep->state = EP_STATE_DISCONN_COMPL;
		wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
		break;
	case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
		qedi_process_iscsi_error(qedi_ep, data);
		break;
	case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
	case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
	case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
	case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
		/* All TCP-level error events funnel into one handler. */
		qedi_process_tcp_error(qedi_ep, data);
		break;
	default:
		QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
			 fw_event_code);
	}

	return rval;
}
120 | ||
121 | static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode) | |
122 | { | |
123 | struct qedi_uio_dev *udev = uinfo->priv; | |
124 | struct qedi_ctx *qedi = udev->qedi; | |
125 | ||
126 | if (!capable(CAP_NET_ADMIN)) | |
127 | return -EPERM; | |
128 | ||
129 | if (udev->uio_dev != -1) | |
130 | return -EBUSY; | |
131 | ||
132 | rtnl_lock(); | |
133 | udev->uio_dev = iminor(inode); | |
134 | qedi_reset_uio_rings(udev); | |
135 | set_bit(UIO_DEV_OPENED, &qedi->flags); | |
136 | rtnl_unlock(); | |
137 | ||
138 | return 0; | |
139 | } | |
140 | ||
141 | static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode) | |
142 | { | |
143 | struct qedi_uio_dev *udev = uinfo->priv; | |
144 | struct qedi_ctx *qedi = udev->qedi; | |
145 | ||
146 | udev->uio_dev = -1; | |
147 | clear_bit(UIO_DEV_OPENED, &qedi->flags); | |
148 | qedi_ll2_free_skbs(qedi); | |
149 | return 0; | |
150 | } | |
151 | ||
/* Release the UIO control page, LL2 ring page and LL2 packet buffer.
 * Each pointer is cleared after freeing so the teardown is idempotent.
 */
static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
{
	if (udev->uctrl) {
		free_page((unsigned long)udev->uctrl);
		udev->uctrl = NULL;
	}

	if (udev->ll2_ring) {
		free_page((unsigned long)udev->ll2_ring);
		udev->ll2_ring = NULL;
	}

	if (udev->ll2_buf) {
		/* The packet buffer is an order-2 (4 page) allocation. */
		free_pages((unsigned long)udev->ll2_buf, 2);
		udev->ll2_buf = NULL;
	}
}
169 | ||
/* Fully tear down a UIO device: unregister from the UIO core first so
 * userspace can no longer touch the rings, then free them, drop the
 * PCI device reference taken at allocation, and free the structure.
 */
static void __qedi_free_uio(struct qedi_uio_dev *udev)
{
	uio_unregister_device(&udev->qedi_uinfo);

	__qedi_free_uio_rings(udev);

	/* Pairs with pci_dev_get() in qedi_alloc_uio_rings(). */
	pci_dev_put(udev->pdev);
	kfree(udev);
}
179 | ||
180 | static void qedi_free_uio(struct qedi_uio_dev *udev) | |
181 | { | |
182 | if (!udev) | |
183 | return; | |
184 | ||
185 | list_del_init(&udev->list); | |
186 | __qedi_free_uio(udev); | |
187 | } | |
188 | ||
189 | static void qedi_reset_uio_rings(struct qedi_uio_dev *udev) | |
190 | { | |
191 | struct qedi_ctx *qedi = NULL; | |
192 | struct qedi_uio_ctrl *uctrl = NULL; | |
193 | ||
194 | qedi = udev->qedi; | |
195 | uctrl = udev->uctrl; | |
196 | ||
197 | spin_lock_bh(&qedi->ll2_lock); | |
198 | uctrl->host_rx_cons = 0; | |
199 | uctrl->hw_rx_prod = 0; | |
200 | uctrl->hw_rx_bd_prod = 0; | |
201 | uctrl->host_rx_bd_cons = 0; | |
202 | ||
203 | memset(udev->ll2_ring, 0, udev->ll2_ring_size); | |
204 | memset(udev->ll2_buf, 0, udev->ll2_buf_size); | |
205 | spin_unlock_bh(&qedi->ll2_lock); | |
206 | } | |
207 | ||
208 | static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev) | |
209 | { | |
210 | int rc = 0; | |
211 | ||
212 | if (udev->ll2_ring || udev->ll2_buf) | |
213 | return rc; | |
214 | ||
5e901d0b AE |
215 | /* Memory for control area. */ |
216 | udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL); | |
217 | if (!udev->uctrl) | |
218 | return -ENOMEM; | |
219 | ||
ace7f46b MR |
220 | /* Allocating memory for LL2 ring */ |
221 | udev->ll2_ring_size = QEDI_PAGE_SIZE; | |
222 | udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); | |
223 | if (!udev->ll2_ring) { | |
224 | rc = -ENOMEM; | |
225 | goto exit_alloc_ring; | |
226 | } | |
227 | ||
228 | /* Allocating memory for Tx/Rx pkt buffer */ | |
229 | udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE; | |
230 | udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size); | |
231 | udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP | | |
232 | __GFP_ZERO, 2); | |
233 | if (!udev->ll2_buf) { | |
234 | rc = -ENOMEM; | |
235 | goto exit_alloc_buf; | |
236 | } | |
237 | return rc; | |
238 | ||
239 | exit_alloc_buf: | |
240 | free_page((unsigned long)udev->ll2_ring); | |
241 | udev->ll2_ring = NULL; | |
242 | exit_alloc_ring: | |
243 | return rc; | |
244 | } | |
245 | ||
246 | static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) | |
247 | { | |
248 | struct qedi_uio_dev *udev = NULL; | |
ace7f46b MR |
249 | int rc = 0; |
250 | ||
251 | list_for_each_entry(udev, &qedi_udev_list, list) { | |
252 | if (udev->pdev == qedi->pdev) { | |
253 | udev->qedi = qedi; | |
254 | if (__qedi_alloc_uio_rings(udev)) { | |
255 | udev->qedi = NULL; | |
256 | return -ENOMEM; | |
257 | } | |
258 | qedi->udev = udev; | |
259 | return 0; | |
260 | } | |
261 | } | |
262 | ||
263 | udev = kzalloc(sizeof(*udev), GFP_KERNEL); | |
264 | if (!udev) { | |
265 | rc = -ENOMEM; | |
266 | goto err_udev; | |
267 | } | |
268 | ||
ace7f46b MR |
269 | udev->uio_dev = -1; |
270 | ||
271 | udev->qedi = qedi; | |
272 | udev->pdev = qedi->pdev; | |
ace7f46b MR |
273 | |
274 | rc = __qedi_alloc_uio_rings(udev); | |
275 | if (rc) | |
5e901d0b | 276 | goto err_uctrl; |
ace7f46b MR |
277 | |
278 | list_add(&udev->list, &qedi_udev_list); | |
279 | ||
280 | pci_dev_get(udev->pdev); | |
281 | qedi->udev = udev; | |
282 | ||
283 | udev->tx_pkt = udev->ll2_buf; | |
284 | udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE; | |
285 | return 0; | |
286 | ||
ace7f46b MR |
287 | err_uctrl: |
288 | kfree(udev); | |
289 | err_udev: | |
290 | return -ENOMEM; | |
291 | } | |
292 | ||
293 | static int qedi_init_uio(struct qedi_ctx *qedi) | |
294 | { | |
295 | struct qedi_uio_dev *udev = qedi->udev; | |
296 | struct uio_info *uinfo; | |
297 | int ret = 0; | |
298 | ||
299 | if (!udev) | |
300 | return -ENOMEM; | |
301 | ||
302 | uinfo = &udev->qedi_uinfo; | |
303 | ||
304 | uinfo->mem[0].addr = (unsigned long)udev->uctrl; | |
305 | uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl); | |
306 | uinfo->mem[0].memtype = UIO_MEM_LOGICAL; | |
307 | ||
308 | uinfo->mem[1].addr = (unsigned long)udev->ll2_ring; | |
309 | uinfo->mem[1].size = udev->ll2_ring_size; | |
310 | uinfo->mem[1].memtype = UIO_MEM_LOGICAL; | |
311 | ||
312 | uinfo->mem[2].addr = (unsigned long)udev->ll2_buf; | |
313 | uinfo->mem[2].size = udev->ll2_buf_size; | |
314 | uinfo->mem[2].memtype = UIO_MEM_LOGICAL; | |
315 | ||
316 | uinfo->name = "qedi_uio"; | |
317 | uinfo->version = QEDI_MODULE_VERSION; | |
318 | uinfo->irq = UIO_IRQ_CUSTOM; | |
319 | ||
320 | uinfo->open = qedi_uio_open; | |
321 | uinfo->release = qedi_uio_close; | |
322 | ||
323 | if (udev->uio_dev == -1) { | |
324 | if (!uinfo->priv) { | |
325 | uinfo->priv = udev; | |
326 | ||
327 | ret = uio_register_device(&udev->pdev->dev, uinfo); | |
328 | if (ret) { | |
329 | QEDI_ERR(&qedi->dbg_ctx, | |
330 | "UIO registration failed\n"); | |
331 | } | |
332 | } | |
333 | } | |
334 | ||
335 | return ret; | |
336 | } | |
337 | ||
338 | static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, | |
339 | struct qed_sb_info *sb_info, u16 sb_id) | |
340 | { | |
341 | struct status_block *sb_virt; | |
342 | dma_addr_t sb_phys; | |
343 | int ret; | |
344 | ||
345 | sb_virt = dma_alloc_coherent(&qedi->pdev->dev, | |
346 | sizeof(struct status_block), &sb_phys, | |
347 | GFP_KERNEL); | |
348 | if (!sb_virt) { | |
349 | QEDI_ERR(&qedi->dbg_ctx, | |
350 | "Status block allocation failed for id = %d.\n", | |
351 | sb_id); | |
352 | return -ENOMEM; | |
353 | } | |
354 | ||
355 | ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys, | |
356 | sb_id, QED_SB_TYPE_STORAGE); | |
357 | if (ret) { | |
358 | QEDI_ERR(&qedi->dbg_ctx, | |
359 | "Status block initialization failed for id = %d.\n", | |
360 | sb_id); | |
361 | return ret; | |
362 | } | |
363 | ||
364 | return 0; | |
365 | } | |
366 | ||
367 | static void qedi_free_sb(struct qedi_ctx *qedi) | |
368 | { | |
369 | struct qed_sb_info *sb_info; | |
370 | int id; | |
371 | ||
372 | for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { | |
373 | sb_info = &qedi->sb_array[id]; | |
374 | if (sb_info->sb_virt) | |
375 | dma_free_coherent(&qedi->pdev->dev, | |
376 | sizeof(*sb_info->sb_virt), | |
377 | (void *)sb_info->sb_virt, | |
378 | sb_info->sb_phys); | |
379 | } | |
380 | } | |
381 | ||
382 | static void qedi_free_fp(struct qedi_ctx *qedi) | |
383 | { | |
384 | kfree(qedi->fp_array); | |
385 | kfree(qedi->sb_array); | |
386 | } | |
387 | ||
/* Tear down fastpath state: free the status blocks, then the arrays. */
static void qedi_destroy_fp(struct qedi_ctx *qedi)
{
	qedi_free_sb(qedi);
	qedi_free_fp(qedi);
}
393 | ||
394 | static int qedi_alloc_fp(struct qedi_ctx *qedi) | |
395 | { | |
396 | int ret = 0; | |
397 | ||
398 | qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi), | |
399 | sizeof(struct qedi_fastpath), GFP_KERNEL); | |
400 | if (!qedi->fp_array) { | |
401 | QEDI_ERR(&qedi->dbg_ctx, | |
402 | "fastpath fp array allocation failed.\n"); | |
403 | return -ENOMEM; | |
404 | } | |
405 | ||
406 | qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi), | |
407 | sizeof(struct qed_sb_info), GFP_KERNEL); | |
408 | if (!qedi->sb_array) { | |
409 | QEDI_ERR(&qedi->dbg_ctx, | |
410 | "fastpath sb array allocation failed.\n"); | |
411 | ret = -ENOMEM; | |
412 | goto free_fp; | |
413 | } | |
414 | ||
415 | return ret; | |
416 | ||
417 | free_fp: | |
418 | qedi_free_fp(qedi); | |
419 | return ret; | |
420 | } | |
421 | ||
422 | static void qedi_int_fp(struct qedi_ctx *qedi) | |
423 | { | |
424 | struct qedi_fastpath *fp; | |
425 | int id; | |
426 | ||
427 | memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) * | |
428 | sizeof(*qedi->fp_array)); | |
429 | memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) * | |
430 | sizeof(*qedi->sb_array)); | |
431 | ||
432 | for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { | |
433 | fp = &qedi->fp_array[id]; | |
434 | fp->sb_info = &qedi->sb_array[id]; | |
435 | fp->sb_id = id; | |
436 | fp->qedi = qedi; | |
437 | snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", | |
438 | "qedi", id); | |
439 | ||
440 | /* fp_array[i] ---- irq cookie | |
441 | * So init data which is needed in int ctx | |
442 | */ | |
443 | } | |
444 | } | |
445 | ||
446 | static int qedi_prepare_fp(struct qedi_ctx *qedi) | |
447 | { | |
448 | struct qedi_fastpath *fp; | |
449 | int id, ret = 0; | |
450 | ||
451 | ret = qedi_alloc_fp(qedi); | |
452 | if (ret) | |
453 | goto err; | |
454 | ||
455 | qedi_int_fp(qedi); | |
456 | ||
457 | for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) { | |
458 | fp = &qedi->fp_array[id]; | |
459 | ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id); | |
460 | if (ret) { | |
461 | QEDI_ERR(&qedi->dbg_ctx, | |
462 | "SB allocation and initialization failed.\n"); | |
463 | ret = -EIO; | |
464 | goto err_init; | |
465 | } | |
466 | } | |
467 | ||
468 | return 0; | |
469 | ||
470 | err_init: | |
471 | qedi_free_sb(qedi); | |
472 | qedi_free_fp(qedi); | |
473 | err: | |
474 | return ret; | |
475 | } | |
476 | ||
/* Build the free-CID queue and the CID -> connection lookup table.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int qedi_setup_cid_que(struct qedi_ctx *qedi)
{
	int i;

	qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
						   sizeof(u32), GFP_KERNEL);
	if (!qedi->cid_que.cid_que_base)
		return -ENOMEM;

	qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
						   sizeof(struct qedi_conn *),
						   GFP_KERNEL);
	if (!qedi->cid_que.conn_cid_tbl) {
		kfree(qedi->cid_que.cid_que_base);
		qedi->cid_que.cid_que_base = NULL;
		return -ENOMEM;
	}

	qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
	qedi->cid_que.cid_q_prod_idx = 0;
	qedi->cid_que.cid_q_cons_idx = 0;
	qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
	qedi->cid_que.cid_free_cnt = qedi->max_active_conns;

	/* Every CID starts out free and unbound to a connection. */
	for (i = 0; i < qedi->max_active_conns; i++) {
		qedi->cid_que.cid_que[i] = i;
		qedi->cid_que.conn_cid_tbl[i] = NULL;
	}

	return 0;
}
508 | ||
/* Free the CID queue and connection table built by qedi_setup_cid_que(). */
static void qedi_release_cid_que(struct qedi_ctx *qedi)
{
	kfree(qedi->cid_que.cid_que_base);
	qedi->cid_que.cid_que_base = NULL;

	kfree(qedi->cid_que.conn_cid_tbl);
	qedi->cid_que.conn_cid_tbl = NULL;
}
517 | ||
518 | static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size, | |
519 | u16 start_id, u16 next) | |
520 | { | |
521 | id_tbl->start = start_id; | |
522 | id_tbl->max = size; | |
523 | id_tbl->next = next; | |
524 | spin_lock_init(&id_tbl->lock); | |
525 | id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL); | |
526 | if (!id_tbl->table) | |
527 | return -ENOMEM; | |
528 | ||
529 | return 0; | |
530 | } | |
531 | ||
532 | static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl) | |
533 | { | |
534 | kfree(id_tbl->table); | |
535 | id_tbl->table = NULL; | |
536 | } | |
537 | ||
538 | int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id) | |
539 | { | |
540 | int ret = -1; | |
541 | ||
542 | id -= id_tbl->start; | |
543 | if (id >= id_tbl->max) | |
544 | return ret; | |
545 | ||
546 | spin_lock(&id_tbl->lock); | |
547 | if (!test_bit(id, id_tbl->table)) { | |
548 | set_bit(id, id_tbl->table); | |
549 | ret = 0; | |
550 | } | |
551 | spin_unlock(&id_tbl->lock); | |
552 | return ret; | |
553 | } | |
554 | ||
/* Allocate the next free port ID, scanning forward from the rotating
 * 'next' hint and wrapping to the start of the bitmap if needed.
 * Returns QEDI_LOCAL_PORT_INVALID when the table is full.
 */
u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
{
	u16 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = QEDI_LOCAL_PORT_INVALID;
		if (id_tbl->next != 0) {
			/* Wrap: retry from bit 0 up to the hint. */
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = QEDI_LOCAL_PORT_INVALID;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		/* NOTE(review): the mask assumes 'max' is a power of
		 * two — confirm against QEDI_LOCAL_PORT_RANGE.
		 */
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
580 | ||
581 | void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id) | |
582 | { | |
583 | if (id == QEDI_LOCAL_PORT_INVALID) | |
584 | return; | |
585 | ||
586 | id -= id_tbl->start; | |
587 | if (id >= id_tbl->max) | |
588 | return; | |
589 | ||
590 | clear_bit(id, id_tbl->table); | |
591 | } | |
592 | ||
593 | static void qedi_cm_free_mem(struct qedi_ctx *qedi) | |
594 | { | |
595 | kfree(qedi->ep_tbl); | |
596 | qedi->ep_tbl = NULL; | |
597 | qedi_free_id_tbl(&qedi->lcl_port_tbl); | |
598 | } | |
599 | ||
600 | static int qedi_cm_alloc_mem(struct qedi_ctx *qedi) | |
601 | { | |
602 | u16 port_id; | |
603 | ||
604 | qedi->ep_tbl = kzalloc((qedi->max_active_conns * | |
605 | sizeof(struct qedi_endpoint *)), GFP_KERNEL); | |
606 | if (!qedi->ep_tbl) | |
607 | return -ENOMEM; | |
608 | port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE; | |
609 | if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE, | |
610 | QEDI_LOCAL_PORT_MIN, port_id)) { | |
611 | qedi_cm_free_mem(qedi); | |
612 | return -ENOMEM; | |
613 | } | |
614 | ||
615 | return 0; | |
616 | } | |
617 | ||
/* Allocate an iSCSI Scsi_Host with a qedi_ctx as host private data and
 * set its fixed SCSI limits. Returns the zeroed qedi context, or NULL
 * if the host allocation failed.
 */
static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct qedi_ctx *qedi = NULL;

	shost = iscsi_host_alloc(&qedi_host_template,
				 sizeof(struct qedi_ctx), 0);
	if (!shost) {
		QEDI_ERR(NULL, "Could not allocate shost\n");
		goto exit_setup_shost;
	}

	shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
	shost->max_channel = 0;
	shost->max_lun = ~0;
	shost->max_cmd_len = 16;
	shost->transportt = qedi_scsi_transport;

	qedi = iscsi_host_priv(shost);
	memset(qedi, 0, sizeof(*qedi));
	qedi->shost = shost;
	qedi->dbg_ctx.host_no = shost->host_no;
	qedi->pdev = pdev;
	qedi->dbg_ctx.pdev = pdev;
	qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
	qedi->max_sqes = QEDI_SQ_SIZE;

	/* With blk-mq, expose one hardware queue per MSI-X vector. */
	if (shost_use_blk_mq(shost))
		shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);

	pci_set_drvdata(pdev, qedi);

exit_setup_shost:
	return qedi;
}
653 | ||
/* LL2 receive callback: queue the frame for the LL2 recv thread, which
 * copies it into the UIO ring shared with the iscsiuio daemon.
 * Returns 0 (frames may be silently dropped), -1 on a NULL context.
 */
static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
	struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
	struct qedi_uio_dev *udev;
	struct qedi_uio_ctrl *uctrl;
	struct skb_work_list *work;
	u32 prod;

	if (!qedi) {
		QEDI_ERR(NULL, "qedi is NULL\n");
		return -1;
	}

	/* No userspace consumer: drop the frame instead of queueing it. */
	if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
			  "UIO DEV is not opened\n");
		kfree_skb(skb);
		return 0;
	}

	udev = qedi->udev;
	uctrl = udev->uctrl;

	/* GFP_ATOMIC: this callback can run in softirq context. */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Could not allocate work so dropping frame.\n");
		kfree_skb(skb);
		return 0;
	}

	INIT_LIST_HEAD(&work->list);
	work->skb = skb;

	/* vlan_id stays 0 (kzalloc) for untagged frames. */
	if (skb_vlan_tag_present(skb))
		work->vlan_id = skb_vlan_tag_get(skb);

	/* Re-insert the stripped tag so iscsiuio sees the full frame. */
	if (work->vlan_id)
		__vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);

	spin_lock_bh(&qedi->ll2_lock);
	list_add_tail(&work->list, &qedi->ll2_skb_list);

	++uctrl->hw_rx_prod_cnt;
	prod = (uctrl->hw_rx_prod + 1) % RX_RING;
	/* Advance the producer and wake the recv thread only while the
	 * ring is not full; a full ring leaves the entry on the list.
	 */
	if (prod != uctrl->host_rx_cons) {
		uctrl->hw_rx_prod = prod;
		spin_unlock_bh(&qedi->ll2_lock);
		wake_up_process(qedi->ll2_recv_thread);
		return 0;
	}

	spin_unlock_bh(&qedi->ll2_lock);
	return 0;
}
709 | ||
/* map this skb to iscsiuio mmaped region */
static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
				u16 vlan_id)
{
	struct qedi_uio_dev *udev = NULL;
	struct qedi_uio_ctrl *uctrl = NULL;
	struct qedi_rx_bd rxbd;
	struct qedi_rx_bd *p_rxbd;
	u32 rx_bd_prod;
	void *pkt;
	int len = 0;

	if (!qedi) {
		QEDI_ERR(NULL, "qedi is NULL\n");
		return -1;
	}

	udev = qedi->udev;
	uctrl = udev->uctrl;
	/* Copy the frame into the producer slot of the mmapped packet
	 * area; frames larger than one slot are truncated.
	 */
	pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
	len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
	memcpy(pkt, skb->data, len);

	/* Describe the packet in a BD that userspace can consume. */
	memset(&rxbd, 0, sizeof(rxbd));
	rxbd.rx_pkt_index = uctrl->hw_rx_prod;
	rxbd.rx_pkt_len = len;
	rxbd.vlan_id = vlan_id;

	uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
	rx_bd_prod = uctrl->hw_rx_bd_prod;
	p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
	p_rxbd += rx_bd_prod;

	memcpy(p_rxbd, &rxbd, sizeof(rxbd));

	/* notify the iscsiuio about new packet */
	uio_event_notify(&udev->qedi_uinfo);

	return 0;
}
750 | ||
751 | static void qedi_ll2_free_skbs(struct qedi_ctx *qedi) | |
752 | { | |
753 | struct skb_work_list *work, *work_tmp; | |
754 | ||
755 | spin_lock_bh(&qedi->ll2_lock); | |
756 | list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) { | |
757 | list_del(&work->list); | |
758 | if (work->skb) | |
759 | kfree_skb(work->skb); | |
760 | kfree(work); | |
761 | } | |
762 | spin_unlock_bh(&qedi->ll2_lock); | |
763 | } | |
764 | ||
/* Kernel thread that drains the LL2 skb work list and copies each
 * frame into the UIO ring for iscsiuio. Woken by qedi_ll2_rx().
 */
static int qedi_ll2_recv_thread(void *arg)
{
	struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
	struct skb_work_list *work, *work_tmp;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		spin_lock_bh(&qedi->ll2_lock);
		list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
					 list) {
			list_del(&work->list);
			qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
			kfree_skb(work->skb);
			kfree(work);
		}
		/* Change state before dropping the lock so a wake-up
		 * issued by qedi_ll2_rx() after the unlock is not lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&qedi->ll2_lock);
		schedule();
	}

	__set_current_state(TASK_RUNNING);
	return 0;
}
789 | ||
/* Fill in the iSCSI PF parameters handed to the qed core before
 * hardware init: connection/task counts, ring page counts, and the
 * DMA address of the global queue parameter array.
 * Returns 0 on success, -1 on allocation failure (allocated memory is
 * released later via qedi_free_iscsi_pf_param()).
 */
static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
{
	u8 num_sq_pages;
	u32 log_page_size;
	int rval = 0;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "Min number of MSIX %d\n",
		  MIN_NUM_CPUS_MSIX(qedi));

	/* 8 bytes per SQ element. */
	num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;

	qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);

	memset(&qedi->pf_params.iscsi_pf_params, 0,
	       sizeof(qedi->pf_params.iscsi_pf_params));

	/* One qedi_glbl_q_params entry per queue, DMA-visible to qed. */
	qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
		qedi->num_queues * sizeof(struct qedi_glbl_q_params),
		&qedi->hw_p_cpuq);
	if (!qedi->p_cpuq) {
		QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
		rval = -1;
		goto err_alloc_mem;
	}

	rval = qedi_alloc_global_queues(qedi);
	if (rval) {
		QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
		rval = -1;
		goto err_alloc_mem;
	}

	qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
	qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
	qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
	qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
	qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
	qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
	qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;

	/* Compute log2(PAGE_SIZE) for the firmware. */
	for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
		if ((1 << log_page_size) == PAGE_SIZE)
			break;
	}
	qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;

	qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
							   (u64)qedi->hw_p_cpuq;

	/* RQ BDQ initializations.
	 * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
	 * rqe_log_size: 8 for 256B RQE
	 */
	qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
	/* BDQ address and size */
	qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
							qedi->bdq_pbl_list_dma;
	qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
						qedi->bdq_pbl_list_num_entries;
	qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;

	/* cq_num_entries: num_tasks + rq_num_entries */
	qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;

	qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
	qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
	qedi->pf_params.iscsi_pf_params.ooo_enable = 1;

err_alloc_mem:
	return rval;
}
864 | ||
/* Free DMA coherent memory for array of queue pointers we pass to qed */
static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
{
	size_t size = 0;

	if (qedi->p_cpuq) {
		size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
		pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
				    qedi->hw_p_cpuq);
	}

	/* Releases the per-queue allocations made by
	 * qedi_alloc_global_queues(); the array itself is freed below.
	 */
	qedi_free_global_queues(qedi);

	kfree(qedi->global_queues);
}
880 | ||
881 | static void qedi_link_update(void *dev, struct qed_link_output *link) | |
882 | { | |
883 | struct qedi_ctx *qedi = (struct qedi_ctx *)dev; | |
884 | ||
885 | if (link->link_up) { | |
886 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n"); | |
887 | atomic_set(&qedi->link_state, QEDI_LINK_UP); | |
888 | } else { | |
889 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, | |
890 | "Link Down event.\n"); | |
891 | atomic_set(&qedi->link_state, QEDI_LINK_DOWN); | |
892 | } | |
893 | } | |
894 | ||
/* Callbacks handed to the qed core; only link-state changes are of
 * interest to the iSCSI offload driver.
 */
static struct qed_iscsi_cb_ops qedi_cb_ops = {
	{
		.link_update = qedi_link_update,
	}
};
900 | ||
/* Dispatch one CQE onto the per-CPU work list 'p'. Solicited CQEs
 * reuse the work item embedded in the owning command; unsolicited
 * ones get a freshly allocated item. Returns 0 if queued, -1 if the
 * connection is gone, the command lookup/allocation failed, or the
 * CQE type is unknown. Caller holds the per-CPU work lock.
 */
static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
			  u16 que_idx, struct qedi_percpu_s *p)
{
	struct qedi_work *qedi_work;
	struct qedi_conn *q_conn;
	struct iscsi_conn *conn;
	struct qedi_cmd *qedi_cmd;
	u32 iscsi_cid;
	int rc = 0;

	iscsi_cid = cqe->cqe_common.conn_id;
	q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
	if (!q_conn) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Session no longer exists for cid=0x%x!!\n",
			  iscsi_cid);
		return -1;
	}
	conn = q_conn->cls_conn->dd_data;

	switch (cqe->cqe_common.cqe_type) {
	case ISCSI_CQE_TYPE_SOLICITED:
	case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
		/* Completion for a known task: use the per-command work
		 * item, avoiding an allocation on the fast path.
		 */
		qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
		if (!qedi_cmd) {
			rc = -1;
			break;
		}
		INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
		qedi_cmd->cqe_work.qedi = qedi;
		memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
		qedi_cmd->cqe_work.que_idx = que_idx;
		qedi_cmd->cqe_work.is_solicited = true;
		list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
		break;
	case ISCSI_CQE_TYPE_UNSOLICITED:
	case ISCSI_CQE_TYPE_DUMMY:
	case ISCSI_CQE_TYPE_TASK_CLEANUP:
		/* No owning command; allocate. GFP_ATOMIC because this
		 * runs with a spinlock held in interrupt context.
		 */
		qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
		if (!qedi_work) {
			rc = -1;
			break;
		}
		INIT_LIST_HEAD(&qedi_work->list);
		qedi_work->qedi = qedi;
		memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
		qedi_work->que_idx = que_idx;
		qedi_work->is_solicited = false;
		list_add_tail(&qedi_work->list, &p->work_list);
		break;
	default:
		rc = -1;
		QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
	}
	return rc;
}
957 | ||
/* Drain the global CQ for this fastpath vector, handing each CQE to
 * the per-CPU I/O thread's work list, then wake that thread.
 */
static bool qedi_process_completions(struct qedi_fastpath *fp)
{
	struct qedi_ctx *qedi = fp->qedi;
	struct qed_sb_info *sb_info = fp->sb_info;
	struct status_block *sb = sb_info->sb_virt;
	struct qedi_percpu_s *p = NULL;
	struct global_queue *que;
	u16 prod_idx;
	unsigned long flags;
	union iscsi_cqe *cqe;
	int cpu;
	int ret;

	/* Get the current firmware producer index */
	prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];

	/* The producer is free-running; wrap it onto the CQ. */
	if (prod_idx >= QEDI_CQ_SIZE)
		prod_idx = prod_idx % QEDI_CQ_SIZE;

	que = qedi->global_queues[fp->sb_id];
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
		  "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
		  que, prod_idx, que->cq_cons_idx, fp->sb_id);

	qedi->intr_cpu = fp->sb_id;
	cpu = smp_processor_id();
	p = &per_cpu(qedi_percpu, cpu);

	/* The per-CPU iothread should always exist by the time
	 * completions arrive.
	 */
	if (unlikely(!p->iothread))
		WARN_ON(1);

	spin_lock_irqsave(&p->p_work_lock, flags);
	while (que->cq_cons_idx != prod_idx) {
		cqe = &que->cq[que->cq_cons_idx];

		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
			  "cqe=%p prod_idx=%d cons_idx=%d.\n",
			  cqe, prod_idx, que->cq_cons_idx);

		ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
		/* NOTE(review): on failure the consumer index is not
		 * advanced, so a persistently failing CQE retries the
		 * same entry — confirm this cannot spin forever.
		 */
		if (ret)
			continue;

		que->cq_cons_idx++;
		if (que->cq_cons_idx == QEDI_CQ_SIZE)
			que->cq_cons_idx = 0;
	}
	wake_up_process(p->iothread);
	spin_unlock_irqrestore(&p->p_work_lock, flags);

	return true;
}
1010 | ||
1011 | static bool qedi_fp_has_work(struct qedi_fastpath *fp) | |
1012 | { | |
1013 | struct qedi_ctx *qedi = fp->qedi; | |
1014 | struct global_queue *que; | |
1015 | struct qed_sb_info *sb_info = fp->sb_info; | |
1016 | struct status_block *sb = sb_info->sb_virt; | |
1017 | u16 prod_idx; | |
1018 | ||
1019 | barrier(); | |
1020 | ||
1021 | /* Get the current firmware producer index */ | |
1022 | prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX]; | |
1023 | ||
1024 | /* Get the pointer to the global CQ this completion is on */ | |
1025 | que = qedi->global_queues[fp->sb_id]; | |
1026 | ||
1027 | /* prod idx wrap around uint16 */ | |
1028 | if (prod_idx >= QEDI_CQ_SIZE) | |
1029 | prod_idx = prod_idx % QEDI_CQ_SIZE; | |
1030 | ||
1031 | return (que->cq_cons_idx != prod_idx); | |
1032 | } | |
1033 | ||
/* MSI-X fastpath handler code */
static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
{
	struct qedi_fastpath *fp = dev_id;
	struct qedi_ctx *qedi = fp->qedi;
	bool wake_io_thread = true;

	/* Mask further IGU interrupts on this status block while draining. */
	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);

process_again:
	wake_io_thread = qedi_process_completions(fp);
	if (wake_io_thread) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
			  "process already running\n");
	}

	/* Queue drained: publish the updated SB index back to hardware. */
	if (qedi_fp_has_work(fp) == 0)
		qed_sb_update_sb_idx(fp->sb_info);

	/* Check for more work */
	rmb();

	/* Only re-enable the interrupt once the CQ is fully drained;
	 * otherwise loop and pick up the completions that just arrived.
	 */
	if (qedi_fp_has_work(fp) == 0)
		qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
	else
		goto process_again;

	return IRQ_HANDLED;
}
1063 | ||
1064 | /* simd handler for MSI/INTa */ | |
1065 | static void qedi_simd_int_handler(void *cookie) | |
1066 | { | |
1067 | /* Cookie is qedi_ctx struct */ | |
1068 | struct qedi_ctx *qedi = (struct qedi_ctx *)cookie; | |
1069 | ||
1070 | QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi); | |
1071 | } | |
1072 | ||
#define QEDI_SIMD_HANDLER_NUM		0
/* Quiesce and release all fastpath interrupts: for MSI-X, synchronize,
 * drop the affinity hint and free each vector; otherwise unhook the
 * simd (MSI/INTa) handler. Finally release the vectors via set_fp_int(0).
 */
static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
{
	int i;

	if (qedi->int_info.msix_cnt) {
		/* Only used_cnt vectors were actually requested. */
		for (i = 0; i < qedi->int_info.used_cnt; i++) {
			synchronize_irq(qedi->int_info.msix[i].vector);
			irq_set_affinity_hint(qedi->int_info.msix[i].vector,
					      NULL);
			free_irq(qedi->int_info.msix[i].vector,
				 &qedi->fp_array[i]);
		}
	} else {
		qedi_ops->common->simd_handler_clean(qedi->cdev,
						     QEDI_SIMD_HANDLER_NUM);
	}

	qedi->int_info.used_cnt = 0;
	qedi_ops->common->set_fp_int(qedi->cdev, 0);
}
1094 | ||
1095 | static int qedi_request_msix_irq(struct qedi_ctx *qedi) | |
1096 | { | |
1097 | int i, rc, cpu; | |
1098 | ||
1099 | cpu = cpumask_first(cpu_online_mask); | |
1100 | for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) { | |
1101 | rc = request_irq(qedi->int_info.msix[i].vector, | |
1102 | qedi_msix_handler, 0, "qedi", | |
1103 | &qedi->fp_array[i]); | |
1104 | ||
1105 | if (rc) { | |
1106 | QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n"); | |
1107 | qedi_sync_free_irqs(qedi); | |
1108 | return rc; | |
1109 | } | |
1110 | qedi->int_info.used_cnt++; | |
1111 | rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector, | |
1112 | get_cpu_mask(cpu)); | |
1113 | cpu = cpumask_next(cpu, cpu_online_mask); | |
1114 | } | |
1115 | ||
1116 | return 0; | |
1117 | } | |
1118 | ||
1119 | static int qedi_setup_int(struct qedi_ctx *qedi) | |
1120 | { | |
1121 | int rc = 0; | |
1122 | ||
1123 | rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus()); | |
1124 | rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info); | |
1125 | if (rc) | |
1126 | goto exit_setup_int; | |
1127 | ||
1128 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, | |
1129 | "Number of msix_cnt = 0x%x num of cpus = 0x%x\n", | |
1130 | qedi->int_info.msix_cnt, num_online_cpus()); | |
1131 | ||
1132 | if (qedi->int_info.msix_cnt) { | |
1133 | rc = qedi_request_msix_irq(qedi); | |
1134 | goto exit_setup_int; | |
1135 | } else { | |
1136 | qedi_ops->common->simd_handler_config(qedi->cdev, &qedi, | |
1137 | QEDI_SIMD_HANDLER_NUM, | |
1138 | qedi_simd_int_handler); | |
1139 | qedi->int_info.used_cnt = 1; | |
1140 | } | |
1141 | ||
1142 | exit_setup_int: | |
1143 | return rc; | |
1144 | } | |
1145 | ||
1146 | static void qedi_free_bdq(struct qedi_ctx *qedi) | |
1147 | { | |
1148 | int i; | |
1149 | ||
1150 | if (qedi->bdq_pbl_list) | |
1151 | dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE, | |
1152 | qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma); | |
1153 | ||
1154 | if (qedi->bdq_pbl) | |
1155 | dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size, | |
1156 | qedi->bdq_pbl, qedi->bdq_pbl_dma); | |
1157 | ||
1158 | for (i = 0; i < QEDI_BDQ_NUM; i++) { | |
1159 | if (qedi->bdq[i].buf_addr) { | |
1160 | dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE, | |
1161 | qedi->bdq[i].buf_addr, | |
1162 | qedi->bdq[i].buf_dma); | |
1163 | } | |
1164 | } | |
1165 | } | |
1166 | ||
1167 | static void qedi_free_global_queues(struct qedi_ctx *qedi) | |
1168 | { | |
1169 | int i; | |
1170 | struct global_queue **gl = qedi->global_queues; | |
1171 | ||
1172 | for (i = 0; i < qedi->num_queues; i++) { | |
1173 | if (!gl[i]) | |
1174 | continue; | |
1175 | ||
1176 | if (gl[i]->cq) | |
1177 | dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size, | |
1178 | gl[i]->cq, gl[i]->cq_dma); | |
1179 | if (gl[i]->cq_pbl) | |
1180 | dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size, | |
1181 | gl[i]->cq_pbl, gl[i]->cq_pbl_dma); | |
1182 | ||
1183 | kfree(gl[i]); | |
1184 | } | |
1185 | qedi_free_bdq(qedi); | |
1186 | } | |
1187 | ||
/* Allocate the buffer descriptor queue (BDQ) used for unsolicited
 * firmware data: QEDI_BDQ_NUM fixed-size DMA buffers, a PBL describing
 * them, and a one-page list of PBL page addresses handed to firmware.
 * Returns 0 or -ENOMEM; partial allocations are released by the
 * caller's qedi_free_global_queues()/qedi_free_bdq() path.
 */
static int qedi_alloc_bdq(struct qedi_ctx *qedi)
{
	int i;
	struct scsi_bd *pbl;
	u64 *list;
	dma_addr_t page;

	/* Alloc dma memory for BDQ buffers */
	for (i = 0; i < QEDI_BDQ_NUM; i++) {
		qedi->bdq[i].buf_addr =
		    dma_alloc_coherent(&qedi->pdev->dev,
				       QEDI_BDQ_BUF_SIZE,
				       &qedi->bdq[i].buf_dma,
				       GFP_KERNEL);
		if (!qedi->bdq[i].buf_addr) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Could not allocate BDQ buffer %d.\n", i);
			return -ENOMEM;
		}
	}

	/* Alloc dma memory for BDQ page buffer list */
	qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
	/* Page-align so the PBL can be described in whole pages below. */
	qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
	qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
		  qedi->rq_num_entries);

	qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
					   qedi->bdq_pbl_mem_size,
					   &qedi->bdq_pbl_dma, GFP_KERNEL);
	if (!qedi->bdq_pbl) {
		QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
		return -ENOMEM;
	}

	/*
	 * Populate BDQ PBL with physical and virtual address of individual
	 * BDQ buffers
	 */
	pbl = (struct scsi_bd *)qedi->bdq_pbl;
	for (i = 0; i < QEDI_BDQ_NUM; i++) {
		pbl->address.hi =
				cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
		pbl->address.lo =
				cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
			  pbl, pbl->address.hi, pbl->address.lo, i);
		/* opaque carries the buffer index back in completions. */
		pbl->opaque.hi = 0;
		pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
		pbl++;
	}

	/* Allocate list of PBL pages */
	qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
						PAGE_SIZE,
						&qedi->bdq_pbl_list_dma,
						GFP_KERNEL);
	if (!qedi->bdq_pbl_list) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Could not allocate list of PBL pages.\n");
		return -ENOMEM;
	}
	memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);

	/*
	 * Now populate PBL list with pages that contain pointers to the
	 * individual buffers.
	 */
	qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
	list = (u64 *)qedi->bdq_pbl_list;
	page = qedi->bdq_pbl_list_dma;
	/* NOTE(review): every entry is written with bdq_pbl_dma (the base
	 * of the PBL), not base + i*PAGE_SIZE, and `page` is advanced but
	 * never used — verify this is intended for the one-page case.
	 */
	for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
		*list = qedi->bdq_pbl_dma;
		list++;
		page += PAGE_SIZE;
	}

	return 0;
}
1270 | ||
/* Allocate one global completion queue (CQ + PBL) per MSI-X vector and
 * record each CQ's PBL address in the firmware-visible p_cpuq list.
 * Returns 0 on success; on failure everything allocated so far is
 * released via qedi_free_global_queues().
 * NOTE(review): error returns mix bare 1 and -ENOMEM — callers appear
 * to treat any non-zero as failure, but confirm before relying on the
 * value.
 */
static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
{
	u32 *list;
	int i;
	int status = 0, rc;
	u32 *pbl;
	dma_addr_t page;
	int num_pages;

	/*
	 * Number of global queues (CQ / RQ). This should
	 * be <= number of available MSIX vectors for the PF
	 */
	if (!qedi->num_queues) {
		QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
		return 1;
	}

	/* Make sure we allocated the PBL that will contain the physical
	 * addresses of our queues
	 */
	if (!qedi->p_cpuq) {
		status = 1;
		goto mem_alloc_failure;
	}

	qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
				       qedi->num_queues), GFP_KERNEL);
	if (!qedi->global_queues) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Unable to allocate global queues array ptr memory\n");
		return -ENOMEM;
	}
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
		  "qedi->global_queues=%p.\n", qedi->global_queues);

	/* Allocate DMA coherent buffers for BDQ */
	rc = qedi_alloc_bdq(qedi);
	if (rc)
		goto mem_alloc_failure;

	/* Allocate a CQ and an associated PBL for each MSI-X
	 * vector.
	 */
	for (i = 0; i < qedi->num_queues; i++) {
		qedi->global_queues[i] =
					kzalloc(sizeof(*qedi->global_queues[0]),
						GFP_KERNEL);
		if (!qedi->global_queues[i]) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Unable to allocation global queue %d.\n", i);
			goto mem_alloc_failure;
		}

		/* CQ size plus 8 spare entries, rounded up by almost a
		 * page so the PBL below covers whole pages.
		 */
		qedi->global_queues[i]->cq_mem_size =
		    (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
		qedi->global_queues[i]->cq_mem_size =
		    (qedi->global_queues[i]->cq_mem_size +
		    (QEDI_PAGE_SIZE - 1));

		/* One pointer-sized PBL entry per CQ page. */
		qedi->global_queues[i]->cq_pbl_size =
		    (qedi->global_queues[i]->cq_mem_size /
		    QEDI_PAGE_SIZE) * sizeof(void *);
		qedi->global_queues[i]->cq_pbl_size =
		    (qedi->global_queues[i]->cq_pbl_size +
		    (QEDI_PAGE_SIZE - 1));

		qedi->global_queues[i]->cq =
		    dma_alloc_coherent(&qedi->pdev->dev,
				       qedi->global_queues[i]->cq_mem_size,
				       &qedi->global_queues[i]->cq_dma,
				       GFP_KERNEL);

		if (!qedi->global_queues[i]->cq) {
			QEDI_WARN(&qedi->dbg_ctx,
				  "Could not allocate cq.\n");
			status = -ENOMEM;
			goto mem_alloc_failure;
		}
		memset(qedi->global_queues[i]->cq, 0,
		       qedi->global_queues[i]->cq_mem_size);

		qedi->global_queues[i]->cq_pbl =
		    dma_alloc_coherent(&qedi->pdev->dev,
				       qedi->global_queues[i]->cq_pbl_size,
				       &qedi->global_queues[i]->cq_pbl_dma,
				       GFP_KERNEL);

		if (!qedi->global_queues[i]->cq_pbl) {
			QEDI_WARN(&qedi->dbg_ctx,
				  "Could not allocate cq PBL.\n");
			status = -ENOMEM;
			goto mem_alloc_failure;
		}
		memset(qedi->global_queues[i]->cq_pbl, 0,
		       qedi->global_queues[i]->cq_pbl_size);

		/* Create PBL: lo/hi 32-bit halves of each CQ page address. */
		num_pages = qedi->global_queues[i]->cq_mem_size /
		    QEDI_PAGE_SIZE;
		page = qedi->global_queues[i]->cq_dma;
		pbl = (u32 *)qedi->global_queues[i]->cq_pbl;

		while (num_pages--) {
			*pbl = (u32)page;
			pbl++;
			*pbl = (u32)((u64)page >> 32);
			pbl++;
			page += QEDI_PAGE_SIZE;
		}
	}

	list = (u32 *)qedi->p_cpuq;

	/*
	 * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
	 * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
	 * to the physical address which contains an array of pointers to the
	 * physical addresses of the specific queue pages.
	 */
	for (i = 0; i < qedi->num_queues; i++) {
		*list = (u32)qedi->global_queues[i]->cq_pbl_dma;
		list++;
		*list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
		list++;

		/* RQ slots are unused: zero both halves. */
		*list = (u32)0;
		list++;
		*list = (u32)((u64)0 >> 32);
		list++;
	}

	return 0;

mem_alloc_failure:
	qedi_free_global_queues(qedi);
	return status;
}
1409 | ||
/* Allocate an endpoint's send queue (SQ) and the PBL describing its
 * pages, then fill the PBL with lo/hi halves of each page's DMA
 * address. Returns 0 on success, -EIO for a NULL endpoint, or -ENOMEM
 * (the SQ is released again if only the PBL allocation fails).
 */
int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
{
	int rval = 0;
	u32 *pbl;
	dma_addr_t page;
	int num_pages;

	if (!ep)
		return -EIO;

	/* Calculate appropriate queue and PBL sizes */
	ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
	ep->sq_mem_size += QEDI_PAGE_SIZE - 1;

	ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
	/* NOTE(review): this pads by a full QEDI_PAGE_SIZE where the CQ
	 * path pads by QEDI_PAGE_SIZE - 1 — confirm the asymmetry is
	 * intentional.
	 */
	ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;

	ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
				    &ep->sq_dma, GFP_KERNEL);
	if (!ep->sq) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Could not allocate send queue.\n");
		rval = -ENOMEM;
		goto out;
	}
	memset(ep->sq, 0, ep->sq_mem_size);

	ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
					&ep->sq_pbl_dma, GFP_KERNEL);
	if (!ep->sq_pbl) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Could not allocate send queue PBL.\n");
		rval = -ENOMEM;
		goto out_free_sq;
	}
	memset(ep->sq_pbl, 0, ep->sq_pbl_size);

	/* Create PBL: one lo/hi DMA-address pair per SQ page. */
	num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
	page = ep->sq_dma;
	pbl = (u32 *)ep->sq_pbl;

	while (num_pages--) {
		*pbl = (u32)page;
		pbl++;
		*pbl = (u32)((u64)page >> 32);
		pbl++;
		page += QEDI_PAGE_SIZE;
	}

	return rval;

out_free_sq:
	dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
			  ep->sq_dma);
out:
	return rval;
}
1468 | ||
1469 | void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep) | |
1470 | { | |
1471 | if (ep->sq_pbl) | |
1472 | dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl, | |
1473 | ep->sq_pbl_dma); | |
1474 | if (ep->sq) | |
1475 | dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq, | |
1476 | ep->sq_dma); | |
1477 | } | |
1478 | ||
/* Reserve a free firmware task-context index from the task bitmap.
 * Returns the claimed index, or -1 when the pool is exhausted.
 */
int qedi_get_task_idx(struct qedi_ctx *qedi)
{
	s16 tmp_idx;

again:
	tmp_idx = find_first_zero_bit(qedi->task_idx_map,
				      MAX_ISCSI_TASK_ENTRIES);

	if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
		QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
		tmp_idx = -1;
		goto err_idx;
	}

	/* The scan above is not atomic with the claim; if another
	 * context grabbed this bit in between, rescan from the start.
	 */
	if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
		goto again;

err_idx:
	return tmp_idx;
}
1499 | ||
1500 | void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) | |
1501 | { | |
1502 | if (!test_and_clear_bit(idx, qedi->task_idx_map)) { | |
1503 | QEDI_ERR(&qedi->dbg_ctx, | |
1504 | "FW task context, already cleared, tid=0x%x\n", idx); | |
1505 | WARN_ON(1); | |
1506 | } | |
1507 | } | |
1508 | ||
1509 | void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, | |
1510 | struct qedi_cmd *cmd) | |
1511 | { | |
1512 | qedi->itt_map[tid].itt = proto_itt; | |
1513 | qedi->itt_map[tid].p_cmd = cmd; | |
1514 | ||
1515 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, | |
1516 | "update itt map tid=0x%x, with proto itt=0x%x\n", tid, | |
1517 | qedi->itt_map[tid].itt); | |
1518 | } | |
1519 | ||
1520 | void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid) | |
1521 | { | |
1522 | u16 i; | |
1523 | ||
1524 | for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) { | |
1525 | if (qedi->itt_map[i].itt == itt) { | |
1526 | *tid = i; | |
1527 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, | |
1528 | "Ref itt=0x%x, found at tid=0x%x\n", | |
1529 | itt, *tid); | |
1530 | return; | |
1531 | } | |
1532 | } | |
1533 | ||
1534 | WARN_ON(1); | |
1535 | } | |
1536 | ||
1537 | void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt) | |
1538 | { | |
1539 | *proto_itt = qedi->itt_map[tid].itt; | |
1540 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, | |
1541 | "Get itt map tid [0x%x with proto itt[0x%x]", | |
1542 | tid, *proto_itt); | |
1543 | } | |
1544 | ||
1545 | struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid) | |
1546 | { | |
1547 | struct qedi_cmd *cmd = NULL; | |
1548 | ||
1549 | if (tid > MAX_ISCSI_TASK_ENTRIES) | |
1550 | return NULL; | |
1551 | ||
1552 | cmd = qedi->itt_map[tid].p_cmd; | |
1553 | if (cmd->task_id != tid) | |
1554 | return NULL; | |
1555 | ||
1556 | qedi->itt_map[tid].p_cmd = NULL; | |
1557 | ||
1558 | return cmd; | |
1559 | } | |
1560 | ||
1561 | static int qedi_alloc_itt(struct qedi_ctx *qedi) | |
1562 | { | |
1563 | qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES, | |
1564 | sizeof(struct qedi_itt_map), GFP_KERNEL); | |
1565 | if (!qedi->itt_map) { | |
1566 | QEDI_ERR(&qedi->dbg_ctx, | |
1567 | "Unable to allocate itt map array memory\n"); | |
1568 | return -ENOMEM; | |
1569 | } | |
1570 | return 0; | |
1571 | } | |
1572 | ||
1573 | static void qedi_free_itt(struct qedi_ctx *qedi) | |
1574 | { | |
1575 | kfree(qedi->itt_map); | |
1576 | } | |
1577 | ||
/* Light-L2 callbacks registered with qed: only the receive path is
 * hooked; no transmit-completion callback is used.
 */
static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
	.rx_cb = qedi_ll2_rx,
	.tx_cb = NULL,
};
1582 | ||
/* Per-CPU kthread: drains the CPU's CQE work list, processing each
 * entry outside the lock, and sleeps when the list is empty. Runs
 * until kthread_stop().
 */
static int qedi_percpu_io_thread(void *arg)
{
	struct qedi_percpu_s *p = arg;
	struct qedi_work *work, *tmp;
	unsigned long flags;
	LIST_HEAD(work_list);

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		spin_lock_irqsave(&p->p_work_lock, flags);
		while (!list_empty(&p->work_list)) {
			/* Grab the whole pending list in one splice so the
			 * lock is not held while CQEs are processed.
			 */
			list_splice_init(&p->work_list, &work_list);
			spin_unlock_irqrestore(&p->p_work_lock, flags);

			list_for_each_entry_safe(work, tmp, &work_list, list) {
				list_del_init(&work->list);
				qedi_fp_process_cqes(work);
				/* Only unsolicited entries were kmalloc'd
				 * (see qedi_queue_cqe); solicited ones are
				 * embedded in the qedi_cmd.
				 */
				if (!work->is_solicited)
					kfree(work);
			}
			cond_resched();
			spin_lock_irqsave(&p->p_work_lock, flags);
		}
		/* Declare sleep intent before dropping the lock so a
		 * producer's wake_up_process() cannot be lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&p->p_work_lock, flags);
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
1615 | ||
/* CPU-hotplug online callback: create, bind and start the per-CPU I/O
 * kthread for the CPU coming online. Returns 0 or the
 * kthread_create_on_node() error.
 */
static int qedi_cpu_online(unsigned int cpu)
{
	struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
	struct task_struct *thread;

	thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
					cpu_to_node(cpu),
					"qedi_thread/%d", cpu);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	/* Pin the thread to its CPU before first wakeup. */
	kthread_bind(thread, cpu);
	p->iothread = thread;
	wake_up_process(thread);
	return 0;
}
1632 | ||
/* CPU-hotplug offline callback: detach the CPU's I/O thread, flush any
 * work still queued on this CPU inline, then stop the thread. Always
 * returns 0.
 */
static int qedi_cpu_offline(unsigned int cpu)
{
	struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
	struct qedi_work *work, *tmp;
	struct task_struct *thread;

	spin_lock_bh(&p->p_work_lock);
	/* Clear iothread under the lock so producers stop queueing/waking. */
	thread = p->iothread;
	p->iothread = NULL;

	/* Process leftover CQEs here rather than dropping them. */
	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
		list_del_init(&work->list);
		qedi_fp_process_cqes(work);
		if (!work->is_solicited)
			kfree(work);
	}

	spin_unlock_bh(&p->p_work_lock);
	if (thread)
		kthread_stop(thread);
	return 0;
}
1655 | ||
ace7f46b MR |
/* Apply a new LL2 MTU: tear down all offloaded connections and the LL2
 * channel, then restart LL2 with parameters sized for the new MTU.
 */
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
{
	struct qed_ll2_params params;

	/* Existing connections were negotiated at the old MTU. */
	qedi_recover_all_conns(qedi);

	qedi_ops->ll2->stop(qedi->cdev);
	qedi_ll2_free_skbs(qedi);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
		  qedi->ll2_mtu, mtu);
	memset(&params, 0, sizeof(params));
	qedi->ll2_mtu = mtu;
	/* LL2 needs headroom for the IPv6 and TCP headers on top of MTU. */
	params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
	params.drop_ttl0_packets = 0;
	params.rx_vlan_stripping = 1;
	ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
	qedi_ops->ll2->start(qedi->cdev, &params);
}
1675 | ||
/* Common teardown for PCI remove and recovery. In QEDI_MODE_NORMAL the
 * full host (iSCSI host, uio, cid queues, itt map, ll2 thread) is torn
 * down as well; hardware-facing steps are skipped when the device is
 * already marked QEDI_IN_OFFLINE.
 */
static void __qedi_remove(struct pci_dev *pdev, int mode)
{
	struct qedi_ctx *qedi = pci_get_drvdata(pdev);

	/* Flush and destroy work queues before any structures they use go. */
	if (qedi->tmf_thread) {
		flush_workqueue(qedi->tmf_thread);
		destroy_workqueue(qedi->tmf_thread);
		qedi->tmf_thread = NULL;
	}

	if (qedi->offload_thread) {
		flush_workqueue(qedi->offload_thread);
		destroy_workqueue(qedi->offload_thread);
		qedi->offload_thread = NULL;
	}

#ifdef CONFIG_DEBUG_FS
	qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
	if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
		qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);

	qedi_sync_free_irqs(qedi);

	if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
		qedi_ops->stop(qedi->cdev);
		qedi_ops->ll2->stop(qedi->cdev);
	}

	if (mode == QEDI_MODE_NORMAL)
		qedi_free_iscsi_pf_param(qedi);

	if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
		qedi_ops->common->slowpath_stop(qedi->cdev);
		qedi_ops->common->remove(qedi->cdev);
	}

	qedi_destroy_fp(qedi);

	if (mode == QEDI_MODE_NORMAL) {
		qedi_release_cid_que(qedi);
		qedi_cm_free_mem(qedi);
		qedi_free_uio(qedi->udev);
		qedi_free_itt(qedi);

		iscsi_host_remove(qedi->shost);
		iscsi_host_free(qedi->shost);

		if (qedi->ll2_recv_thread) {
			kthread_stop(qedi->ll2_recv_thread);
			qedi->ll2_recv_thread = NULL;
		}
		qedi_ll2_free_skbs(qedi);
	}
}
1731 | ||
1732 | static int __qedi_probe(struct pci_dev *pdev, int mode) | |
1733 | { | |
1734 | struct qedi_ctx *qedi; | |
1735 | struct qed_ll2_params params; | |
1736 | u32 dp_module = 0; | |
1737 | u8 dp_level = 0; | |
1738 | bool is_vf = false; | |
1739 | char host_buf[16]; | |
1740 | struct qed_link_params link_params; | |
1741 | struct qed_slowpath_params sp_params; | |
1742 | struct qed_probe_params qed_params; | |
1743 | void *task_start, *task_end; | |
1744 | int rc; | |
1745 | u16 tmp; | |
1746 | ||
1747 | if (mode != QEDI_MODE_RECOVERY) { | |
1748 | qedi = qedi_host_alloc(pdev); | |
1749 | if (!qedi) { | |
1750 | rc = -ENOMEM; | |
1751 | goto exit_probe; | |
1752 | } | |
1753 | } else { | |
1754 | qedi = pci_get_drvdata(pdev); | |
1755 | } | |
1756 | ||
1757 | memset(&qed_params, 0, sizeof(qed_params)); | |
1758 | qed_params.protocol = QED_PROTOCOL_ISCSI; | |
1759 | qed_params.dp_module = dp_module; | |
1760 | qed_params.dp_level = dp_level; | |
1761 | qed_params.is_vf = is_vf; | |
1762 | qedi->cdev = qedi_ops->common->probe(pdev, &qed_params); | |
1763 | if (!qedi->cdev) { | |
1764 | rc = -ENODEV; | |
1765 | QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n"); | |
1766 | goto free_host; | |
1767 | } | |
1768 | ||
1769 | qedi->msix_count = MAX_NUM_MSIX_PF; | |
1770 | atomic_set(&qedi->link_state, QEDI_LINK_DOWN); | |
1771 | ||
1772 | if (mode != QEDI_MODE_RECOVERY) { | |
1773 | rc = qedi_set_iscsi_pf_param(qedi); | |
1774 | if (rc) { | |
1775 | rc = -ENOMEM; | |
1776 | QEDI_ERR(&qedi->dbg_ctx, | |
1777 | "Set iSCSI pf param fail\n"); | |
1778 | goto free_host; | |
1779 | } | |
1780 | } | |
1781 | ||
1782 | qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params); | |
1783 | ||
1784 | rc = qedi_prepare_fp(qedi); | |
1785 | if (rc) { | |
1786 | QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n"); | |
1787 | goto free_pf_params; | |
1788 | } | |
1789 | ||
1790 | /* Start the Slowpath-process */ | |
1791 | memset(&sp_params, 0, sizeof(struct qed_slowpath_params)); | |
1792 | sp_params.int_mode = QED_INT_MODE_MSIX; | |
1793 | sp_params.drv_major = QEDI_DRIVER_MAJOR_VER; | |
1794 | sp_params.drv_minor = QEDI_DRIVER_MINOR_VER; | |
1795 | sp_params.drv_rev = QEDI_DRIVER_REV_VER; | |
1796 | sp_params.drv_eng = QEDI_DRIVER_ENG_VER; | |
1797 | strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE); | |
1798 | rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params); | |
1799 | if (rc) { | |
1800 | QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n"); | |
1801 | goto stop_hw; | |
1802 | } | |
1803 | ||
1804 | /* update_pf_params needs to be called before and after slowpath | |
1805 | * start | |
1806 | */ | |
1807 | qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params); | |
1808 | ||
d1a9ccc4 | 1809 | rc = qedi_setup_int(qedi); |
ace7f46b MR |
1810 | if (rc) |
1811 | goto stop_iscsi_func; | |
1812 | ||
1813 | qedi_ops->common->set_power_state(qedi->cdev, PCI_D0); | |
1814 | ||
1815 | /* Learn information crucial for qedi to progress */ | |
1816 | rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info); | |
1817 | if (rc) | |
1818 | goto stop_iscsi_func; | |
1819 | ||
1820 | /* Record BDQ producer doorbell addresses */ | |
1821 | qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr; | |
1822 | qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr; | |
1823 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, | |
1824 | "BDQ primary_prod=%p secondary_prod=%p.\n", | |
1825 | qedi->bdq_primary_prod, | |
1826 | qedi->bdq_secondary_prod); | |
1827 | ||
1828 | /* | |
1829 | * We need to write the number of BDs in the BDQ we've preallocated so | |
1830 | * the f/w will do a prefetch and we'll get an unsolicited CQE when a | |
1831 | * packet arrives. | |
1832 | */ | |
1833 | qedi->bdq_prod_idx = QEDI_BDQ_NUM; | |
1834 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, | |
1835 | "Writing %d to primary and secondary BDQ doorbell registers.\n", | |
1836 | qedi->bdq_prod_idx); | |
1837 | writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod); | |
1838 | tmp = readw(qedi->bdq_primary_prod); | |
1839 | writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod); | |
1840 | tmp = readw(qedi->bdq_secondary_prod); | |
1841 | ||
1842 | ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac); | |
1843 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n", | |
1844 | qedi->mac); | |
1845 | ||
1846 | sprintf(host_buf, "host_%d", qedi->shost->host_no); | |
1847 | qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION); | |
1848 | ||
1849 | qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi); | |
1850 | ||
1851 | memset(¶ms, 0, sizeof(params)); | |
1852 | params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN; | |
1853 | qedi->ll2_mtu = DEF_PATH_MTU; | |
1854 | params.drop_ttl0_packets = 0; | |
1855 | params.rx_vlan_stripping = 1; | |
1856 | ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac); | |
1857 | ||
1858 | if (mode != QEDI_MODE_RECOVERY) { | |
1859 | /* set up rx path */ | |
1860 | INIT_LIST_HEAD(&qedi->ll2_skb_list); | |
1861 | spin_lock_init(&qedi->ll2_lock); | |
1862 | /* start qedi context */ | |
1863 | spin_lock_init(&qedi->hba_lock); | |
1864 | spin_lock_init(&qedi->task_idx_lock); | |
1865 | } | |
1866 | qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi); | |
1867 | qedi_ops->ll2->start(qedi->cdev, ¶ms); | |
1868 | ||
1869 | if (mode != QEDI_MODE_RECOVERY) { | |
1870 | qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread, | |
1871 | (void *)qedi, | |
1872 | "qedi_ll2_thread"); | |
1873 | } | |
1874 | ||
1875 | rc = qedi_ops->start(qedi->cdev, &qedi->tasks, | |
1876 | qedi, qedi_iscsi_event_cb); | |
1877 | if (rc) { | |
1878 | rc = -ENODEV; | |
1879 | QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n"); | |
1880 | goto stop_slowpath; | |
1881 | } | |
1882 | ||
1883 | task_start = qedi_get_task_mem(&qedi->tasks, 0); | |
1884 | task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1); | |
1885 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, | |
1886 | "Task context start=%p, end=%p block_size=%u.\n", | |
1887 | task_start, task_end, qedi->tasks.size); | |
1888 | ||
1889 | memset(&link_params, 0, sizeof(link_params)); | |
1890 | link_params.link_up = true; | |
1891 | rc = qedi_ops->common->set_link(qedi->cdev, &link_params); | |
1892 | if (rc) { | |
1893 | QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n"); | |
1894 | atomic_set(&qedi->link_state, QEDI_LINK_DOWN); | |
1895 | } | |
1896 | ||
1897 | #ifdef CONFIG_DEBUG_FS | |
1898 | qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops, | |
1899 | &qedi_dbg_fops); | |
1900 | #endif | |
1901 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, | |
1902 | "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n", | |
1903 | QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION, | |
1904 | FW_REVISION_VERSION, FW_ENGINEERING_VERSION); | |
1905 | ||
1906 | if (mode == QEDI_MODE_NORMAL) { | |
1907 | if (iscsi_host_add(qedi->shost, &pdev->dev)) { | |
1908 | QEDI_ERR(&qedi->dbg_ctx, | |
1909 | "Could not add iscsi host\n"); | |
1910 | rc = -ENOMEM; | |
1911 | goto remove_host; | |
1912 | } | |
1913 | ||
1914 | /* Allocate uio buffers */ | |
1915 | rc = qedi_alloc_uio_rings(qedi); | |
1916 | if (rc) { | |
1917 | QEDI_ERR(&qedi->dbg_ctx, | |
1918 | "UIO alloc ring failed err=%d\n", rc); | |
1919 | goto remove_host; | |
1920 | } | |
1921 | ||
1922 | rc = qedi_init_uio(qedi); | |
1923 | if (rc) { | |
1924 | QEDI_ERR(&qedi->dbg_ctx, | |
1925 | "UIO init failed, err=%d\n", rc); | |
1926 | goto free_uio; | |
1927 | } | |
1928 | ||
1929 | /* host the array on iscsi_conn */ | |
1930 | rc = qedi_setup_cid_que(qedi); | |
1931 | if (rc) { | |
1932 | QEDI_ERR(&qedi->dbg_ctx, | |
1933 | "Could not setup cid que\n"); | |
1934 | goto free_uio; | |
1935 | } | |
1936 | ||
1937 | rc = qedi_cm_alloc_mem(qedi); | |
1938 | if (rc) { | |
1939 | QEDI_ERR(&qedi->dbg_ctx, | |
1940 | "Could not alloc cm memory\n"); | |
1941 | goto free_cid_que; | |
1942 | } | |
1943 | ||
1944 | rc = qedi_alloc_itt(qedi); | |
1945 | if (rc) { | |
1946 | QEDI_ERR(&qedi->dbg_ctx, | |
1947 | "Could not alloc itt memory\n"); | |
1948 | goto free_cid_que; | |
1949 | } | |
1950 | ||
1951 | sprintf(host_buf, "host_%d", qedi->shost->host_no); | |
1952 | qedi->tmf_thread = create_singlethread_workqueue(host_buf); | |
1953 | if (!qedi->tmf_thread) { | |
1954 | QEDI_ERR(&qedi->dbg_ctx, | |
1955 | "Unable to start tmf thread!\n"); | |
1956 | rc = -ENODEV; | |
1957 | goto free_cid_que; | |
1958 | } | |
1959 | ||
1960 | sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no); | |
1961 | qedi->offload_thread = create_workqueue(host_buf); | |
1962 | if (!qedi->offload_thread) { | |
1963 | QEDI_ERR(&qedi->dbg_ctx, | |
1964 | "Unable to start offload thread!\n"); | |
1965 | rc = -ENODEV; | |
1966 | goto free_cid_que; | |
1967 | } | |
1968 | ||
1969 | /* F/w needs 1st task context memory entry for performance */ | |
1970 | set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map); | |
1971 | atomic_set(&qedi->num_offloads, 0); | |
1972 | } | |
1973 | ||
1974 | return 0; | |
1975 | ||
1976 | free_cid_que: | |
1977 | qedi_release_cid_que(qedi); | |
1978 | free_uio: | |
1979 | qedi_free_uio(qedi->udev); | |
1980 | remove_host: | |
1981 | #ifdef CONFIG_DEBUG_FS | |
1982 | qedi_dbg_host_exit(&qedi->dbg_ctx); | |
1983 | #endif | |
1984 | iscsi_host_remove(qedi->shost); | |
1985 | stop_iscsi_func: | |
1986 | qedi_ops->stop(qedi->cdev); | |
1987 | stop_slowpath: | |
1988 | qedi_ops->common->slowpath_stop(qedi->cdev); | |
1989 | stop_hw: | |
1990 | qedi_ops->common->remove(qedi->cdev); | |
1991 | free_pf_params: | |
1992 | qedi_free_iscsi_pf_param(qedi); | |
1993 | free_host: | |
1994 | iscsi_host_free(qedi->shost); | |
1995 | exit_probe: | |
1996 | return rc; | |
1997 | } | |
1998 | ||
1999 | static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |
2000 | { | |
2001 | return __qedi_probe(pdev, QEDI_MODE_NORMAL); | |
2002 | } | |
2003 | ||
/*
 * PCI remove callback: tear down one adapter instance via the shared
 * __qedi_remove() worker in normal (non-recovery) mode.
 */
static void qedi_remove(struct pci_dev *pdev)
{
	__qedi_remove(pdev, QEDI_MODE_NORMAL);
}
2008 | ||
2009 | static struct pci_device_id qedi_pci_tbl[] = { | |
2010 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) }, | |
04688525 | 2011 | { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) }, |
ace7f46b MR |
2012 | { 0 }, |
2013 | }; | |
2014 | MODULE_DEVICE_TABLE(pci, qedi_pci_tbl); | |
2015 | ||
a98d1a0c TG |
/*
 * Dynamic CPU hotplug state id handed back by cpuhp_setup_state() during
 * qedi_init(); cpuhp_remove_state() needs it on the exit and error paths.
 */
static enum cpuhp_state qedi_cpuhp_state;
ace7f46b MR |
/* PCI driver glue: matches devices in qedi_pci_tbl and routes probe/remove. */
static struct pci_driver qedi_pci_driver = {
	.name = QEDI_MODULE_NAME,
	.id_table = qedi_pci_tbl,
	.probe = qedi_probe,
	.remove = qedi_remove,
};
2024 | ||
2025 | static int __init qedi_init(void) | |
2026 | { | |
ace7f46b | 2027 | struct qedi_percpu_s *p; |
a98d1a0c | 2028 | int cpu, rc = 0; |
ace7f46b MR |
2029 | |
2030 | qedi_ops = qed_get_iscsi_ops(); | |
2031 | if (!qedi_ops) { | |
2032 | QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n"); | |
a98d1a0c | 2033 | return -EINVAL; |
ace7f46b MR |
2034 | } |
2035 | ||
2036 | #ifdef CONFIG_DEBUG_FS | |
2037 | qedi_dbg_init("qedi"); | |
2038 | #endif | |
2039 | ||
2040 | qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport); | |
2041 | if (!qedi_scsi_transport) { | |
2042 | QEDI_ERR(NULL, "Could not register qedi transport"); | |
2043 | rc = -ENOMEM; | |
2044 | goto exit_qedi_init_1; | |
2045 | } | |
2046 | ||
ace7f46b MR |
2047 | for_each_possible_cpu(cpu) { |
2048 | p = &per_cpu(qedi_percpu, cpu); | |
2049 | INIT_LIST_HEAD(&p->work_list); | |
2050 | spin_lock_init(&p->p_work_lock); | |
2051 | p->iothread = NULL; | |
2052 | } | |
2053 | ||
a98d1a0c TG |
2054 | rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online", |
2055 | qedi_cpu_online, qedi_cpu_offline); | |
2056 | if (rc < 0) | |
2057 | goto exit_qedi_init_2; | |
2058 | qedi_cpuhp_state = rc; | |
ace7f46b | 2059 | |
a98d1a0c TG |
2060 | rc = pci_register_driver(&qedi_pci_driver); |
2061 | if (rc) { | |
2062 | QEDI_ERR(NULL, "Failed to register driver\n"); | |
2063 | goto exit_qedi_hp; | |
2064 | } | |
2065 | ||
2066 | return 0; | |
ace7f46b | 2067 | |
a98d1a0c TG |
2068 | exit_qedi_hp: |
2069 | cpuhp_remove_state(qedi_cpuhp_state); | |
ace7f46b MR |
2070 | exit_qedi_init_2: |
2071 | iscsi_unregister_transport(&qedi_iscsi_transport); | |
2072 | exit_qedi_init_1: | |
2073 | #ifdef CONFIG_DEBUG_FS | |
2074 | qedi_dbg_exit(); | |
2075 | #endif | |
2076 | qed_put_iscsi_ops(); | |
ace7f46b MR |
2077 | return rc; |
2078 | } | |
2079 | ||
/*
 * Module exit: tear down in exact reverse order of qedi_init().
 * Unregistering the PCI driver first removes all adapter instances,
 * after which the hotplug state, the iSCSI transport, debugfs and the
 * qed ops reference can be released safely.
 */
static void __exit qedi_cleanup(void)
{
	pci_unregister_driver(&qedi_pci_driver);
	cpuhp_remove_state(qedi_cpuhp_state);
	iscsi_unregister_transport(&qedi_iscsi_transport);

#ifdef CONFIG_DEBUG_FS
	qedi_dbg_exit();
#endif
	qed_put_iscsi_ops();
}
2091 | ||
/* Standard module metadata and init/exit entry points. */
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDI_MODULE_VERSION);
module_init(qedi_init);
module_exit(qedi_cleanup);