/*
 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "snic.h"
#include "snic_fwint.h"

#define PCI_DEVICE_ID_CISCO_SNIC	0x0046

/* Devices supported by the snic module */
static struct pci_device_id snic_id_table[] = {
	{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
	{ 0, }	/* end of table */
};

unsigned int snic_log_level = 0x0;
module_param(snic_log_level, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
unsigned int snic_trace_max_pages = 16;
module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_trace_max_pages,
		"Total allocated memory pages for snic trace buffer");

#endif
unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");

/*
 * snic_slave_alloc : callback function to SCSI Mid Layer, called on
 * scsi device initialization.
 */
static int
snic_slave_alloc(struct scsi_device *sdev)
{
	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));

	if (!tgt || snic_tgt_chkready(tgt))
		return -ENXIO;

	return 0;
}

/*
 * snic_slave_configure : callback function to SCSI Mid Layer, called
 * after slave_alloc to set up the device queue depth and command timeout.
 */
static int
snic_slave_configure(struct scsi_device *sdev)
{
	struct snic *snic = shost_priv(sdev->host);
	u32 qdepth = 0, max_ios = 0;
	int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;

	/* Set Queue Depth */
	max_ios = snic_max_qdepth;
	qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
	scsi_change_queue_depth(sdev, qdepth);

	if (snic->fwinfo.io_tmo > 1)
		tmo = snic->fwinfo.io_tmo * HZ;

	/* FW requires extended timeouts */
	blk_queue_rq_timeout(sdev->request_queue, tmo);

	return 0;
}

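/*
 * snic_change_queue_depth : ->change_queue_depth callback, invoked by the
 * SCSI midlayer (e.g. on sysfs queue_depth writes). The requested depth is
 * clamped to SNIC_MAX_QUEUE_DEPTH and the resulting depth is returned.
 */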
static int
snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	int qsz = 0;

	qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
	scsi_change_queue_depth(sdev, qsz);
	SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth);

	return sdev->queue_depth;
}

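/*
 * SCSI host template. can_queue below is an upper bound; snic_probe()
 * may adjust it based on the firmware-reported io_throttle_count once
 * the vNIC configuration has been read.
 */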
static struct scsi_host_template snic_host_template = {
	.module = THIS_MODULE,
	.name = SNIC_DRV_NAME,
	.queuecommand = snic_queuecommand,
	.eh_abort_handler = snic_abort_cmd,
	.eh_device_reset_handler = snic_device_reset,
	.eh_host_reset_handler = snic_host_reset,
	.slave_alloc = snic_slave_alloc,
	.slave_configure = snic_slave_configure,
	.change_queue_depth = snic_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
	.can_queue = SNIC_MAX_IO_REQ,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0x800,
	.shost_attrs = snic_attrs,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct snic_internal_io_state),
	.proc_name = "snic_scsi",
};

/*
 * snic_handle_link_event : Handles link events such as link up/down/error
 */
void
snic_handle_link_event(struct snic *snic)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->stop_link_events) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	queue_work(snic_glob->event_q, &snic->link_work);
} /* end of snic_handle_link_event */

/*
 * snic_notify_set : sets the notification area used to receive events
 * from the firmware.
 * Note: snic supports only MSI-X interrupts, so svnic_dev_notify_set
 * can be called directly.
 */
static int
snic_notify_set(struct snic *snic)
{
	int ret = 0;
	enum vnic_dev_intr_mode intr_mode;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);

	if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
		ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
	} else {
		SNIC_HOST_ERR(snic->shost,
			      "Interrupt mode should be setup before devcmd notify set %d\n",
			      intr_mode);
		ret = -1;
	}

	return ret;
} /* end of snic_notify_set */

/*
 * snic_dev_wait : polls vnic open status.
 */
static int
snic_dev_wait(struct vnic_dev *vdev,
		int (*start)(struct vnic_dev *, int),
		int (*finished)(struct vnic_dev *, int *),
		int arg)
{
	unsigned long time;
	int ret, done;
	int retry_cnt = 0;

	ret = start(vdev, arg);
	if (ret)
		return ret;

	/*
	 * Wait for func to complete... 2 seconds max.
	 *
	 * Sometimes schedule_timeout_uninterruptible takes a long time to
	 * wake up, which can cause the timed wait to skip retries. The
	 * retry counter ensures at least two retries.
	 */
	time = jiffies + (HZ * 2);
	do {
		ret = finished(vdev, &done);
		if (ret)
			return ret;

		if (done)
			return 0;
		schedule_timeout_uninterruptible(HZ/10);
		++retry_cnt;
	} while (time_after(time, jiffies) || (retry_cnt < 3));

	return -ETIMEDOUT;
} /* end of snic_dev_wait */

/*
 * snic_cleanup : called by snic_remove
 * Stops the snic device, masks all interrupts, and drains completed CQ
 * entries. Posted WQ/RQ/Copy-WQ entries are cleaned up.
 */
static int
snic_cleanup(struct snic *snic)
{
	unsigned int i;
	int ret;

	svnic_dev_disable(snic->vdev);
	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_disable(&snic->wq[i]);
		if (ret)
			return ret;
	}

	/* Clean up completed IOs */
	snic_fwcq_cmpl_handler(snic, -1);

	snic_wq_cmpl_handler(snic, -1);

	/* Clean up the IOs that have not completed */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_clean(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_clean(&snic->intr[i]);

	/* Cleanup snic specific requests */
	snic_free_all_untagged_reqs(snic);

	/* Cleanup Pending SCSI commands */
	snic_shutdown_scsi_cleanup(snic);

	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
		mempool_destroy(snic->req_pool[i]);

	return 0;
} /* end of snic_cleanup */


static void
snic_iounmap(struct snic *snic)
{
	if (snic->bar0.vaddr)
		iounmap(snic->bar0.vaddr);
}

/*
 * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
 */
static int
snic_vdev_open_done(struct vnic_dev *vdev, int *done)
{
	struct snic *snic = svnic_dev_priv(vdev);
	int ret;
	int nretries = 5;

	do {
		ret = svnic_dev_open_done(vdev, done);
		if (ret == 0)
			break;

		SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n");
	} while (nretries--);

	return ret;
} /* end of snic_vdev_open_done */

/*
 * snic_add_host : registers scsi host with ML
 */
static int
snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
{
	int ret = 0;

	ret = scsi_add_host(shost, &pdev->dev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "snic: scsi_add_host failed. %d\n",
			      ret);

		return ret;
	}

	SNIC_BUG_ON(shost->work_q != NULL);
	snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
		 shost->host_no);
	shost->work_q = create_singlethread_workqueue(shost->work_q_name);
	if (!shost->work_q) {
		SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");

		ret = -ENOMEM;
	}

	return ret;
} /* end of snic_add_host */

static void
snic_del_host(struct Scsi_Host *shost)
{
	if (!shost->work_q)
		return;

	destroy_workqueue(shost->work_q);
	shost->work_q = NULL;
	scsi_remove_host(shost);
}

int
snic_get_state(struct snic *snic)
{
	return atomic_read(&snic->state);
}

void
snic_set_state(struct snic *snic, enum snic_state state)
{
	SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
		       snic_state_to_str(snic_get_state(snic)),
		       snic_state_to_str(state));

	atomic_set(&snic->state, state);
}

/*
 * snic_probe : Initialize the snic interface.
 */
static int
snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *shost;
	struct snic *snic;
	mempool_t *pool;
	unsigned long flags;
	u32 max_ios = 0;
	int ret, i;

	/* Device Information */
	SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
		  pdev->subsystem_device);

	SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	/*
	 * Allocate the SCSI host and set up the association between the
	 * host and snic.
	 */
	shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
	if (!shost) {
		SNIC_ERR("Unable to alloc scsi_host\n");
		ret = -ENOMEM;

		goto prob_end;
	}
	snic = shost_priv(shost);
	snic->shost = shost;

	snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
		 shost->host_no);

	SNIC_HOST_INFO(shost,
		       "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
		       shost->host_no, snic, shost, pdev->bus->number,
		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Per snic debugfs init */
	ret = snic_stats_debugfs_init(snic);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "Failed to initialize debugfs stats\n");
		snic_stats_debugfs_remove(snic);
	}
#endif

	/* Setup PCI Resources */
	pci_set_drvdata(pdev, snic);
	snic->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot enable PCI Resources, aborting : %d\n",
			      ret);

		goto err_free_snic;
	}

	ret = pci_request_regions(pdev, SNIC_DRV_NAME);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot obtain PCI Resources, aborting : %d\n",
			      ret);

		goto err_pci_disable;
	}

	pci_set_master(pdev);

	/*
	 * Query the PCI controller on the system for DMA addressing
	 * limitation for the device. Try a 43-bit mask first, and fall
	 * back to 32-bit.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "No Usable DMA Configuration, aborting %d\n",
				      ret);

			goto err_rel_regions;
		}

		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
				      ret);

			goto err_rel_regions;
		}
	} else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
				      ret);

			goto err_rel_regions;
		}
	}


	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	if (!snic->bar0.vaddr) {
		SNIC_HOST_ERR(shost,
			      "Cannot memory map BAR0 res hdr aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.bus_addr = pci_resource_start(pdev, 0);
	snic->bar0.len = pci_resource_len(pdev, 0);
	SNIC_BUG_ON(snic->bar0.bus_addr == 0);

	/* Devcmd2 Resource Allocation and Initialization */
	snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
	if (!snic->vdev) {
		SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");

		ret = -ENODEV;
		goto err_iounmap;
	}

	ret = svnic_dev_cmd_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);

		goto err_vnic_unreg;
	}

	ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev open failed, aborting. %d\n",
			      ret);

		goto err_vnic_unreg;
	}

	ret = svnic_dev_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev init failed. aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Get vNIC information */
	ret = snic_get_vnic_config(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Get vNIC configuration failed, aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs */
	max_ios = snic->config.io_throttle_count;
	if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
		shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
					 max_t(u32, SNIC_MIN_IO_REQ, max_ios));

	snic->max_tag_id = shost->can_queue;

	shost->max_lun = snic->config.luns_per_tgt;
	shost->max_id = SNIC_MAX_TARGET;

	shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/

	snic_get_res_counts(snic);

	/*
	 * Assumption: only MSI-X is supported
	 */
	ret = snic_set_intr_mode(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to set intr mode aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	ret = snic_alloc_vnic_res(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc vNIC resources aborting. %d\n",
			      ret);

		goto err_clear_intr;
	}

	/* Initialize specific lists */
	INIT_LIST_HEAD(&snic->list);

	/*
	 * spl_cmd_list for maintaining snic specific cmds
	 * such as EXCH_VER_REQ, REPORT_TARGETS etc
	 */
	INIT_LIST_HEAD(&snic->spl_cmd_list);
	spin_lock_init(&snic->spl_cmd_lock);

	/* initialize all snic locks */
	spin_lock_init(&snic->snic_lock);

	for (i = 0; i < SNIC_WQ_MAX; i++)
		spin_lock_init(&snic->wq_lock[i]);

	for (i = 0; i < SNIC_IO_LOCKS; i++)
		spin_lock_init(&snic->io_req_lock[i]);

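	/*
	 * Create one mempool per request cache. The minimum reserve of two
	 * elements per pool lets request allocation make forward progress
	 * even under memory pressure.
	 */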
	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");

		goto err_free_res;
	}

	snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");

		goto err_free_dflt_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");

		goto err_free_max_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_TM_CACHE] = pool;

	/* Initialize snic state */
	atomic_set(&snic->state, SNIC_INIT);

	atomic_set(&snic->ios_inflight, 0);

	/* Setup notification buffer area */
	ret = snic_notify_set(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc notify buffer aborting. %d\n",
			      ret);

		goto err_free_tmreq_pool;
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add shost to SCSI
	 */
	ret = snic_add_host(shost, pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Adding scsi host Failed ... exiting. %d\n",
			      ret);

		goto err_notify_unset;
	}

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_add_tail(&snic->list, &snic_glob->snic_list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_disc_init(&snic->disc);
	INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
	INIT_WORK(&snic->disc_work, snic_handle_disc);
	INIT_WORK(&snic->link_work, snic_handle_link);

	/* Enable all queues */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_enable(&snic->wq[i]);

	ret = svnic_dev_enable_wait(snic->vdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev enable failed w/ error %d\n",
			      ret);

		goto err_vdev_enable;
	}

	ret = snic_request_intr(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);

		goto err_req_intr;
	}

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_unmask(&snic->intr[i]);

	snic_set_state(snic, SNIC_ONLINE);

	/* Get snic params */
	ret = snic_get_conf(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to get snic io config from FW w err %d\n",
			      ret);

		goto err_get_conf;
	}

	ret = snic_disc_start(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
			      ret);

		goto err_get_conf;
	}

	SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");

	return 0;

err_get_conf:
	snic_free_all_untagged_reqs(snic);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	snic_free_intr(snic);

err_req_intr:
	svnic_dev_disable(snic->vdev);

err_vdev_enable:
	for (i = 0; i < snic->wq_count; i++) {
		int rc = 0;

		rc = svnic_wq_disable(&snic->wq[i]);
		if (rc) {
			SNIC_HOST_ERR(shost,
				      "WQ Disable Failed w/ err = %d\n", rc);

			break;
		}
	}
	snic_del_host(snic->shost);

err_notify_unset:
	svnic_dev_notify_unset(snic->vdev);

err_free_tmreq_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);

err_free_max_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);

err_free_dflt_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);

err_free_res:
	snic_free_vnic_res(snic);

err_clear_intr:
	snic_clear_intr_mode(snic);

err_dev_close:
	svnic_dev_close(snic->vdev);

err_vnic_unreg:
	svnic_dev_unregister(snic->vdev);

err_iounmap:
	snic_iounmap(snic);

err_rel_regions:
	pci_release_regions(pdev);

err_pci_disable:
	pci_disable_device(pdev);

err_free_snic:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	scsi_host_put(shost);
	pci_set_drvdata(pdev, NULL);

prob_end:
	SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	return ret;
} /* end of snic_probe */


/*
 * snic_remove : invoked on unbinding the interface, to clean up the
 * resources allocated in snic_probe during initialization.
 */
static void
snic_remove(struct pci_dev *pdev)
{
	struct snic *snic = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!snic) {
		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
			  pdev->bus->number, PCI_SLOT(pdev->devfn),
			  PCI_FUNC(pdev->devfn));

		return;
	}

	/*
	 * Mark the state so that the workqueue thread stops forwarding
	 * received frames and link events. The ISR and other threads
	 * that can queue work items will also stop creating work items
	 * on the snic workqueue.
	 */
	snic_set_state(snic, SNIC_OFFLINE);
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->stop_link_events = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	flush_workqueue(snic_glob->event_q);
	snic_disc_term(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->in_remove = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/*
	 * This stops the snic device, masks all interrupts, and drains
	 * completed CQ entries. Posted WQ/RQ/Copy-WQ entries are cleaned up.
	 */
	snic_cleanup(snic);

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_del(&snic->list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_tgt_del_all(snic);
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	snic_del_host(snic->shost);

	svnic_dev_notify_unset(snic->vdev);
	snic_free_intr(snic);
	snic_free_vnic_res(snic);
	snic_clear_intr_mode(snic);
	svnic_dev_close(snic->vdev);
	svnic_dev_unregister(snic->vdev);
	snic_iounmap(snic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* this frees Scsi_Host and snic memory (continuous chunk) */
	scsi_host_put(snic->shost);
} /* end of snic_remove */


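/*
 * Module-wide context shared by all snic instances: per-host snic
 * structures are linked on snic_glob->snic_list, and the shared request
 * caches and event workqueue created in snic_global_data_init() live here.
 */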
struct snic_global *snic_glob;

/*
 * snic_global_data_init : Initializes SNIC Global Data.
 * Note: all global lists and variables should be part of the global data;
 * this helps in debugging.
 */
static int
snic_global_data_init(void)
{
	int ret = 0;
	struct kmem_cache *cachep;
	ssize_t len = 0;

	snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);

	if (!snic_glob) {
		SNIC_ERR("Failed to allocate Global Context.\n");

		ret = -ENOMEM;
		goto gdi_end;
	}

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Debugfs related Initialization */
	/* Create debugfs entries for snic */
	ret = snic_debugfs_init();
	if (ret < 0) {
		SNIC_ERR("Failed to create debugfs dir for tracing and stats.\n");
		snic_debugfs_term();
		/* continue even if it fails */
	}

	/* Trace related Initialization */
	/* Allocate memory for trace buffer */
	ret = snic_trc_init();
	if (ret < 0) {
		SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
		snic_trc_free();
		/* continue even if it fails */
	}

#endif
	INIT_LIST_HEAD(&snic_glob->snic_list);
	spin_lock_init(&snic_glob->snic_list_lock);

	/* Create a cache for allocation of snic_host_req+default size ESGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
	cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic default sgl slab\n");
		ret = -ENOMEM;

		goto err_dflt_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;

	/* Create a cache for allocation of max size Extended SGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
	cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic max sgl slab\n");
		ret = -ENOMEM;

		goto err_max_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;

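	/* Create a cache for allocation of task management (TM) requests */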
	len = sizeof(struct snic_host_req);
	cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic tm req slab\n");
		ret = -ENOMEM;

		goto err_tmreq_slab;
	}
	snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;

	/* snic_event queue */
	snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
	if (!snic_glob->event_q) {
		SNIC_ERR("snic event queue create failed\n");
		ret = -ENOMEM;

		goto err_eventq;
	}

	return ret;

err_eventq:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);

err_tmreq_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);

err_max_req_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

err_dflt_req_slab:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_trc_free();
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;

gdi_end:
	return ret;
} /* end of snic_glob_init */

/*
 * snic_global_data_cleanup : Frees SNIC Global Data
 */
static void
snic_global_data_cleanup(void)
{
	SNIC_BUG_ON(snic_glob == NULL);

	destroy_workqueue(snic_glob->event_q);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Freeing Trace Resources */
	snic_trc_free();

	/* Freeing Debugfs Resources */
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;
} /* end of snic_glob_cleanup */

static struct pci_driver snic_driver = {
	.name = SNIC_DRV_NAME,
	.id_table = snic_id_table,
	.probe = snic_probe,
	.remove = snic_remove,
};

static int __init
snic_init_module(void)
{
	int ret = 0;

#ifndef __x86_64__
	SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
#endif

	SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);

	ret = snic_global_data_init();
	if (ret) {
		SNIC_ERR("Failed to Initialize Global Data.\n");

		return ret;
	}

	ret = pci_register_driver(&snic_driver);
	if (ret < 0) {
		SNIC_ERR("PCI driver register error\n");

		goto err_pci_reg;
	}

	return ret;

err_pci_reg:
	snic_global_data_cleanup();

	return ret;
}

static void __exit
snic_cleanup_module(void)
{
	pci_unregister_driver(&snic_driver);
	snic_global_data_cleanup();
}

module_init(snic_init_module);
module_exit(snic_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
MODULE_VERSION(SNIC_DRV_VERSION);
MODULE_DEVICE_TABLE(pci, snic_id_table);
MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
	      "Sesidhar Baddela <sebaddel@cisco.com>");