drivers/scsi/snic/snic_main.c
1 /*
2 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17
18 #include <linux/module.h>
19 #include <linux/mempool.h>
20 #include <linux/string.h>
21 #include <linux/slab.h>
22 #include <linux/errno.h>
23 #include <linux/init.h>
24 #include <linux/pci.h>
25 #include <linux/skbuff.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/workqueue.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_tcq.h>
31
32 #include "snic.h"
33 #include "snic_fwint.h"
34
35 #define PCI_DEVICE_ID_CISCO_SNIC 0x0046
36
37 /* Devices supported by the snic module */
38 static struct pci_device_id snic_id_table[] = {
39 {PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
40 { 0, } /* end of table */
41 };
42
43 unsigned int snic_log_level = 0x0;
44 module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
45 MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");
46
47 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
48 unsigned int snic_trace_max_pages = 16;
49 module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
50 MODULE_PARM_DESC(snic_trace_max_pages,
51 "Total allocated memory pages for snic trace buffer");
52
53 #endif
54 unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
55 module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
56 MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
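/*
 * These parameters can be given at load time, for example
 *   modprobe snic snic_max_qdepth=64 snic_log_level=0x1
 * and, since they are declared S_IWUSR, can also be tuned at runtime via
 * /sys/module/snic/parameters/<name>.
 */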
57
58 /*
59 * snic_slave_alloc : callback function to SCSI Mid Layer, called on
60 * scsi device initialization.
61 */
62 static int
63 snic_slave_alloc(struct scsi_device *sdev)
64 {
65 struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
66
67 if (!tgt || snic_tgt_chkready(tgt))
68 return -ENXIO;
69
70 return 0;
71 }
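/*
 * A non-zero return here (-ENXIO) tells the midlayer not to create the
 * scsi_device, so LUNs are not exposed while the target is still coming
 * up or is otherwise not ready (snic_tgt_chkready()).
 */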
72
73 /*
74 * snic_slave_configure : callback function to SCSI Mid Layer, called
75 * during scsi device configuration to set queue depth and command timeout.
76 */
77 static int
78 snic_slave_configure(struct scsi_device *sdev)
79 {
80 struct snic *snic = shost_priv(sdev->host);
81 u32 qdepth = 0, max_ios = 0;
82 int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;
83
84 /* Set Queue Depth */
85 max_ios = snic_max_qdepth;
86 qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
87 scsi_change_queue_depth(sdev, qdepth);
88
89 if (snic->fwinfo.io_tmo > 1)
90 tmo = snic->fwinfo.io_tmo * HZ;
91
92 /* FW requires extended timeouts */
93 blk_queue_rq_timeout(sdev->request_queue, tmo);
94
95 return 0;
96 }
97
98 static int
99 snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
100 {
101 int qsz = 0;
102
103 qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
104 scsi_change_queue_depth(sdev, qsz);
105 SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth);
106
107 return sdev->queue_depth;
108 }
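/*
 * The midlayer calls .change_queue_depth when user space writes the scsi
 * device's queue_depth attribute, for example
 *   echo 32 > /sys/bus/scsi/devices/<h:c:t:l>/queue_depth
 * The requested value is clamped to SNIC_MAX_QUEUE_DEPTH above.
 */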
109
110 static struct scsi_host_template snic_host_template = {
111 .module = THIS_MODULE,
112 .name = SNIC_DRV_NAME,
113 .queuecommand = snic_queuecommand,
114 .eh_abort_handler = snic_abort_cmd,
115 .eh_device_reset_handler = snic_device_reset,
116 .eh_host_reset_handler = snic_host_reset,
117 .slave_alloc = snic_slave_alloc,
118 .slave_configure = snic_slave_configure,
119 .change_queue_depth = snic_change_queue_depth,
120 .this_id = -1,
121 .cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
122 .can_queue = SNIC_MAX_IO_REQ,
123 .use_clustering = ENABLE_CLUSTERING,
124 .sg_tablesize = SNIC_MAX_SG_DESC_CNT,
125 .max_sectors = 0x800,
126 .shost_attrs = snic_attrs,
127 .use_blk_tags = 1,
128 .track_queue_depth = 1,
129 .cmd_size = sizeof(struct snic_internal_io_state),
130 .proc_name = "snic_scsi",
131 };
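/*
 * .can_queue above is only the default; snic_probe() may override it with
 * the io_throttle_count reported by the firmware, bounded by
 * SNIC_MIN_IO_REQ and SNIC_MAX_IO_REQ.
 */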
132
133 /*
134 * snic_handle_link_event : Handles link events such as link up/down/error
135 */
136 void
137 snic_handle_link_event(struct snic *snic)
138 {
139 unsigned long flags;
140
141 spin_lock_irqsave(&snic->snic_lock, flags);
142 if (snic->stop_link_events) {
143 spin_unlock_irqrestore(&snic->snic_lock, flags);
144
145 return;
146 }
147 spin_unlock_irqrestore(&snic->snic_lock, flags);
148
149 queue_work(snic_glob->event_q, &snic->link_work);
150 } /* end of snic_handle_link_event */
151
152 /*
153 * snic_notify_set : sets up the notification area used to receive
154 * events from the firmware.
155 * Note: snic supports only MSI-X interrupts, so we can just call
156 * svnic_dev_notify_set directly.
157 */
158 static int
159 snic_notify_set(struct snic *snic)
160 {
161 int ret = 0;
162 enum vnic_dev_intr_mode intr_mode;
163
164 intr_mode = svnic_dev_get_intr_mode(snic->vdev);
165
166 if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
167 ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
168 } else {
169 SNIC_HOST_ERR(snic->shost,
170 "Interrupt mode should be setup before devcmd notify set %d\n",
171 intr_mode);
172 ret = -1;
173 }
174
175 return ret;
176 } /* end of snic_notify_set */
177
178 /*
179 * snic_dev_wait : polls vnic open status.
180 */
181 static int
182 snic_dev_wait(struct vnic_dev *vdev,
183 int (*start)(struct vnic_dev *, int),
184 int (*finished)(struct vnic_dev *, int *),
185 int arg)
186 {
187 unsigned long time;
188 int ret, done;
189 int retry_cnt = 0;
190
191 ret = start(vdev, arg);
192 if (ret)
193 return ret;
194
195 	/*
196 	 * Wait for func to complete; 2 seconds max.
197 	 *
198 	 * Sometimes schedule_timeout_uninterruptible takes a long time
199 	 * to wake up, which can result in skipping the retry. The retry
200 	 * counter ensures that we retry at least twice.
201 	 */
202 time = jiffies + (HZ * 2);
203 do {
204 ret = finished(vdev, &done);
205 if (ret)
206 return ret;
207
208 if (done)
209 return 0;
210 schedule_timeout_uninterruptible(HZ/10);
211 ++retry_cnt;
212 } while (time_after(time, jiffies) || (retry_cnt < 3));
213
214 return -ETIMEDOUT;
215 } /* end of snic_dev_wait */
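/*
 * snic_probe() uses this helper as
 *   snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
 * to issue the vNIC open devcmd and then poll for its completion.
 */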
216
217 /*
218 * snic_cleanup: called by snic_remove
219 * Stops the snic device, masks all interrupts, drains completed CQ
220 * entries, and cleans up posted WQ/RQ/Copy-WQ entries.
221 */
222 static int
223 snic_cleanup(struct snic *snic)
224 {
225 unsigned int i;
226 int ret;
227
228 svnic_dev_disable(snic->vdev);
229 for (i = 0; i < snic->intr_count; i++)
230 svnic_intr_mask(&snic->intr[i]);
231
232 for (i = 0; i < snic->wq_count; i++) {
233 ret = svnic_wq_disable(&snic->wq[i]);
234 if (ret)
235 return ret;
236 }
237
238 /* Clean up completed IOs */
239 snic_fwcq_cmpl_handler(snic, -1);
240
241 snic_wq_cmpl_handler(snic, -1);
242
243 /* Clean up the IOs that have not completed */
244 for (i = 0; i < snic->wq_count; i++)
245 svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);
246
247 for (i = 0; i < snic->cq_count; i++)
248 svnic_cq_clean(&snic->cq[i]);
249
250 for (i = 0; i < snic->intr_count; i++)
251 svnic_intr_clean(&snic->intr[i]);
252
253 /* Cleanup snic specific requests */
254 snic_free_all_untagged_reqs(snic);
255
256 /* Cleanup Pending SCSI commands */
257 snic_shutdown_scsi_cleanup(snic);
258
259 for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
260 mempool_destroy(snic->req_pool[i]);
261
262 return 0;
263 } /* end of snic_cleanup */
264
265
266 static void
267 snic_iounmap(struct snic *snic)
268 {
269 if (snic->bar0.vaddr)
270 iounmap(snic->bar0.vaddr);
271 }
272
273 /*
274 * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
275 */
276 static int
277 snic_vdev_open_done(struct vnic_dev *vdev, int *done)
278 {
279 struct snic *snic = svnic_dev_priv(vdev);
280 int ret;
281 int nretries = 5;
282
283 do {
284 ret = svnic_dev_open_done(vdev, done);
285 if (ret == 0)
286 break;
287
288 SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n");
289 } while (nretries--);
290
291 return ret;
292 } /* end of snic_vdev_open_done */
293
294 /*
295 * snic_add_host : registers scsi host with ML
296 */
297 static int
298 snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
299 {
300 int ret = 0;
301
302 ret = scsi_add_host(shost, &pdev->dev);
303 if (ret) {
304 SNIC_HOST_ERR(shost,
305 "snic: scsi_add_host failed. %d\n",
306 ret);
307
308 return ret;
309 }
310
311 SNIC_BUG_ON(shost->work_q != NULL);
312 snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
313 shost->host_no);
314 shost->work_q = create_singlethread_workqueue(shost->work_q_name);
315 if (!shost->work_q) {
316 SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");
317
318 ret = -ENOMEM;
319 }
320
321 return ret;
322 } /* end of snic_add_host */
323
324 static void
325 snic_del_host(struct Scsi_Host *shost)
326 {
327 if (!shost->work_q)
328 return;
329
330 destroy_workqueue(shost->work_q);
331 shost->work_q = NULL;
332 scsi_remove_host(shost);
333 }
334
335 int
336 snic_get_state(struct snic *snic)
337 {
338 return atomic_read(&snic->state);
339 }
340
341 void
342 snic_set_state(struct snic *snic, enum snic_state state)
343 {
344 SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
345 snic_state_to_str(snic_get_state(snic)),
346 snic_state_to_str(state));
347
348 atomic_set(&snic->state, state);
349 }
350
351 /*
352 * snic_probe : Initialize the snic interface.
353 */
354 static int
355 snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
356 {
357 struct Scsi_Host *shost;
358 struct snic *snic;
359 mempool_t *pool;
360 unsigned long flags;
361 u32 max_ios = 0;
362 int ret, i;
363
364 /* Device Information */
365 SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
366 pdev->vendor, pdev->device, pdev->subsystem_vendor,
367 pdev->subsystem_device);
368
369 SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
370 pdev->bus->number, PCI_SLOT(pdev->devfn),
371 PCI_FUNC(pdev->devfn));
372
373 /*
374 * Allocate SCSI Host and setup association between host, and snic
375 */
376 shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
377 if (!shost) {
378 SNIC_ERR("Unable to alloc scsi_host\n");
379 ret = -ENOMEM;
380
381 goto prob_end;
382 }
383 snic = shost_priv(shost);
384 snic->shost = shost;
385
386 snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
387 shost->host_no);
388
389 SNIC_HOST_INFO(shost,
390 "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
391 shost->host_no, snic, shost, pdev->bus->number,
392 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
393 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
394 /* Per snic debugfs init */
395 ret = snic_stats_debugfs_init(snic);
396 if (ret) {
397 SNIC_HOST_ERR(snic->shost,
398 "Failed to initialize debugfs stats\n");
399 snic_stats_debugfs_remove(snic);
400 }
401 #endif
402
403 /* Setup PCI Resources */
404 pci_set_drvdata(pdev, snic);
405 snic->pdev = pdev;
406
407 ret = pci_enable_device(pdev);
408 if (ret) {
409 SNIC_HOST_ERR(shost,
410 "Cannot enable PCI Resources, aborting : %d\n",
411 ret);
412
413 goto err_free_snic;
414 }
415
416 ret = pci_request_regions(pdev, SNIC_DRV_NAME);
417 if (ret) {
418 SNIC_HOST_ERR(shost,
419 "Cannot obtain PCI Resources, aborting : %d\n",
420 ret);
421
422 goto err_pci_disable;
423 }
424
425 pci_set_master(pdev);
426
427 	/*
428 	 * Query the PCI controller on the system for the device's DMA
429 	 * addressing limitation. Try a 43-bit mask first, and fall
430 	 * back to 32-bit.
431 	 */
432 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
433 if (ret) {
434 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
435 if (ret) {
436 SNIC_HOST_ERR(shost,
437 "No Usable DMA Configuration, aborting %d\n",
438 ret);
439
440 goto err_rel_regions;
441 }
442
443 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
444 if (ret) {
445 SNIC_HOST_ERR(shost,
446 "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
447 ret);
448
449 goto err_rel_regions;
450 }
451 } else {
452 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
453 if (ret) {
454 SNIC_HOST_ERR(shost,
455 "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
456 ret);
457
458 goto err_rel_regions;
459 }
460 }
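	/*
	 * The pci_set_dma_mask()/pci_set_consistent_dma_mask() pair above is
	 * the older DMA API; the same 43-bit-then-32-bit fallback would
	 * typically be written with
	 * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(...)) on newer
	 * kernels.
	 */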
461
462
463 /* Map vNIC resources from BAR0 */
464 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
465 SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
466
467 ret = -ENODEV;
468 goto err_rel_regions;
469 }
470
471 snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
472 if (!snic->bar0.vaddr) {
473 SNIC_HOST_ERR(shost,
474 "Cannot memory map BAR0 res hdr aborting.\n");
475
476 ret = -ENODEV;
477 goto err_rel_regions;
478 }
479
480 snic->bar0.bus_addr = pci_resource_start(pdev, 0);
481 snic->bar0.len = pci_resource_len(pdev, 0);
482 SNIC_BUG_ON(snic->bar0.bus_addr == 0);
483
484 /* Devcmd2 Resource Allocation and Initialization */
485 snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
486 if (!snic->vdev) {
487 SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");
488
489 ret = -ENODEV;
490 goto err_iounmap;
491 }
492
493 ret = svnic_dev_cmd_init(snic->vdev, 0);
494 if (ret) {
495 SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);
496
497 goto err_vnic_unreg;
498 }
499
500 ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
501 if (ret) {
502 SNIC_HOST_ERR(shost,
503 "vNIC dev open failed, aborting. %d\n",
504 ret);
505
506 goto err_vnic_unreg;
507 }
508
509 ret = svnic_dev_init(snic->vdev, 0);
510 if (ret) {
511 SNIC_HOST_ERR(shost,
512 "vNIC dev init failed. aborting. %d\n",
513 ret);
514
515 goto err_dev_close;
516 }
517
518 /* Get vNIC information */
519 ret = snic_get_vnic_config(snic);
520 if (ret) {
521 SNIC_HOST_ERR(shost,
522 "Get vNIC configuration failed, aborting. %d\n",
523 ret);
524
525 goto err_dev_close;
526 }
527
528 /* Configure Maximum Outstanding IO reqs */
529 max_ios = snic->config.io_throttle_count;
530 if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
531 shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
532 max_t(u32, SNIC_MIN_IO_REQ, max_ios));
533
534 snic->max_tag_id = shost->can_queue;
535
536 ret = scsi_init_shared_tag_map(shost, snic->max_tag_id);
537 if (ret) {
538 SNIC_HOST_ERR(shost,
539 "Unable to alloc shared tag map. %d\n",
540 ret);
541
542 goto err_dev_close;
543 }
544
545 shost->max_lun = snic->config.luns_per_tgt;
546 shost->max_id = SNIC_MAX_TARGET;
547
548 shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/
549
550 snic_get_res_counts(snic);
551
552 /*
553 * Assumption: Only MSIx is supported
554 */
555 ret = snic_set_intr_mode(snic);
556 if (ret) {
557 SNIC_HOST_ERR(shost,
558 "Failed to set intr mode aborting. %d\n",
559 ret);
560
561 goto err_dev_close;
562 }
563
564 ret = snic_alloc_vnic_res(snic);
565 if (ret) {
566 SNIC_HOST_ERR(shost,
567 "Failed to alloc vNIC resources aborting. %d\n",
568 ret);
569
570 goto err_clear_intr;
571 }
572
573 /* Initialize specific lists */
574 INIT_LIST_HEAD(&snic->list);
575
576 /*
577 * spl_cmd_list for maintaining snic specific cmds
578 * such as EXCH_VER_REQ, REPORT_TARGETS etc
579 */
580 INIT_LIST_HEAD(&snic->spl_cmd_list);
581 spin_lock_init(&snic->spl_cmd_lock);
582
583 /* initialize all snic locks */
584 spin_lock_init(&snic->snic_lock);
585
586 for (i = 0; i < SNIC_WQ_MAX; i++)
587 spin_lock_init(&snic->wq_lock[i]);
588
589 for (i = 0; i < SNIC_IO_LOCKS; i++)
590 spin_lock_init(&snic->io_req_lock[i]);
591
592 pool = mempool_create_slab_pool(2,
593 snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
594 if (!pool) {
595 SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
596
597 goto err_free_res;
598 }
599
600 snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;
601
602 pool = mempool_create_slab_pool(2,
603 snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
604 if (!pool) {
605 SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
606
607 goto err_free_dflt_sgl_pool;
608 }
609
610 snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;
611
612 pool = mempool_create_slab_pool(2,
613 snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
614 if (!pool) {
615 SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
616
617 goto err_free_max_sgl_pool;
618 }
619
620 snic->req_pool[SNIC_REQ_TM_CACHE] = pool;
621
622 /* Initialize snic state */
623 atomic_set(&snic->state, SNIC_INIT);
624
625 atomic_set(&snic->ios_inflight, 0);
626
627 /* Setup notification buffer area */
628 ret = snic_notify_set(snic);
629 if (ret) {
630 SNIC_HOST_ERR(shost,
631 "Failed to alloc notify buffer aborting. %d\n",
632 ret);
633
634 goto err_free_tmreq_pool;
635 }
636
637 /*
638 * Initialization done with PCI system, hardware, firmware.
639 * Add shost to SCSI
640 */
641 ret = snic_add_host(shost, pdev);
642 if (ret) {
643 SNIC_HOST_ERR(shost,
644 "Adding scsi host Failed ... exiting. %d\n",
645 ret);
646
647 goto err_notify_unset;
648 }
649
650 spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
651 list_add_tail(&snic->list, &snic_glob->snic_list);
652 spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
653
654 snic_disc_init(&snic->disc);
655 INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
656 INIT_WORK(&snic->disc_work, snic_handle_disc);
657 INIT_WORK(&snic->link_work, snic_handle_link);
658
659 /* Enable all queues */
660 for (i = 0; i < snic->wq_count; i++)
661 svnic_wq_enable(&snic->wq[i]);
662
663 ret = svnic_dev_enable_wait(snic->vdev);
664 if (ret) {
665 SNIC_HOST_ERR(shost,
666 "vNIC dev enable failed w/ error %d\n",
667 ret);
668
669 goto err_vdev_enable;
670 }
671
672 ret = snic_request_intr(snic);
673 if (ret) {
674 SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);
675
676 goto err_req_intr;
677 }
678
679 for (i = 0; i < snic->intr_count; i++)
680 svnic_intr_unmask(&snic->intr[i]);
681
682 snic_set_state(snic, SNIC_ONLINE);
683
684 /* Get snic params */
685 ret = snic_get_conf(snic);
686 if (ret) {
687 SNIC_HOST_ERR(shost,
688 "Failed to get snic io config from FW w err %d\n",
689 ret);
690
691 goto err_get_conf;
692 }
693
694 ret = snic_disc_start(snic);
695 if (ret) {
696 SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
697 ret);
698
699 goto err_get_conf;
700 }
701
702 SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");
703
704 return 0;
705
706 err_get_conf:
707 snic_free_all_untagged_reqs(snic);
708
709 for (i = 0; i < snic->intr_count; i++)
710 svnic_intr_mask(&snic->intr[i]);
711
712 snic_free_intr(snic);
713
714 err_req_intr:
715 svnic_dev_disable(snic->vdev);
716
717 err_vdev_enable:
718 for (i = 0; i < snic->wq_count; i++) {
719 int rc = 0;
720
721 rc = svnic_wq_disable(&snic->wq[i]);
722 if (rc) {
723 SNIC_HOST_ERR(shost,
724 "WQ Disable Failed w/ err = %d\n", rc);
725
726 break;
727 }
728 }
729 snic_del_host(snic->shost);
730
731 err_notify_unset:
732 svnic_dev_notify_unset(snic->vdev);
733
734 err_free_tmreq_pool:
735 mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);
736
737 err_free_max_sgl_pool:
738 mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);
739
740 err_free_dflt_sgl_pool:
741 mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);
742
743 err_free_res:
744 snic_free_vnic_res(snic);
745
746 err_clear_intr:
747 snic_clear_intr_mode(snic);
748
749 err_dev_close:
750 svnic_dev_close(snic->vdev);
751
752 err_vnic_unreg:
753 svnic_dev_unregister(snic->vdev);
754
755 err_iounmap:
756 snic_iounmap(snic);
757
758 err_rel_regions:
759 pci_release_regions(pdev);
760
761 err_pci_disable:
762 pci_disable_device(pdev);
763
764 err_free_snic:
765 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
766 snic_stats_debugfs_remove(snic);
767 #endif
768 scsi_host_put(shost);
769 pci_set_drvdata(pdev, NULL);
770
771 prob_end:
772 SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
773 pdev->bus->number, PCI_SLOT(pdev->devfn),
774 PCI_FUNC(pdev->devfn));
775
776 return ret;
777 } /* end of snic_probe */
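/*
 * The error labels in snic_probe() unwind in the reverse order of setup, so
 * a failure at any step releases only the resources acquired before it.
 */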
778
779
780 /*
781 * snic_remove : invoked on unbinding the interface, to clean up the
782 * resources allocated in snic_probe during initialization.
783 */
784 static void
785 snic_remove(struct pci_dev *pdev)
786 {
787 struct snic *snic = pci_get_drvdata(pdev);
788 unsigned long flags;
789
790 if (!snic) {
791 SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
792 pdev->bus->number, PCI_SLOT(pdev->devfn),
793 PCI_FUNC(pdev->devfn));
794
795 return;
796 }
797
798 	/*
799 	 * Mark state so that the workqueue thread stops forwarding
800 	 * received frames and link events. The ISR and other threads
801 	 * that can queue work items will also stop creating work
802 	 * items on the snic workqueue.
803 	 */
804 snic_set_state(snic, SNIC_OFFLINE);
805 spin_lock_irqsave(&snic->snic_lock, flags);
806 snic->stop_link_events = 1;
807 spin_unlock_irqrestore(&snic->snic_lock, flags);
808
809 flush_workqueue(snic_glob->event_q);
810 snic_disc_term(snic);
811
812 spin_lock_irqsave(&snic->snic_lock, flags);
813 snic->in_remove = 1;
814 spin_unlock_irqrestore(&snic->snic_lock, flags);
815
816 	/*
817 	 * This stops the snic device, masks all interrupts, drains
818 	 * completed CQ entries, and cleans up posted WQ/RQ/Copy-WQ
819 	 * entries.
820 	 */
821 snic_cleanup(snic);
822
823 spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
824 list_del(&snic->list);
825 spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
826
827 snic_tgt_del_all(snic);
828 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
829 snic_stats_debugfs_remove(snic);
830 #endif
831 snic_del_host(snic->shost);
832
833 svnic_dev_notify_unset(snic->vdev);
834 snic_free_intr(snic);
835 snic_free_vnic_res(snic);
836 snic_clear_intr_mode(snic);
837 svnic_dev_close(snic->vdev);
838 svnic_dev_unregister(snic->vdev);
839 snic_iounmap(snic);
840 pci_release_regions(pdev);
841 pci_disable_device(pdev);
842 pci_set_drvdata(pdev, NULL);
843
844 /* this frees Scsi_Host and snic memory (continuous chunk) */
845 scsi_host_put(snic->shost);
846 } /* end of snic_remove */
847
848
849 struct snic_global *snic_glob;
850
851 /*
852 * snic_global_data_init: Initialize SNIC Global Data
853 * Note: all global lists and variables should be part of the global
854 * data; this helps in debugging.
855 */
856 static int
857 snic_global_data_init(void)
858 {
859 int ret = 0;
860 struct kmem_cache *cachep;
861 ssize_t len = 0;
862
863 snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);
864
865 if (!snic_glob) {
866 SNIC_ERR("Failed to allocate Global Context.\n");
867
868 ret = -ENOMEM;
869 goto gdi_end;
870 }
871
872 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
873 /* Debugfs related Initialization */
874 /* Create debugfs entries for snic */
875 ret = snic_debugfs_init();
876 if (ret < 0) {
877 SNIC_ERR("Failed to create sysfs dir for tracing and stats.\n");
878 snic_debugfs_term();
879 /* continue even if it fails */
880 }
881
882 /* Trace related Initialization */
883 /* Allocate memory for trace buffer */
884 ret = snic_trc_init();
885 if (ret < 0) {
886 SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
887 snic_trc_free();
888 /* continue even if it fails */
889 }
890
891 #endif
892 INIT_LIST_HEAD(&snic_glob->snic_list);
893 spin_lock_init(&snic_glob->snic_list_lock);
894
895 /* Create a cache for allocation of snic_host_req+default size ESGLs */
896 len = sizeof(struct snic_req_info);
897 len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
898 cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
899 SLAB_HWCACHE_ALIGN, NULL);
900 if (!cachep) {
901 SNIC_ERR("Failed to create snic default sgl slab\n");
902 ret = -ENOMEM;
903
904 goto err_dflt_req_slab;
905 }
906 snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;
907
908 /* Create a cache for allocation of max size Extended SGLs */
909 len = sizeof(struct snic_req_info);
910 len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
911 cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
912 SLAB_HWCACHE_ALIGN, NULL);
913 if (!cachep) {
914 SNIC_ERR("Failed to create snic max sgl slab\n");
915 ret = -ENOMEM;
916
917 goto err_max_req_slab;
918 }
919 snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
920
921 	len = sizeof(struct snic_host_req);
922 	cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
923 				   SLAB_HWCACHE_ALIGN, NULL);
924 if (!cachep) {
925 SNIC_ERR("Failed to create snic tm req slab\n");
926 ret = -ENOMEM;
927
928 goto err_tmreq_slab;
929 }
930 snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;
931
932 /* snic_event queue */
933 snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
934 if (!snic_glob->event_q) {
935 SNIC_ERR("snic event queue create failed\n");
936 ret = -ENOMEM;
937
938 goto err_eventq;
939 }
940
941 return ret;
942
943 err_eventq:
944 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
945
946 err_tmreq_slab:
947 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
948
949 err_max_req_slab:
950 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
951
952 err_dflt_req_slab:
953 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
954 snic_trc_free();
955 snic_debugfs_term();
956 #endif
957 kfree(snic_glob);
958 snic_glob = NULL;
959
960 gdi_end:
961 return ret;
962 } /* end of snic_glob_init */
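/*
 * snic_probe() layers small per-adapter mempools (mempool_create_slab_pool
 * with a minimum of two elements) on top of the three kmem caches created
 * here, so request allocation can make progress under memory pressure.
 */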
963
964 /*
965 * snic_global_data_cleanup : Frees SNIC Global Data
966 */
967 static void
968 snic_global_data_cleanup(void)
969 {
970 SNIC_BUG_ON(snic_glob == NULL);
971
972 destroy_workqueue(snic_glob->event_q);
973 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
974 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
975 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
976
977 #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
978 /* Freeing Trace Resources */
979 snic_trc_free();
980
981 /* Freeing Debugfs Resources */
982 snic_debugfs_term();
983 #endif
984 kfree(snic_glob);
985 snic_glob = NULL;
986 } /* end of snic_glob_cleanup */
987
988 static struct pci_driver snic_driver = {
989 .name = SNIC_DRV_NAME,
990 .id_table = snic_id_table,
991 .probe = snic_probe,
992 .remove = snic_remove,
993 };
994
995 static int __init
996 snic_init_module(void)
997 {
998 int ret = 0;
999
1000 #ifndef __x86_64__
1001 SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
1002 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1003 #endif
1004
1005 SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);
1006
1007 ret = snic_global_data_init();
1008 if (ret) {
1009 SNIC_ERR("Failed to Initialize Global Data.\n");
1010
1011 return ret;
1012 }
1013
1014 ret = pci_register_driver(&snic_driver);
1015 if (ret < 0) {
1016 SNIC_ERR("PCI driver register error\n");
1017
1018 goto err_pci_reg;
1019 }
1020
1021 return ret;
1022
1023 err_pci_reg:
1024 snic_global_data_cleanup();
1025
1026 return ret;
1027 }
1028
1029 static void __exit
1030 snic_cleanup_module(void)
1031 {
1032 pci_unregister_driver(&snic_driver);
1033 snic_global_data_cleanup();
1034 }
1035
1036 module_init(snic_init_module);
1037 module_exit(snic_cleanup_module);
1038
1039 MODULE_LICENSE("GPL v2");
1040 MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
1041 MODULE_VERSION(SNIC_DRV_VERSION);
1042 MODULE_DEVICE_TABLE(pci, snic_id_table);
1043 MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
1044 "Sesidhar Baddela <sebaddel@cisco.com>");