]>
Commit | Line | Data |
---|---|---|
11fdf7f2 | 1 | // SPDX-License-Identifier: GPL-2.0 |
7c673cae | 2 | /*- |
11fdf7f2 | 3 | * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. |
7c673cae FG |
4 | */ |
5 | ||
6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
7 | ||
8 | #include <linux/device.h> | |
9 | #include <linux/module.h> | |
10 | #include <linux/pci.h> | |
11 | #include <linux/uio_driver.h> | |
12 | #include <linux/io.h> | |
11fdf7f2 | 13 | #include <linux/irq.h> |
7c673cae FG |
14 | #include <linux/msi.h> |
15 | #include <linux/version.h> | |
16 | #include <linux/slab.h> | |
17 | ||
7c673cae FG |
18 | #include <rte_pci_dev_features.h> |
19 | ||
20 | #include "compat.h" | |
21 | ||
/**
 * A structure describing the private information for a uio device.
 */
struct rte_uio_pci_dev {
	struct uio_info info;		/* UIO driver info (maps, irq, callbacks) */
	struct pci_dev *pdev;		/* the PCI device this uio instance wraps */
	enum rte_intr_mode mode;	/* interrupt mode actually in use */
	atomic_t refcnt;		/* open count; first open enables, last close disables */
};
31 | ||
/* Module parameters (described by MODULE_PARM_DESC at the end of the file). */
static int wc_activate;		/* non-zero: skip kernel ioremap so userspace can map WC */
static char *intr_mode;		/* user-requested interrupt mode string, parsed at init */
static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
7c673cae FG |
35 | /* sriov sysfs */ |
36 | static ssize_t | |
37 | show_max_vfs(struct device *dev, struct device_attribute *attr, | |
38 | char *buf) | |
39 | { | |
40 | return snprintf(buf, 10, "%u\n", dev_num_vf(dev)); | |
41 | } | |
42 | ||
43 | static ssize_t | |
44 | store_max_vfs(struct device *dev, struct device_attribute *attr, | |
45 | const char *buf, size_t count) | |
46 | { | |
47 | int err = 0; | |
48 | unsigned long max_vfs; | |
49 | struct pci_dev *pdev = to_pci_dev(dev); | |
50 | ||
51 | if (0 != kstrtoul(buf, 0, &max_vfs)) | |
52 | return -EINVAL; | |
53 | ||
54 | if (0 == max_vfs) | |
55 | pci_disable_sriov(pdev); | |
56 | else if (0 == pci_num_vf(pdev)) | |
57 | err = pci_enable_sriov(pdev, max_vfs); | |
58 | else /* do nothing if change max_vfs number */ | |
59 | err = -EINVAL; | |
60 | ||
61 | return err ? err : count; | |
62 | } | |
63 | ||
/* sysfs attribute "max_vfs": readable by all, writable by root only. */
static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);

/* NULL-terminated attribute list registered against the PCI device. */
static struct attribute *dev_attrs[] = {
	&dev_attr_max_vfs.attr,
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
11fdf7f2 TL |
74 | |
75 | #ifndef HAVE_PCI_MSI_MASK_IRQ | |
7c673cae FG |
/*
 * It masks the msix on/off of generating MSI-X messages.
 */
static void
igbuio_msix_mask_irq(struct msi_desc *desc, s32 state)
{
	u32 mask_bits = desc->masked;
	/* Offset of this entry's vector-control dword in the MSI-X table. */
	unsigned int offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;

	if (state != 0)
		mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;	/* unmask */
	else
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;	/* mask */

	if (mask_bits != desc->masked) {
		writel(mask_bits, desc->mask_base + offset);
		/* read back to flush the posted write before caching state */
		readl(desc->mask_base);
		desc->masked = mask_bits;
	}
}
97 | ||
11fdf7f2 TL |
/*
 * It masks the msi on/off of generating MSI messages.
 */
static void
igbuio_msi_mask_irq(struct pci_dev *pdev, struct msi_desc *desc, int32_t state)
{
	u32 mask_bits = desc->masked;
	/* vector index of this descriptor within the device's MSI block */
	u32 offset = desc->irq - pdev->irq;
	u32 mask = 1 << offset;

	/* nothing to do when the function lacks per-vector mask support */
	if (!desc->msi_attrib.maskbit)
		return;

	if (state != 0)
		mask_bits &= ~mask;	/* unmask */
	else
		mask_bits |= mask;	/* mask */

	if (mask_bits != desc->masked) {
		/* MSI mask lives in config space, not in a memory-mapped table */
		pci_write_config_dword(pdev, desc->mask_pos, mask_bits);
		desc->masked = mask_bits;
	}
}
121 | ||
/*
 * Apply the requested mask state to every MSI/MSI-X descriptor of the
 * device, dispatching on the interrupt mode in use.
 */
static void
igbuio_mask_irq(struct pci_dev *pdev, enum rte_intr_mode mode, s32 irq_state)
{
	struct msi_desc *desc;
	struct list_head *msi_list;

	/* The msi_list moved from pci_dev into struct device in newer kernels. */
#ifdef HAVE_MSI_LIST_IN_GENERIC_DEVICE
	msi_list = &pdev->dev.msi_list;
#else
	msi_list = &pdev->msi_list;
#endif

	if (mode == RTE_INTR_MODE_MSIX) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msix_mask_irq(desc, irq_state);
	} else if (mode == RTE_INTR_MODE_MSI) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msi_mask_irq(pdev, desc, irq_state);
	}
}
142 | #endif | |
143 | ||
7c673cae FG |
/**
 * This is the irqcontrol callback to be registered to uio_info.
 * It can be used to disable/enable interrupt from user space processes.
 *
 * @param info
 *  pointer to uio_info.
 * @param irq_state
 *  state value. 1 to enable interrupt, 0 to disable interrupt.
 *
 * @return
 *  - On success, 0.
 *  - On failure, a negative value.
 */
static int
igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *pdev = udev->pdev;

#ifdef HAVE_PCI_MSI_MASK_IRQ
	struct irq_data *irq = irq_get_irq_data(udev->info.irq);
#endif

	/* serialize against concurrent config-space accessors */
	pci_cfg_access_lock(pdev);

	if (udev->mode == RTE_INTR_MODE_MSIX || udev->mode == RTE_INTR_MODE_MSI) {
#ifdef HAVE_PCI_MSI_MASK_IRQ
		if (irq_state == 1)
			pci_msi_unmask_irq(irq);
		else
			pci_msi_mask_irq(irq);
#else
		/* older kernels: manipulate the mask bits by hand */
		igbuio_mask_irq(pdev, udev->mode, irq_state);
#endif
	}

	if (udev->mode == RTE_INTR_MODE_LEGACY)
		pci_intx(pdev, !!irq_state);

	pci_cfg_access_unlock(pdev);

	return 0;
}
187 | ||
188 | /** | |
189 | * This is interrupt handler which will check if the interrupt is for the right device. | |
190 | * If yes, disable it here and will be enable later. | |
191 | */ | |
192 | static irqreturn_t | |
11fdf7f2 | 193 | igbuio_pci_irqhandler(int irq, void *dev_id) |
7c673cae | 194 | { |
11fdf7f2 TL |
195 | struct rte_uio_pci_dev *udev = (struct rte_uio_pci_dev *)dev_id; |
196 | struct uio_info *info = &udev->info; | |
7c673cae FG |
197 | |
198 | /* Legacy mode need to mask in hardware */ | |
199 | if (udev->mode == RTE_INTR_MODE_LEGACY && | |
200 | !pci_check_and_mask_intx(udev->pdev)) | |
201 | return IRQ_NONE; | |
202 | ||
11fdf7f2 TL |
203 | uio_event_notify(info); |
204 | ||
7c673cae FG |
205 | /* Message signal mode, no share IRQ and automasked */ |
206 | return IRQ_HANDLED; | |
207 | } | |
208 | ||
/*
 * Set up the device interrupt: try the preferred mode first and fall
 * back MSI-X -> MSI -> legacy INTx -> none, then install the handler.
 * Returns 0 on success or a negative errno.
 */
static int
igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev)
{
	int err = 0;
#ifndef HAVE_ALLOC_IRQ_VECTORS
	struct msix_entry msix_entry;
#endif

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
#ifndef HAVE_ALLOC_IRQ_VECTORS
		msix_entry.entry = 0;
		if (pci_enable_msix(udev->pdev, &msix_entry, 1) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSIX) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#endif

	/* falls through - to MSI */
	case RTE_INTR_MODE_MSI:
#ifndef HAVE_ALLOC_IRQ_VECTORS
		if (pci_enable_msi(udev->pdev) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#endif
	/* falls through - to INTX */
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(udev->pdev)) {
			dev_dbg(&udev->pdev->dev, "using INTX");
			/* the legacy line may be shared with other devices */
			udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&udev->pdev->dev, "PCI INTX mask not supported\n");
	/* falls through - to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = UIO_IRQ_NONE;
		break;

	default:
		dev_err(&udev->pdev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		udev->info.irq = UIO_IRQ_NONE;
		err = -EINVAL;
	}

	/* Install the handler unless no interrupt could be configured. */
	if (udev->info.irq != UIO_IRQ_NONE)
		err = request_irq(udev->info.irq, igbuio_pci_irqhandler,
				  udev->info.irq_flags, udev->info.name,
				  udev);
	dev_info(&udev->pdev->dev, "uio device registered with irq %ld\n",
		 udev->info.irq);

	return err;
}
290 | ||
11fdf7f2 TL |
/*
 * Undo igbuio_pci_enable_interrupts(): free the installed handler and
 * release the MSI/MSI-X vectors for the mode that was configured.
 */
static void
igbuio_pci_disable_interrupts(struct rte_uio_pci_dev *udev)
{
	if (udev->info.irq) {
		free_irq(udev->info.irq, udev);
		udev->info.irq = 0;
	}

#ifndef HAVE_ALLOC_IRQ_VECTORS
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	if (udev->mode == RTE_INTR_MODE_MSI)
		pci_disable_msi(udev->pdev);
#else
	if (udev->mode == RTE_INTR_MODE_MSIX ||
	    udev->mode == RTE_INTR_MODE_MSI)
		pci_free_irq_vectors(udev->pdev);
#endif
}
310 | ||
311 | ||
/**
 * This gets called while opening uio device file.
 */
static int
igbuio_pci_open(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;
	int err;

	/* Only the first opener performs device setup. */
	if (atomic_inc_return(&udev->refcnt) != 1)
		return 0;

	/* set bus master, which was cleared by the reset function */
	pci_set_master(dev);

	/* enable interrupts */
	err = igbuio_pci_enable_interrupts(udev);
	if (err) {
		/* roll back the reference taken above */
		atomic_dec(&udev->refcnt);
		dev_err(&dev->dev, "Enable interrupt fails\n");
	}
	return err;
}
336 | ||
/*
 * Called on close of the uio device file; the last closer disables
 * interrupts and stops further DMA from the device.
 */
static int
igbuio_pci_release(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;

	if (atomic_dec_and_test(&udev->refcnt)) {
		/* disable interrupts */
		igbuio_pci_disable_interrupts(udev);

		/* stop the device from further DMA */
		pci_clear_master(dev);
	}

	return 0;
}
7c673cae FG |
353 | |
354 | /* Remap pci resources described by bar #pci_bar in uio resource n. */ | |
355 | static int | |
356 | igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info, | |
357 | int n, int pci_bar, const char *name) | |
358 | { | |
359 | unsigned long addr, len; | |
360 | void *internal_addr; | |
361 | ||
362 | if (n >= ARRAY_SIZE(info->mem)) | |
363 | return -EINVAL; | |
364 | ||
365 | addr = pci_resource_start(dev, pci_bar); | |
366 | len = pci_resource_len(dev, pci_bar); | |
367 | if (addr == 0 || len == 0) | |
368 | return -1; | |
11fdf7f2 TL |
369 | if (wc_activate == 0) { |
370 | internal_addr = ioremap(addr, len); | |
371 | if (internal_addr == NULL) | |
372 | return -1; | |
373 | } else { | |
374 | internal_addr = NULL; | |
375 | } | |
7c673cae FG |
376 | info->mem[n].name = name; |
377 | info->mem[n].addr = addr; | |
378 | info->mem[n].internal_addr = internal_addr; | |
379 | info->mem[n].size = len; | |
380 | info->mem[n].memtype = UIO_MEM_PHYS; | |
381 | return 0; | |
382 | } | |
383 | ||
384 | /* Get pci port io resources described by bar #pci_bar in uio resource n. */ | |
385 | static int | |
386 | igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info, | |
387 | int n, int pci_bar, const char *name) | |
388 | { | |
389 | unsigned long addr, len; | |
390 | ||
391 | if (n >= ARRAY_SIZE(info->port)) | |
392 | return -EINVAL; | |
393 | ||
394 | addr = pci_resource_start(dev, pci_bar); | |
395 | len = pci_resource_len(dev, pci_bar); | |
396 | if (addr == 0 || len == 0) | |
397 | return -EINVAL; | |
398 | ||
399 | info->port[n].name = name; | |
400 | info->port[n].start = addr; | |
401 | info->port[n].size = len; | |
402 | info->port[n].porttype = UIO_PORT_X86; | |
403 | ||
404 | return 0; | |
405 | } | |
406 | ||
407 | /* Unmap previously ioremap'd resources */ | |
408 | static void | |
409 | igbuio_pci_release_iomem(struct uio_info *info) | |
410 | { | |
411 | int i; | |
412 | ||
413 | for (i = 0; i < MAX_UIO_MAPS; i++) { | |
414 | if (info->mem[i].internal_addr) | |
415 | iounmap(info->mem[i].internal_addr); | |
416 | } | |
417 | } | |
418 | ||
419 | static int | |
420 | igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info) | |
421 | { | |
422 | int i, iom, iop, ret; | |
423 | unsigned long flags; | |
424 | static const char *bar_names[PCI_STD_RESOURCE_END + 1] = { | |
425 | "BAR0", | |
426 | "BAR1", | |
427 | "BAR2", | |
428 | "BAR3", | |
429 | "BAR4", | |
430 | "BAR5", | |
431 | }; | |
432 | ||
433 | iom = 0; | |
434 | iop = 0; | |
435 | ||
436 | for (i = 0; i < ARRAY_SIZE(bar_names); i++) { | |
437 | if (pci_resource_len(dev, i) != 0 && | |
438 | pci_resource_start(dev, i) != 0) { | |
439 | flags = pci_resource_flags(dev, i); | |
440 | if (flags & IORESOURCE_MEM) { | |
441 | ret = igbuio_pci_setup_iomem(dev, info, iom, | |
442 | i, bar_names[i]); | |
443 | if (ret != 0) | |
444 | return ret; | |
445 | iom++; | |
446 | } else if (flags & IORESOURCE_IO) { | |
447 | ret = igbuio_pci_setup_ioport(dev, info, iop, | |
448 | i, bar_names[i]); | |
449 | if (ret != 0) | |
450 | return ret; | |
451 | iop++; | |
452 | } | |
453 | } | |
454 | } | |
455 | ||
11fdf7f2 | 456 | return (iom != 0 || iop != 0) ? ret : -ENOENT; |
7c673cae FG |
457 | } |
458 | ||
/*
 * PCI probe callback: enable the device, map its BARs, configure DMA,
 * register the uio device and expose the sriov sysfs group.  Cleanup on
 * failure follows the goto chain in reverse order of acquisition.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
	dma_addr_t map_dma_addr;
	void *map_addr;
	int err;

#ifdef HAVE_PCI_IS_BRIDGE_API
	if (pci_is_bridge(dev)) {
		dev_warn(&dev->dev, "Ignoring PCI bridge device\n");
		return -ENODEV;
	}
#endif

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
	udev->info.open = igbuio_pci_open;
	udev->info.release = igbuio_pci_release;
	udev->info.priv = udev;
	udev->pdev = dev;
	atomic_set(&udev->refcnt, 0);

	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_remove_group;

	pci_set_drvdata(dev, udev);

	/*
	 * Doing a harmless dma mapping for attaching the device to
	 * the iommu identity mapping if kernel boots with iommu=pt.
	 * Note this is not a problem if no IOMMU at all.
	 */
	map_addr = dma_alloc_coherent(&dev->dev, 1024, &map_dma_addr,
			GFP_KERNEL);
	if (map_addr)
		memset(map_addr, 0, 1024);

	if (!map_addr)
		dev_info(&dev->dev, "dma mapping failed\n");
	else {
		dev_info(&dev->dev, "mapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);

		dma_free_coherent(&dev->dev, 1024, map_addr, map_dma_addr);
		dev_info(&dev->dev, "unmapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);
	}

	return 0;

fail_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return err;
}
567 | ||
/* PCI remove callback: undo everything done in probe. */
static void
igbuio_pci_remove(struct pci_dev *dev)
{
	struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);

	/* Force a final release in case userspace still holds the device. */
	igbuio_pci_release(&udev->info, NULL);

	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	uio_unregister_device(&udev->info);
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(udev);
}
582 | ||
583 | static int | |
584 | igbuio_config_intr_mode(char *intr_str) | |
585 | { | |
586 | if (!intr_str) { | |
587 | pr_info("Use MSIX interrupt by default\n"); | |
588 | return 0; | |
589 | } | |
590 | ||
591 | if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) { | |
592 | igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX; | |
593 | pr_info("Use MSIX interrupt\n"); | |
11fdf7f2 TL |
594 | } else if (!strcmp(intr_str, RTE_INTR_MODE_MSI_NAME)) { |
595 | igbuio_intr_mode_preferred = RTE_INTR_MODE_MSI; | |
596 | pr_info("Use MSI interrupt\n"); | |
7c673cae FG |
597 | } else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) { |
598 | igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY; | |
599 | pr_info("Use legacy interrupt\n"); | |
600 | } else { | |
601 | pr_info("Error: bad parameter - %s\n", intr_str); | |
602 | return -EINVAL; | |
603 | } | |
604 | ||
605 | return 0; | |
606 | } | |
607 | ||
/*
 * No static id_table: devices are bound at runtime through sysfs
 * (new_id / bind), which is how DPDK attaches NICs to this driver.
 */
static struct pci_driver igbuio_pci_driver = {
	.name = "igb_uio",
	.id_table = NULL,
	.probe = igbuio_pci_probe,
	.remove = igbuio_pci_remove,
};
614 | ||
/* Module entry point: validate parameters, then register the PCI driver. */
static int __init
igbuio_pci_init_module(void)
{
	int ret;

	/* uio hands raw device access to userspace; refuse on lockdown kernels */
	if (igbuio_kernel_is_locked_down()) {
		pr_err("Not able to use module, kernel lock down is enabled\n");
		return -EINVAL;
	}

	if (wc_activate != 0)
		pr_info("wc_activate is set\n");

	ret = igbuio_config_intr_mode(intr_mode);
	if (ret < 0)
		return ret;

	return pci_register_driver(&igbuio_pci_driver);
}
634 | ||
/* Module exit point: unregister the PCI driver. */
static void __exit
igbuio_pci_exit_module(void)
{
	pci_unregister_driver(&igbuio_pci_driver);
}
640 | ||
module_init(igbuio_pci_init_module);
module_exit(igbuio_pci_exit_module);

/* "intr_mode": preferred interrupt mode, parsed by igbuio_config_intr_mode(). */
module_param(intr_mode, charp, S_IRUGO);
MODULE_PARM_DESC(intr_mode,
"igb_uio interrupt mode (default=msix):\n"
"    " RTE_INTR_MODE_MSIX_NAME "        Use MSIX interrupt\n"
"    " RTE_INTR_MODE_MSI_NAME "         Use MSI interrupt\n"
"    " RTE_INTR_MODE_LEGACY_NAME "      Use Legacy interrupt\n"
"\n");

/* "wc_activate": when non-zero, skip ioremap so BARs can be mapped WC. */
module_param(wc_activate, int, 0);
MODULE_PARM_DESC(wc_activate,
"Activate support for write combining (WC) (default=0)\n"
"    0 - disable\n"
"    other - enable\n");

MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");