/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"

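/*
 * map_capability() reads the bar/offset/length fields of the virtio_pci_cap
 * structure at config-space offset @off, validates that the window of
 * @size bytes at @start (which must hold at least @minlen bytes and be
 * @align-aligned) fits inside both the capability and the BAR, and then
 * ioremaps it with pci_iomap_range().  On success the mapped length is
 * stored in *@len (if non-NULL); on any failure NULL is returned.
 */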
static void __iomem *map_capability(struct pci_dev *dev, int off,
				    size_t minlen,
				    u32 align,
				    u32 start, u32 size,
				    size_t *len)
{
	u8 bar;
	u32 offset, length;
	void __iomem *p;

	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
						 bar),
			     &bar);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
			      &offset);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);

	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
			length, start);
		return NULL;
	}

	if (length - start < minlen) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>=%zu expected)\n",
			length, minlen);
		return NULL;
	}

	length -= start;

	if (start + offset < offset) {
		dev_err(&dev->dev,
			"virtio_pci: map wrap-around %u+%u\n",
			start, offset);
		return NULL;
	}

	offset += start;

	if (offset & (align - 1)) {
		dev_err(&dev->dev,
			"virtio_pci: offset %u not aligned to %u\n",
			offset, align);
		return NULL;
	}

	if (length > size)
		length = size;

	if (len)
		*len = length;

	if (minlen + offset < minlen ||
	    minlen + offset > pci_resource_len(dev, bar)) {
		dev_err(&dev->dev,
			"virtio_pci: map virtio %zu@%u "
			"out of range on bar %i length %lu\n",
			minlen, offset,
			bar, (unsigned long)pci_resource_len(dev, bar));
		return NULL;
	}

	p = pci_iomap_range(dev, bar, offset, length);
	if (!p)
		dev_err(&dev->dev,
			"virtio_pci: unable to map virtio %u@%u on bar %i\n",
			length, offset, bar);
	return p;
}

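/*
 * The 64-bit fields of the common config (the queue addresses) are laid
 * out as two little-endian 32-bit halves, so a 64-bit value is written as
 * two 32-bit writes, low half first.
 */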
static void iowrite64_twopart(u64 val, __le32 __iomem *lo, __le32 __iomem *hi)
{
	iowrite32((u32)val, lo);
	iowrite32(val >> 32, hi);
}

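/*
 * Feature bits are exposed 32 at a time: writing N to the feature-select
 * register makes the adjacent feature register show bits 32*N..32*N+31.
 */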
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features;

	iowrite32(0, &vp_dev->common->device_feature_select);
	features = ioread32(&vp_dev->common->device_feature);
	iowrite32(1, &vp_dev->common->device_feature_select);
	features |= ((u64)ioread32(&vp_dev->common->device_feature) << 32);

	return features;
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses modern interface "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	iowrite32(0, &vp_dev->common->guest_feature_select);
	iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
	iowrite32(1, &vp_dev->common->guest_feature_select);
	iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);

	return 0;
}

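/*
 * Device-specific config is accessed with the I/O width matching the
 * caller's length (1, 2 or 4 bytes); 8-byte fields are split into two
 * 4-byte accesses.  Multi-byte fields are little-endian on the wire,
 * hence the cpu_to_le*()/le*_to_cpu() conversion around each access.
 */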
/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		b = ioread8(vp_dev->device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(vp_dev->device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

/* The config->set() implementation. It's symmetric to the config->get()
 * implementation. */
static void vp_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, vp_dev->device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), vp_dev->device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return ioread8(&vp_dev->common->config_generation);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return ioread8(&vp_dev->common->device_status);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	iowrite8(status, &vp_dev->common->device_status);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	iowrite8(0, &vp_dev->common->device_status);
	/* Flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any. */
	ioread8(&vp_dev->common->device_status);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	iowrite16(vector, &vp_dev->common->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return ioread16(&vp_dev->common->msix_config);
}

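/*
 * Bytes needed for a ring of @num entries, rounded up to whole pages so
 * the result can be handed to alloc_pages_exact()/free_pages_exact().
 */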
static size_t vring_pci_size(u16 num)
{
	/* We only need a cacheline separation. */
	return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES));
}

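/*
 * Allocate zeroed, physically contiguous pages for a ring of *num entries.
 * The requested size is tried first; on failure *num is halved and the
 * allocation retried (quietly, via __GFP_NOWARN) until the ring fits in a
 * single page, at which point one final, warning allocation is attempted.
 * *num is updated to the size actually allocated for.
 */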
static void *alloc_virtqueue_pages(int *num)
{
	void *pages;

	/* TODO: allocate each queue chunk individually */
	for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) {
		pages = alloc_pages_exact(vring_pci_size(*num),
					  GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
		if (pages)
			return pages;
	}

	if (!*num)
		return NULL;

	/* Try to get a single page. You are my only hope! */
	return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO);
}

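/*
 * setup_vq() brings up one virtqueue: it selects the queue, sizes and
 * allocates the ring, programs the descriptor/avail/used addresses into
 * the common config, maps (or reuses) the notification address that ends
 * up in vq->priv, and optionally binds the queue to an MSI-X vector.
 */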
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  u16 msix_vec)
{
	struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
	struct virtqueue *vq;
	u16 num, off;
	int err;

	if (index >= ioread16(&cfg->num_queues))
		return ERR_PTR(-ENOENT);

	/* Select the queue we're interested in */
	iowrite16(index, &cfg->queue_select);

	/* Check if queue is either not available or already active. */
	num = ioread16(&cfg->queue_size);
	if (!num || ioread16(&cfg->queue_enable))
		return ERR_PTR(-ENOENT);

	if (num & (num - 1)) {
		dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
		return ERR_PTR(-EINVAL);
	}

	/* get offset of notification word for this vq */
	off = ioread16(&cfg->queue_notify_off);

	info->num = num;
	info->msix_vector = msix_vec;

	info->queue = alloc_virtqueue_pages(&info->num);
	if (info->queue == NULL)
		return ERR_PTR(-ENOMEM);

	/* create the vring */
	vq = vring_new_virtqueue(index, info->num,
				 SMP_CACHE_BYTES, &vp_dev->vdev,
				 true, info->queue, vp_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto err_new_queue;
	}

	/* activate the queue */
	iowrite16(num, &cfg->queue_size);
	iowrite64_twopart(virt_to_phys(info->queue),
			  &cfg->queue_desc_lo, &cfg->queue_desc_hi);
	iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
			  &cfg->queue_avail_lo, &cfg->queue_avail_hi);
	iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
			  &cfg->queue_used_lo, &cfg->queue_used_hi);

	if (vp_dev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * vp_dev->notify_offset_multiplier + 2
		    > vp_dev->notify_len) {
			dev_warn(&vp_dev->pci_dev->dev,
				 "bad notification offset %u (x %u) "
				 "for queue %u > %zd",
				 off, vp_dev->notify_offset_multiplier,
				 index, vp_dev->notify_len);
			err = -EINVAL;
			goto err_map_notify;
		}
		vq->priv = (void __force *)vp_dev->notify_base +
			off * vp_dev->notify_offset_multiplier;
	} else {
		vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
					  vp_dev->notify_map_cap, 2, 2,
					  off * vp_dev->notify_offset_multiplier, 2,
					  NULL);
	}

	if (!vq->priv) {
		err = -ENOMEM;
		goto err_map_notify;
	}

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		iowrite16(msix_vec, &cfg->queue_msix_vector);
		msix_vec = ioread16(&cfg->queue_msix_vector);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto err_assign_vector;
		}
	}

	return vq;

err_assign_vector:
	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
	vring_del_virtqueue(vq);
err_new_queue:
	free_pages_exact(info->queue, vring_pci_size(info->num));
	return ERR_PTR(err);
}

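/*
 * Queues are enabled only after every ring has been created: the spec
 * provides no way to clear queue_enable short of a full device reset, so
 * enabling is deferred to the very end of find_vqs.
 */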
static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char *names[])
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list) {
		iowrite16(vq->index, &vp_dev->common->queue_select);
		iowrite16(1, &vp_dev->common->queue_enable);
	}

	return 0;
}

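/*
 * Tear down one virtqueue: detach its MSI-X vector (if any), drop the
 * per-queue notify mapping when one was created by setup_vq(), then delete
 * the vring and free its pages.
 */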
static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	iowrite16(vq->index, &vp_dev->common->queue_select);

	if (vp_dev->msix_enabled) {
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  &vp_dev->common->queue_msix_vector);
		/* Flush the write out to device */
		ioread16(&vp_dev->common->queue_msix_vector);
	}

	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);

	free_pages_exact(info->queue, vring_pci_size(info->num));
}

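/*
 * Two ops tables: the "nodev" variant (NULL .get/.set) is installed when
 * the device exposes no device-specific config capability; see
 * virtio_pci_modern_probe() below.
 */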
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
};

static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
};

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the pci device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);

		/* Ignore structures with reserved BAR values */
		if (bar > 0x5)
			continue;

		if (type == cfg_type) {
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types)
				return pos;
		}
	}
	return 0;
}

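/*
 * check_offsets() compiles to nothing: the BUILD_BUG_ONs only verify, at
 * compile time, that the uapi VIRTIO_PCI_* constants match the field
 * offsets of the structures they describe.
 */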
/* This is part of the ABI. Don't screw with it. */
static inline void check_offsets(void)
{
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}

/* the PCI probing function */
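/*
 * Probe sequence: validate the virtio device ID range, derive the virtio
 * device/vendor IDs (transitional devices reuse the PCI subsystem IDs),
 * locate the common/ISR/notify/device capabilities, and map them.  Only
 * common, ISR and notify are mandatory; without a device capability the
 * nodev ops are installed instead.
 */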
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();

	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		vp_dev->vdev.id.device = pci_dev->subsystem_device;
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		vp_dev->vdev.id.device = pci_dev->device - 0x1040;
	}
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;

	if (virtio_device_is_legacy_only(vp_dev->vdev.id))
		return -ENODEV;

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM);

	err = -EINVAL;
	vp_dev->common = map_capability(pci_dev, common,
					sizeof(struct virtio_pci_common_cfg), 4,
					0, sizeof(struct virtio_pci_common_cfg),
					NULL);
	if (!vp_dev->common)
		goto err_map_common;
	vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
				     0, 1,
				     NULL);
	if (!vp_dev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &vp_dev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know how many VQs we'll map ahead of time.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
						     0, notify_length,
						     &vp_dev->notify_len);
		if (!vp_dev->notify_base)
			goto err_map_notify;
	} else {
		vp_dev->notify_map_cap = notify;
	}

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		vp_dev->device = map_capability(pci_dev, device, 0, 4,
						0, PAGE_SIZE,
						&vp_dev->device_len);
		if (!vp_dev->device)
			goto err_map_device;

		vp_dev->vdev.config = &virtio_pci_config_ops;
	} else {
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
	}

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

err_map_device:
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, vp_dev->isr);
err_map_isr:
	pci_iounmap(pci_dev, vp_dev->common);
err_map_common:
	return err;
}

void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if (vp_dev->device)
		pci_iounmap(pci_dev, vp_dev->device);
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
	pci_iounmap(pci_dev, vp_dev->isr);
	pci_iounmap(pci_dev, vp_dev->common);
}