/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Adapted from:
 *
 * virtio for kvm on s390
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
 *
 * Intel Virtio Over PCIe (VOP) driver.
 *
 */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>

#include "vop_main.h"

#define VOP_MAX_VRINGS 4

/*
 * struct _vop_vdev - Allocated per virtio device instance injected by the peer.
 *
 * @vdev: Virtio device
 * @desc: Virtio device page descriptor
 * @dc: Virtio device control
 * @vpdev: VOP device which is the parent for this virtio device
 * @vr: Buffer for accessing the VRING
 * @used: DMA address of the reassigned used ring, per VRING
 * @used_size: Size of the used ring buffer, per VRING
 * @reset_done: Track whether VOP reset is complete
 * @virtio_cookie: Cookie returned upon requesting an interrupt
 * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
 * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
 * @dnode: The destination node
 */
struct _vop_vdev {
	struct virtio_device vdev;
	struct mic_device_desc __iomem *desc;
	struct mic_device_ctrl __iomem *dc;
	struct vop_device *vpdev;
	void __iomem *vr[VOP_MAX_VRINGS];
	dma_addr_t used[VOP_MAX_VRINGS];
	int used_size[VOP_MAX_VRINGS];
	struct completion reset_done;
	struct mic_irq *virtio_cookie;
	int c2h_vdev_db;
	int h2c_vdev_db;
	int dnode;
};

#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)

#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)

/* Helper API to obtain the parent of the virtio device */
static inline struct device *_vop_dev(struct _vop_vdev *vdev)
{
	return vdev->vdev.dev.parent;
}

static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
{
	return sizeof(*desc)
		+ ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
		+ ioread8(&desc->feature_len) * 2
		+ ioread8(&desc->config_len);
}

static inline struct mic_vqconfig __iomem *
_vop_vq_config(struct mic_device_desc __iomem *desc)
{
	return (struct mic_vqconfig __iomem *)(desc + 1);
}

static inline u8 __iomem *
_vop_vq_features(struct mic_device_desc __iomem *desc)
{
	return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
}

static inline u8 __iomem *
_vop_vq_configspace(struct mic_device_desc __iomem *desc)
{
	return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
}

static inline unsigned
_vop_total_desc_size(struct mic_device_desc __iomem *desc)
{
	return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
}

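/*
 * Layout of a single device page entry, as implied by the accessors above
 * (a reading aid, not a normative definition):
 *
 *	struct mic_device_desc
 *	struct mic_vqconfig[num_vq]
 *	u8 host feature bits[feature_len]
 *	u8 guest feature bits[feature_len]
 *	u8 config space[config_len]
 *	<padding up to an 8 byte boundary>
 *	struct mic_device_ctrl
 */
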
/* This gets the device's feature bits. */
static u64 vop_get_features(struct virtio_device *vdev)
{
	unsigned int i, bits;
	u32 features = 0;
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
	u8 __iomem *in_features = _vop_vq_features(desc);
	int feature_len = ioread8(&desc->feature_len);

	bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
	for (i = 0; i < bits; i++)
		if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
			features |= BIT(i);

	return features;
}

static int vop_finalize_features(struct virtio_device *vdev)
{
	unsigned int i, bits;
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
	u8 feature_len = ioread8(&desc->feature_len);
	/* Second half of bitmap is features we accept. */
	u8 __iomem *out_features =
		_vop_vq_features(desc) + feature_len;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	memset_io(out_features, 0, feature_len);
	bits = min_t(unsigned, feature_len,
		     sizeof(vdev->features)) * 8;
	for (i = 0; i < bits; i++) {
		if (__virtio_test_bit(vdev, i))
			iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
				 &out_features[i / 8]);
	}
	return 0;
}

/*
 * Reading and writing elements in config space
 */
static void vop_get(struct virtio_device *vdev, unsigned int offset,
		    void *buf, unsigned len)
{
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

	if (offset + len > ioread8(&desc->config_len))
		return;
	memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
}

static void vop_set(struct virtio_device *vdev, unsigned int offset,
		    const void *buf, unsigned len)
{
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

	if (offset + len > ioread8(&desc->config_len))
		return;
	memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
}

/*
 * The operations to get and set the status word just access the status
 * field of the device descriptor. set_status also interrupts the host
 * to notify it of status changes.
 */
static u8 vop_get_status(struct virtio_device *vdev)
{
	return ioread8(&to_vopvdev(vdev)->desc->status);
}

static void vop_set_status(struct virtio_device *dev, u8 status)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;

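	/*
	 * A zero status would amount to a device reset, which the virtio
	 * core drives through the dedicated vop_reset() op below, so it is
	 * ignored here.
	 */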
	if (!status)
		return;
	iowrite8(status, &vdev->desc->status);
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
}

/* Inform the host of a virtio device reset and wait for an ack from the host */
static void vop_reset_inform_host(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct mic_device_ctrl __iomem *dc = vdev->dc;
	struct vop_device *vpdev = vdev->vpdev;
	int retry;

	iowrite8(0, &dc->host_ack);
	iowrite8(1, &dc->vdev_reset);
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);

	/* Wait till the host completes all card accesses and acks the reset */
	for (retry = 100; retry--;) {
		if (ioread8(&dc->host_ack))
			break;
		msleep(100);
	}

	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);

	/* Reset status to 0 in case we timed out */
	iowrite8(0, &vdev->desc->status);
}

static void vop_reset(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);

	dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
		__func__, dev->id.device);

	vop_reset_inform_host(dev);
	complete_all(&vdev->reset_done);
}

/*
 * The virtio_ring code calls this API when it wants to notify the Host.
 */
static bool vop_notify(struct virtqueue *vq)
{
	struct _vop_vdev *vdev = vq->priv;
	struct vop_device *vpdev = vdev->vpdev;

	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
	return true;
}

static void vop_del_vq(struct virtqueue *vq, int n)
{
	struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
	struct vring *vr = (struct vring *)(vq + 1);
	struct vop_device *vpdev = vdev->vpdev;

	dma_unmap_single(&vpdev->dev, vdev->used[n],
			 vdev->used_size[n], DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
	vring_del_virtqueue(vq);
	vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
	vdev->vr[n] = NULL;
}

static void vop_del_vqs(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct virtqueue *vq, *n;
	int idx = 0;

	dev_dbg(_vop_dev(vdev), "%s\n", __func__);

	list_for_each_entry_safe(vq, n, &dev->vqs, list)
		vop_del_vq(vq, idx++);
}

/*
 * This routine assigns vrings allocated in host/IO memory. Code in
 * virtio_ring.c, however, continues to access this IO memory as if it
 * were local memory, without IO accessors.
 */
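/*
 * A sketch of the remote layout that gets mapped below, derived from the
 * sizes computed in this routine:
 *
 *	config.address -> vring of config.num entries, aligned to
 *			  MIC_VIRTIO_RING_ALIGN (_vr_size bytes),
 *			  followed by struct _mic_vring_info (which holds
 *			  the magic value checked below)
 *
 *	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info))
 */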
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	struct vring *vr;
	u8 type = ioread8(&vdev->desc->type);

	if (index >= ioread8(&vdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vrings allocated in host memory */
	vqconfig = _vop_vq_config(vdev->desc) + index;
	memcpy_fromio(&config, vqconfig, sizeof(config));
	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
			vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	vdev->vr[index] = va;
	memset_io(va, 0x0, _vr_size);
	vq = vring_new_virtqueue(
				index,
				le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
				dev,
				false,
				(void __force *)va, vop_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}
	info = va + _vr_size;
	magic = ioread32(&info->magic);

	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto unmap;
	}

	/* Allocate and reassign the used ring now */
	vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					    sizeof(struct vring_used_elem) *
					    le16_to_cpu(config.num));
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(vdev->used_size[index]));
	if (!used) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	vdev->used[index] = dma_map_single(&vpdev->dev, used,
					   vdev->used_size[index],
					   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto free_used;
	}
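	/*
	 * Publish the DMA address of the locally allocated used ring so
	 * that the host can switch over to it.
	 */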
	writeq(vdev->used[index], &vqconfig->used_address);
	/*
	 * To reassign the used ring here we are directly accessing
	 * struct vring_virtqueue which is a private data structure
	 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
	 * vring_new_virtqueue() would ensure that
	 * (&vq->vring == (struct vring *)(&vq->vq + 1));
	 */
	vr = (struct vring *)(vq + 1);
	vr->used = used;

	vq->priv = vdev;
	return vq;
free_used:
	free_pages((unsigned long)used,
		   get_order(vdev->used_size[index]));
del_vq:
	vring_del_virtqueue(vq);
unmap:
	vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
	return ERR_PTR(err);
}

static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
			struct virtqueue *vqs[],
			vq_callback_t *callbacks[],
			const char * const names[], struct irq_affinity *desc)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_device_ctrl __iomem *dc = vdev->dc;
	int i, err, retry;

	/* We must have this many virtqueues. */
	if (nvqs > ioread8(&vdev->desc->num_vq))
		return -ENOENT;

	for (i = 0; i < nvqs; ++i) {
		dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
			__func__, i, names[i]);
		vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i]);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	iowrite8(1, &dc->used_address_updated);
	/*
	 * Send an interrupt to the host to inform it that used
	 * rings have been re-assigned.
	 */
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
	for (retry = 100; --retry;) {
		if (!ioread8(&dc->used_address_updated))
			break;
		msleep(100);
	}

	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
	if (!retry) {
		err = -ENODEV;
		goto error;
	}

	return 0;
error:
	vop_del_vqs(dev);
	return err;
}

/*
 * The config ops structure as defined by virtio config
 */
static struct virtio_config_ops vop_vq_config_ops = {
	.get_features = vop_get_features,
	.finalize_features = vop_finalize_features,
	.get = vop_get,
	.set = vop_set,
	.get_status = vop_get_status,
	.set_status = vop_set_status,
	.reset = vop_reset,
	.find_vqs = vop_find_vqs,
	.del_vqs = vop_del_vqs,
};

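/*
 * All of a device's virtqueues share a single host-to-card doorbell, so
 * the handler below simply runs vring_interrupt() on each of them.
 */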
static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
{
	struct _vop_vdev *vdev = data;
	struct vop_device *vpdev = vdev->vpdev;
	struct virtqueue *vq;

	vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
	list_for_each_entry(vq, &vdev->vdev.vqs, list)
		vring_interrupt(0, vq);

	return IRQ_HANDLED;
}

static void vop_virtio_release_dev(struct device *_d)
{
	/*
	 * No need for a release method similar to virtio PCI.
	 * Provide an empty one to avoid getting a warning from core.
	 */
}

/*
 * Adds a new device and registers it with virtio;
 * appropriate drivers are loaded by the device model.
 */
static int _vop_add_device(struct mic_device_desc __iomem *d,
			   unsigned int offset, struct vop_device *vpdev,
			   int dnode)
{
	struct _vop_vdev *vdev;
	int ret;
	u8 type = ioread8(&d->type);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vdev->vpdev = vpdev;
	vdev->vdev.dev.parent = &vpdev->dev;
	vdev->vdev.dev.release = vop_virtio_release_dev;
	vdev->vdev.id.device = type;
	vdev->vdev.config = &vop_vq_config_ops;
	vdev->desc = d;
	vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
	vdev->dnode = dnode;
	vdev->vdev.priv = (void *)(u64)dnode;
	init_completion(&vdev->reset_done);

	vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
	vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
			vop_virtio_intr_handler, "virtio intr",
			vdev, vdev->h2c_vdev_db);
	if (IS_ERR(vdev->virtio_cookie)) {
		ret = PTR_ERR(vdev->virtio_cookie);
		goto kfree;
	}
	iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
	vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);

	ret = register_virtio_device(&vdev->vdev);
	if (ret) {
		dev_err(_vop_dev(vdev),
			"Failed to register vop device %u type %u\n",
			offset, type);
		goto free_irq;
	}
	writeq((u64)vdev, &vdev->dc->vdev);
	dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
		__func__, offset, type, vdev);

	return 0;

free_irq:
	vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
kfree:
	kfree(vdev);
	return ret;
}

/*
 * Match a vop device against a specific desc pointer.
 */
static int vop_match_desc(struct device *dev, void *data)
{
	struct virtio_device *_dev = dev_to_virtio(dev);
	struct _vop_vdev *vdev = to_vopvdev(_dev);

	return vdev->desc == (void __iomem *)data;
}

static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
				      unsigned int offset,
				      struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);

	if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
		return;

	dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
	virtio_config_changed(&vdev->vdev);
	iowrite8(1, &dc->guest_ack);
}

/*
 * Removes a virtio device if a hot-remove event has been
 * requested by the host.
 */
static int _vop_remove_device(struct mic_device_desc __iomem *d,
			      unsigned int offset, struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
	u8 status;
	int ret = -1;

	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
		dev_dbg(&vpdev->dev,
			"%s %d config_change %d type %d vdev %p\n",
			__func__, __LINE__,
			ioread8(&dc->config_change), ioread8(&d->type), vdev);
		status = ioread8(&d->status);
		reinit_completion(&vdev->reset_done);
		unregister_virtio_device(&vdev->vdev);
		vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
		iowrite8(-1, &dc->h2c_vdev_db);
		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
			wait_for_completion(&vdev->reset_done);
		kfree(vdev);
		iowrite8(1, &dc->guest_ack);
		dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
			__func__, __LINE__, ioread8(&dc->guest_ack));
		iowrite8(-1, &d->type);
		ret = 0;
	}
	return ret;
}

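/*
 * Value passed as the "remove" argument to the device scan below: callers
 * pass !REMOVE_DEVICES when scanning for newly added devices and
 * REMOVE_DEVICES when tearing everything down on driver removal.
 */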
#define REMOVE_DEVICES true

static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
			      bool remove, int dnode)
{
	s8 type;
	unsigned int i;
	struct mic_device_desc __iomem *d;
	struct mic_device_ctrl __iomem *dc;
	struct device *dev;
	int ret;

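	/*
	 * The device page starts with struct mic_bootparam and is followed
	 * by a packed list of device descriptors; a type of 0 marks the end
	 * of the list and a type of -1 marks a deleted slot.
	 */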
	for (i = sizeof(struct mic_bootparam);
		i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
		d = dp + i;
		dc = (void __iomem *)d + _vop_aligned_desc_size(d);
		/*
		 * This read barrier is paired with the corresponding write
		 * barrier on the host which is inserted before adding or
		 * removing a virtio device descriptor, by updating the type.
		 */
		rmb();
		type = ioread8(&d->type);

		/* end of list */
		if (type == 0)
			break;

		if (type == -1)
			continue;

		/* device already exists */
		dev = device_find_child(&vpdev->dev, (void __force *)d,
					vop_match_desc);
		if (dev) {
			if (remove)
				iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
					 &dc->config_change);
			put_device(dev);
			_vop_handle_config_change(d, i, vpdev);
			ret = _vop_remove_device(d, i, vpdev);
			if (remove) {
				iowrite8(0, &dc->config_change);
				iowrite8(0, &dc->guest_ack);
			}
			continue;
		}

		/* new device */
		dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
			__func__, __LINE__, d);
		if (!remove)
			_vop_add_device(d, i, vpdev, dnode);
	}
}

static void vop_scan_devices(struct vop_info *vi,
			     struct vop_device *vpdev, bool remove)
{
	void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);

	if (!dp)
		return;
	mutex_lock(&vi->vop_mutex);
	_vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
	mutex_unlock(&vi->vop_mutex);
}

/*
 * vop_hotplug_devices looks for changes in the device page.
 */
static void vop_hotplug_devices(struct work_struct *work)
{
	struct vop_info *vi = container_of(work, struct vop_info,
					   hotplug_work);

	vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
}

/*
 * Interrupt handler for hot plug/config changes etc.
 */
static irqreturn_t vop_extint_handler(int irq, void *data)
{
	struct vop_info *vi = data;
	struct mic_bootparam __iomem *bp;
	struct vop_device *vpdev = vi->vpdev;

	bp = vpdev->hw_ops->get_remote_dp(vpdev);
	dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
		__func__, __LINE__);
	vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
	schedule_work(&vi->hotplug_work);
	return IRQ_HANDLED;
}

static int vop_driver_probe(struct vop_device *vpdev)
{
	struct vop_info *vi;
	int rc;

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi) {
		rc = -ENOMEM;
		goto exit;
	}
	dev_set_drvdata(&vpdev->dev, vi);
	vi->vpdev = vpdev;

	mutex_init(&vi->vop_mutex);
	INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
	if (vpdev->dnode) {
		rc = vop_host_init(vi);
		if (rc < 0)
			goto free;
	} else {
		struct mic_bootparam __iomem *bootparam;

		vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);

		vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
		vi->cookie = vpdev->hw_ops->request_irq(vpdev,
				vop_extint_handler,
				"virtio_config_intr",
				vi, vi->h2c_config_db);
		if (IS_ERR(vi->cookie)) {
			rc = PTR_ERR(vi->cookie);
			goto free;
		}
		bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
		iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
	}
	vop_init_debugfs(vi);
	return 0;
free:
	kfree(vi);
exit:
	return rc;
}

static void vop_driver_remove(struct vop_device *vpdev)
{
	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);

	if (vpdev->dnode) {
		vop_host_uninit(vi);
	} else {
		struct mic_bootparam __iomem *bootparam =
			vpdev->hw_ops->get_remote_dp(vpdev);
		if (bootparam)
			iowrite8(-1, &bootparam->h2c_config_db);
		vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
		flush_work(&vi->hotplug_work);
		vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
	}
	vop_exit_debugfs(vi);
	kfree(vi);
}

static struct vop_device_id id_table[] = {
	{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
	{ 0 },
};

static struct vop_driver vop_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = vop_driver_probe,
	.remove = vop_driver_remove,
};

module_vop_driver(vop_driver);

MODULE_DEVICE_TABLE(mbus, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
MODULE_LICENSE("GPL v2");