// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Adapted from:
 *
 * virtio for kvm on s390
 *
 * Copyright IBM Corp. 2008
 *
 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
 *
 * Intel Virtio Over PCIe (VOP) driver.
 */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "vop_main.h"

#define VOP_MAX_VRINGS 4

/*
 * _vop_vdev - Allocated per virtio device instance injected by the peer.
 *
 * @vdev: Virtio device
 * @desc: Virtio device page descriptor
 * @dc: Virtio device control
 * @vpdev: VOP device which is the parent for this virtio device
 * @vr: Buffer for accessing the VRING
 * @used_virt: Virtual address of used ring
 * @used: DMA address of used ring
 * @used_size: Size of the used buffer
 * @reset_done: Track whether VOP reset is complete
 * @virtio_cookie: Cookie returned upon requesting an interrupt
 * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
 * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
 * @dnode: The destination node
 */
struct _vop_vdev {
	struct virtio_device vdev;
	struct mic_device_desc __iomem *desc;
	struct mic_device_ctrl __iomem *dc;
	struct vop_device *vpdev;
	void __iomem *vr[VOP_MAX_VRINGS];
	void *used_virt[VOP_MAX_VRINGS];
	dma_addr_t used[VOP_MAX_VRINGS];
	int used_size[VOP_MAX_VRINGS];
	struct completion reset_done;
	struct mic_irq *virtio_cookie;
	int c2h_vdev_db;
	int h2c_vdev_db;
	int dnode;
};

#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)

#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)

/* Helper API to obtain the parent of the virtio device */
static inline struct device *_vop_dev(struct _vop_vdev *vdev)
{
	return vdev->vdev.dev.parent;
}

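/*
 * Layout of a device page entry, as walked by the helpers below: a
 * struct mic_device_desc, followed by num_vq mic_vqconfig entries, two
 * feature bitmaps of feature_len bytes each (device-offered and
 * driver-accepted), config_len bytes of config space and, after 8 byte
 * alignment, a struct mic_device_ctrl block.
 */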
static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
{
	return sizeof(*desc)
		+ ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
		+ ioread8(&desc->feature_len) * 2
		+ ioread8(&desc->config_len);
}

static inline struct mic_vqconfig __iomem *
_vop_vq_config(struct mic_device_desc __iomem *desc)
{
	return (struct mic_vqconfig __iomem *)(desc + 1);
}

static inline u8 __iomem *
_vop_vq_features(struct mic_device_desc __iomem *desc)
{
	return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
}

static inline u8 __iomem *
_vop_vq_configspace(struct mic_device_desc __iomem *desc)
{
	return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
}

static inline unsigned
_vop_total_desc_size(struct mic_device_desc __iomem *desc)
{
	return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
}

/* This gets the device's feature bits. */
static u64 vop_get_features(struct virtio_device *vdev)
{
	unsigned int i, bits;
	u64 features = 0;
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
	u8 __iomem *in_features = _vop_vq_features(desc);
	int feature_len = ioread8(&desc->feature_len);

	bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
	for (i = 0; i < bits; i++)
		if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
			features |= BIT_ULL(i);

	return features;
}

static void vop_transport_features(struct virtio_device *vdev)
{
	/*
	 * Packed ring isn't enabled on virtio_vop for now,
	 * because virtio_vop uses vring_new_virtqueue() which
	 * creates virtio rings on preallocated memory.
	 */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}

static int vop_finalize_features(struct virtio_device *vdev)
{
	unsigned int i, bits;
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
	u8 feature_len = ioread8(&desc->feature_len);
	/* Second half of bitmap is features we accept. */
	u8 __iomem *out_features =
		_vop_vq_features(desc) + feature_len;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_vop a chance to accept features. */
	vop_transport_features(vdev);

	memset_io(out_features, 0, feature_len);
	bits = min_t(unsigned, feature_len,
		     sizeof(vdev->features)) * 8;
	for (i = 0; i < bits; i++) {
		if (__virtio_test_bit(vdev, i))
			iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
				 &out_features[i / 8]);
	}
	return 0;
}

/*
 * Reading and writing elements in config space
 */
static void vop_get(struct virtio_device *vdev, unsigned int offset,
		    void *buf, unsigned len)
{
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

	if (offset + len > ioread8(&desc->config_len))
		return;
	memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
}

static void vop_set(struct virtio_device *vdev, unsigned int offset,
		    const void *buf, unsigned len)
{
	struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;

	if (offset + len > ioread8(&desc->config_len))
		return;
	memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
}

/*
 * The operations to get and set the status word just access the status
 * field of the device descriptor. set_status also interrupts the host
 * to tell it about status changes.
 */
static u8 vop_get_status(struct virtio_device *vdev)
{
	return ioread8(&to_vopvdev(vdev)->desc->status);
}

static void vop_set_status(struct virtio_device *dev, u8 status)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;

	if (!status)
		return;
	iowrite8(status, &vdev->desc->status);
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
}

/* Inform host on a virtio device reset and wait for ack from host */
static void vop_reset_inform_host(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct mic_device_ctrl __iomem *dc = vdev->dc;
	struct vop_device *vpdev = vdev->vpdev;
	int retry;

	iowrite8(0, &dc->host_ack);
	iowrite8(1, &dc->vdev_reset);
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);

	/* Wait till host completes all card accesses and acks the reset */
	for (retry = 100; retry--;) {
		if (ioread8(&dc->host_ack))
			break;
		msleep(100);
	}

	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);

	/* Reset status to 0 in case we timed out */
	iowrite8(0, &vdev->desc->status);
}

static void vop_reset(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);

	dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
		__func__, dev->id.device);

	vop_reset_inform_host(dev);
	complete_all(&vdev->reset_done);
}

/*
 * The virtio_ring code calls this API when it wants to notify the Host.
 */
static bool vop_notify(struct virtqueue *vq)
{
	struct _vop_vdev *vdev = vq->priv;
	struct vop_device *vpdev = vdev->vpdev;

	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
	return true;
}

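/*
 * Tear down a single virtqueue: unmap and free the locally allocated used
 * ring, delete the vring and unmap the host-provided ring memory.
 */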
static void vop_del_vq(struct virtqueue *vq, int n)
{
	struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
	struct vop_device *vpdev = vdev->vpdev;

	dma_unmap_single(&vpdev->dev, vdev->used[n],
			 vdev->used_size[n], DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vdev->used_virt[n],
		   get_order(vdev->used_size[n]));
	vring_del_virtqueue(vq);
	vpdev->hw_ops->unmap(vpdev, vdev->vr[n]);
	vdev->vr[n] = NULL;
}

static void vop_del_vqs(struct virtio_device *dev)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct virtqueue *vq, *n;
	int idx = 0;

	dev_dbg(_vop_dev(vdev), "%s\n", __func__);

	list_for_each_entry_safe(vq, n, &dev->vqs, list)
		vop_del_vq(vq, idx++);
}

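/*
 * Create the virtqueue on the ring memory laid out by the host, but override
 * the used ring pointer with a locally allocated buffer; vop_find_vq() DMA
 * maps that buffer and hands its address back to the host.
 */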
static struct virtqueue *vop_new_virtqueue(unsigned int index,
					   unsigned int num,
					   struct virtio_device *vdev,
					   bool context,
					   void *pages,
					   bool (*notify)(struct virtqueue *vq),
					   void (*callback)(struct virtqueue *vq),
					   const char *name,
					   void *used)
{
	bool weak_barriers = false;
	struct vring vring;

	vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN);
	vring.used = used;

	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}

/*
 * This routine will assign vrings allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were local
 * memory without io accessors.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	u8 type = ioread8(&vdev->desc->type);

	if (index >= ioread8(&vdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vrings allocated in host memory */
	vqconfig = _vop_vq_config(vdev->desc) + index;
	memcpy_fromio(&config, vqconfig, sizeof(config));
	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	vdev->vr[index] = va;
	memset_io(va, 0x0, _vr_size);

	info = va + _vr_size;
	magic = ioread32(&info->magic);

	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto unmap;
	}

	vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					    sizeof(struct vring_used_elem) *
					    le16_to_cpu(config.num));
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(vdev->used_size[index]));
	vdev->used_virt[index] = used;
	if (!used) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto unmap;
	}

	vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
			       (void __force *)va, vop_notify, callback,
			       name, used);
	if (!vq) {
		err = -ENOMEM;
		goto free_used;
	}

	vdev->used[index] = dma_map_single(&vpdev->dev, used,
					   vdev->used_size[index],
					   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	writeq(vdev->used[index], &vqconfig->used_address);

	vq->priv = vdev;
	return vq;
del_vq:
	vring_del_virtqueue(vq);
free_used:
	free_pages((unsigned long)used,
		   get_order(vdev->used_size[index]));
unmap:
	vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);
	return ERR_PTR(err);
}

static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
			struct virtqueue *vqs[],
			vq_callback_t *callbacks[],
			const char * const names[], const bool *ctx,
			struct irq_affinity *desc)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_device_ctrl __iomem *dc = vdev->dc;
	int i, err, retry, queue_idx = 0;

	/* We must have this many virtqueues. */
	if (nvqs > ioread8(&vdev->desc->num_vq))
		return -ENOENT;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
			__func__, i, names[i]);
		vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	iowrite8(1, &dc->used_address_updated);
	/*
	 * Send an interrupt to the host to inform it that used
	 * rings have been re-assigned.
	 */
	vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
	for (retry = 100; --retry;) {
		if (!ioread8(&dc->used_address_updated))
			break;
		msleep(100);
	}

	dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
	if (!retry) {
		err = -ENODEV;
		goto error;
	}

	return 0;
error:
	vop_del_vqs(dev);
	return err;
}

/*
 * The config ops structure as defined by virtio config
 */
static struct virtio_config_ops vop_vq_config_ops = {
	.get_features = vop_get_features,
	.finalize_features = vop_finalize_features,
	.get = vop_get,
	.set = vop_set,
	.get_status = vop_get_status,
	.set_status = vop_set_status,
	.reset = vop_reset,
	.find_vqs = vop_find_vqs,
	.del_vqs = vop_del_vqs,
};

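/*
 * Doorbell interrupt from the host: acknowledge it and run the vring
 * interrupt handler for every virtqueue of this device.
 */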
static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
{
	struct _vop_vdev *vdev = data;
	struct vop_device *vpdev = vdev->vpdev;
	struct virtqueue *vq;

	vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
	list_for_each_entry(vq, &vdev->vdev.vqs, list)
		vring_interrupt(0, vq);

	return IRQ_HANDLED;
}

static void vop_virtio_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
			container_of(_d, struct virtio_device, dev);
	struct _vop_vdev *vop_vdev =
			container_of(vdev, struct _vop_vdev, vdev);

	kfree(vop_vdev);
}

/*
 * adds a new device and registers it with virtio
 * appropriate drivers are loaded by the device model
 */
static int _vop_add_device(struct mic_device_desc __iomem *d,
			   unsigned int offset, struct vop_device *vpdev,
			   int dnode)
{
	struct _vop_vdev *vdev, *reg_dev = NULL;
	int ret;
	u8 type = ioread8(&d->type);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vdev->vpdev = vpdev;
	vdev->vdev.dev.parent = &vpdev->dev;
	vdev->vdev.dev.release = vop_virtio_release_dev;
	vdev->vdev.id.device = type;
	vdev->vdev.config = &vop_vq_config_ops;
	vdev->desc = d;
	vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
	vdev->dnode = dnode;
	vdev->vdev.priv = (void *)(unsigned long)dnode;
	init_completion(&vdev->reset_done);

	vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
	vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
			vop_virtio_intr_handler, "virtio intr",
			vdev, vdev->h2c_vdev_db);
	if (IS_ERR(vdev->virtio_cookie)) {
		ret = PTR_ERR(vdev->virtio_cookie);
		goto kfree;
	}
	iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
	vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);

	ret = register_virtio_device(&vdev->vdev);
	reg_dev = vdev;
	if (ret) {
		dev_err(_vop_dev(vdev),
			"Failed to register vop device %u type %u\n",
			offset, type);
		goto free_irq;
	}
	writeq((unsigned long)vdev, &vdev->dc->vdev);
	dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
		__func__, offset, type, vdev);

	return 0;

free_irq:
	vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
kfree:
	if (reg_dev)
		put_device(&vdev->vdev.dev);
	else
		kfree(vdev);
	return ret;
}

/*
 * match for a vop device with a specific desc pointer
 */
static int vop_match_desc(struct device *dev, void *data)
{
	struct virtio_device *_dev = dev_to_virtio(dev);
	struct _vop_vdev *vdev = to_vopvdev(_dev);

	return vdev->desc == (void __iomem *)data;
}

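/*
 * _vop_add_device() stores the _vop_vdev pointer in dc->vdev; read it back
 * when handling config change and device remove requests.
 */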
static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl *dc)
{
	return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev);
}

static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
				      unsigned int offset,
				      struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	struct _vop_vdev *vdev = vop_dc_to_vdev(dc);

	if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
		return;

	dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
	virtio_config_changed(&vdev->vdev);
	iowrite8(1, &dc->guest_ack);
}

/*
 * removes a virtio device if a hot remove event has been
 * requested by the host.
 */
static int _vop_remove_device(struct mic_device_desc __iomem *d,
			      unsigned int offset, struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	struct _vop_vdev *vdev = vop_dc_to_vdev(dc);
	u8 status;
	int ret = -1;

	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
		struct device *dev = get_device(&vdev->vdev.dev);

		dev_dbg(&vpdev->dev,
			"%s %d config_change %d type %d vdev %p\n",
			__func__, __LINE__,
			ioread8(&dc->config_change), ioread8(&d->type), vdev);
		status = ioread8(&d->status);
		reinit_completion(&vdev->reset_done);
		unregister_virtio_device(&vdev->vdev);
		vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
		iowrite8(-1, &dc->h2c_vdev_db);
		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
			wait_for_completion(&vdev->reset_done);
		put_device(dev);
		iowrite8(1, &dc->guest_ack);
		dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
			__func__, __LINE__, ioread8(&dc->guest_ack));
		iowrite8(-1, &d->type);
		ret = 0;
	}
	return ret;
}

#define REMOVE_DEVICES true

static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
			      bool remove, int dnode)
{
	s8 type;
	unsigned int i;
	struct mic_device_desc __iomem *d;
	struct mic_device_ctrl __iomem *dc;
	struct device *dev;
	int ret;

	for (i = sizeof(struct mic_bootparam);
	     i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
		d = dp + i;
		dc = (void __iomem *)d + _vop_aligned_desc_size(d);
		/*
		 * This read barrier is paired with the corresponding write
		 * barrier on the host which is inserted before adding or
		 * removing a virtio device descriptor, by updating the type.
		 */
		rmb();
		type = ioread8(&d->type);

		/* end of list */
		if (type == 0)
			break;

		if (type == -1)
			continue;

		/* device already exists */
		dev = device_find_child(&vpdev->dev, (void __force *)d,
					vop_match_desc);
		if (dev) {
			if (remove)
				iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
					 &dc->config_change);
			put_device(dev);
			_vop_handle_config_change(d, i, vpdev);
			ret = _vop_remove_device(d, i, vpdev);
			if (remove) {
				iowrite8(0, &dc->config_change);
				iowrite8(0, &dc->guest_ack);
			}
			continue;
		}

		/* new device */
		dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
			__func__, __LINE__, d);
		if (!remove)
			_vop_add_device(d, i, vpdev, dnode);
	}
}

static void vop_scan_devices(struct vop_info *vi,
			     struct vop_device *vpdev, bool remove)
{
	void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);

	if (!dp)
		return;
	mutex_lock(&vi->vop_mutex);
	_vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
	mutex_unlock(&vi->vop_mutex);
}

/*
 * vop_hotplug_devices tries to find changes in the device page.
 */
static void vop_hotplug_devices(struct work_struct *work)
{
	struct vop_info *vi = container_of(work, struct vop_info,
					   hotplug_work);

	vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
}

/*
 * Interrupt handler for hot plug/config changes etc.
 */
static irqreturn_t vop_extint_handler(int irq, void *data)
{
	struct vop_info *vi = data;
	struct mic_bootparam __iomem *bp;
	struct vop_device *vpdev = vi->vpdev;

	bp = vpdev->hw_ops->get_remote_dp(vpdev);
	dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
		__func__, __LINE__);
	vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
	schedule_work(&vi->hotplug_work);
	return IRQ_HANDLED;
}

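/*
 * Driver probe: a device with a dnode is handled by the VOP host backend;
 * otherwise the remote device page is scanned for virtio devices and a
 * doorbell is registered for hot plug/config change interrupts.
 */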
static int vop_driver_probe(struct vop_device *vpdev)
{
	struct vop_info *vi;
	int rc;

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi) {
		rc = -ENOMEM;
		goto exit;
	}
	dev_set_drvdata(&vpdev->dev, vi);
	vi->vpdev = vpdev;

	mutex_init(&vi->vop_mutex);
	INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
	if (vpdev->dnode) {
		rc = vop_host_init(vi);
		if (rc < 0)
			goto free;
	} else {
		struct mic_bootparam __iomem *bootparam;

		vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);

		vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
		vi->cookie = vpdev->hw_ops->request_irq(vpdev,
							vop_extint_handler,
							"virtio_config_intr",
							vi, vi->h2c_config_db);
		if (IS_ERR(vi->cookie)) {
			rc = PTR_ERR(vi->cookie);
			goto free;
		}
		bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
		iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
	}
	vop_init_debugfs(vi);
	return 0;
free:
	kfree(vi);
exit:
	return rc;
}

static void vop_driver_remove(struct vop_device *vpdev)
{
	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);

	if (vpdev->dnode) {
		vop_host_uninit(vi);
	} else {
		struct mic_bootparam __iomem *bootparam =
			vpdev->hw_ops->get_remote_dp(vpdev);
		if (bootparam)
			iowrite8(-1, &bootparam->h2c_config_db);
		vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
		flush_work(&vi->hotplug_work);
		vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
	}
	vop_exit_debugfs(vi);
	kfree(vi);
}

static struct vop_device_id id_table[] = {
	{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
	{ 0 },
};

static struct vop_driver vop_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = vop_driver_probe,
	.remove = vop_driver_remove,
};

module_vop_driver(vop_driver);

MODULE_DEVICE_TABLE(mbus, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
MODULE_LICENSE("GPL v2");