/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);

/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;

	spin_lock(&dist->lpi_list_lock);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	return irq;
}

struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 lpi;
	u32 event_id;
};

/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;
	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {.cte_esz = 8, .dte_esz = 8, .ite_esz = 8,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)

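/*
 * Note on ABI selection: with only the v0 ABI defined above, NR_ITS_ABIS
 * evaluates to 1, so the only GITS_IIDR.Revision value userspace can
 * restore is 0. its->abi_rev is used directly as an index into
 * its_table_abi_versions[] by vgic_its_get_abi() below.
 */
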
inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}

int vgic_its_set_abi(struct vgic_its *its, int rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}

/* To be used as an iterator, this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)

/*
 * We only implement 48 bits of PA at the moment, although the ITS
 * supports more. Let's be restrictive here.
 */
#define BASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define CBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))
#define PENDBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define PROPBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16

/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu)
{
	u64 propbase = PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;

	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
			     &prop, 1);

	if (ret)
		return ret;

	spin_lock(&irq->irq_lock);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		vgic_queue_irq_unlock(kvm, irq);
	} else {
		spin_unlock(&irq->irq_lock);
	}

	return 0;
}

/*
 * Create a snapshot of the current LPI list, so that we can enumerate all
 * LPIs without holding any lock.
 * Returns the array length and puts the kmalloc'ed array into intid_ptr.
 */
static int vgic_copy_lpi_list(struct kvm *kvm, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count = dist->lpi_list_count, i = 0;

	/*
	 * We use the current value of the list length, which may change
	 * after the kmalloc. We don't care, because the guest shouldn't
	 * change anything while the command handling is still running,
	 * and in the worst case we would miss a new IRQ, which one wouldn't
	 * expect to be covered by this command anyway.
	 */
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	spin_lock(&dist->lpi_list_lock);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		intids[i] = irq->intid;
		if (++i == irq_count)
			break;
	}
	spin_unlock(&dist->lpi_list_lock);

	*intid_ptr = intids;
	return irq_count;
}

/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this
 * LPI is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (!ite->collection || coll != ite->collection)
			continue;

		update_affinity_ite(kvm, ite);
	}
}

static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}

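/*
 * Worked example for the helper above: if the guest programs
 * PROPBASER.IDbits (the low five bits) to 13, nr_idbits is 14 and the
 * function returns 1U << 14 = 16384, i.e. interrupt IDs 0..16383 are in
 * range (LPIs themselves only start at GIC_LPI_OFFSET, 8192). This
 * assumes INTERRUPT_ID_BITS_ITS is at least 14, which holds for the
 * value defined in vgic.h.
 */
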
/*
 * Scan the whole LPI pending table and sync the pending bit in there
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;
		u8 pendmask;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
					     &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		spin_lock(&irq->irq_lock);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}

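/*
 * Example of the pending table lookup performed above: for INTID 8195
 * the byte offset into the table is 8195 / 8 = 1024 and the bit number
 * is 8195 % 8 = 3, so the pending state lives in bit 3 of the byte at
 * PENDBASER + 1024.
 */
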
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}

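/*
 * With the values above the guest reads GITS_TYPER as advertising
 * 16 DeviceID bits (Devbits = 0x0f, the field is "minus one" encoded),
 * 16 EventID bits (VITS_TYPER_IDBITS encoded the same way) and, for the
 * v0 ABI, 8-byte ITT entries. This reading assumes GIC_ENCODE_SZ()
 * stores (size - 1) in the given field width, which matches how it is
 * used throughout this file.
 */
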
static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}

static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}

static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	spin_lock(&ite->irq->irq_lock);
	ite->irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, ite->irq);

	return 0;
}

static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
{
	struct vgic_io_device *iodev;

	if (dev->ops != &kvm_io_gic_ops)
		return NULL;

	iodev = container_of(dev, struct vgic_io_device, dev);

	if (iodev->iodev_type != IODEV_ITS)
		return NULL;

	return iodev;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;
	int ret;

	if (!vgic_has_its(kvm))
		return -ENODEV;

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return -EINVAL;

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return -EINVAL;

	iodev = vgic_get_its_iodev(kvm_io_dev);
	if (!iodev)
		return -EINVAL;

	mutex_lock(&iodev->its->its_lock);
	ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
	mutex_unlock(&iodev->its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}

/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq)
		vgic_put_irq(kvm, ite->irq);

	kfree(ite);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)

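/*
 * For illustration, a MAPTI command read from the guest's command queue
 * decodes through the accessors above as follows: the command number
 * sits in bits [7:0] and the DeviceID in bits [63:32] of doubleword 0,
 * the EventID in bits [31:0] and the physical INTID in bits [63:32] of
 * doubleword 1, and the collection ID in bits [15:0] of doubleword 2;
 * these are exactly the fields the macros extract.
 */
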
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && ite->collection) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}

/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);

	return 0;
}

/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	int index;
	u64 indirect_ptr;
	gfn_t gfn;
	int esz = GITS_BASER_ENTRY_SIZE(baser);

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = BASER_ADDRESS(baser) + id * esz;
		gfn = addr >> PAGE_SHIFT;

		return kvm_is_visible_gfn(its->dev->kvm, gfn);
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest(its->dev->kvm,
			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/*
	 * Mask the guest physical address and calculate the frame number.
	 * Any address beyond our supported 48 bits of PA will be caught
	 * by the actual check in the final step.
	 */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;
	gfn = indirect_ptr >> PAGE_SHIFT;

	return kvm_is_visible_gfn(its->dev->kvm, gfn);
}

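/*
 * Worked example for the indirect case above: with 8-byte entries each
 * 64K second-level page holds 8192 IDs, so ID 20000 uses first-level
 * index 20000 / 8192 = 2 and lands (20000 % 8192) * 8 = 28928 bytes
 * into the second-level page that entry points to.
 */
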
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}

static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}

/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	int lpi_nr;
	struct vgic_irq *irq;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (event_id >= BIT_ULL(device->num_eventid_bits))
		return E_ITS_MAPTI_ID_OOR;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = kzalloc(sizeof(struct its_ite), GFP_KERNEL);
	if (!ite) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return -ENOMEM;
	}

	ite->event_id = event_id;
	list_add_tail(&ite->ite_list, &device->itt_head);

	ite->collection = collection;
	ite->lpi = lpi_nr;

	irq = vgic_add_lpi(kvm, lpi_nr);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	update_affinity_ite(kvm, ite);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 */
	update_lpi_config(kvm, ite->irq, NULL);

	return 0;
}

/* Requires the its_lock to be held. */
static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	list_del(&device->dev_list);
	kfree(device);
}

/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id))
		return E_ITS_MAPD_DEVICE_OOR;

	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
		return E_ITS_MAPD_ITTSIZE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_unmap_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = kzalloc(sizeof(struct its_device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->device_id = device_id;
	device->num_eventid_bits = num_eventid_bits;

	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);

	return 0;
}

/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}

/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	return 0;
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, ite->irq, NULL);
}

/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(kvm, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	return 0;
}

/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		spin_lock(&irq->irq_lock);

		if (irq->target_vcpu == vcpu1)
			irq->target_vcpu = vcpu2;

		spin_unlock(&irq->irq_lock);
	}

	spin_unlock(&dist->lpi_list_lock);

	return 0;
}

/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}

/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}

static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
	reg &= ~GENMASK_ULL(15, 12);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}

static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/*
	 * Sanitise the physical address to be 64k aligned.
	 * Also limit the physical addresses to 48 bits.
	 */
	reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));

	return reg;
}

static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}

#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))

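/*
 * Example: a CBASER Size field of 0 describes a single 4K page, so
 * ITS_CMD_BUFFER_SIZE() returns 4096, which holds 128 commands of
 * ITS_CMD_SIZE (32) bytes each; CREADR/CWRITER then wrap at 4096.
 */
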
/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
					 cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}

/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}

static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len,
					      unsigned long val)
{
	u32 cmd_offset;
	int ret = 0;

	mutex_lock(&its->cmd_lock);

	if (its->enabled) {
		ret = -EBUSY;
		goto out;
	}

	cmd_offset = ITS_CMD_OFFSET(val);
	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		ret = -EINVAL;
		goto out;
	}

	its->creadr = cmd_offset;
out:
	mutex_unlock(&its->cmd_lock);
	return ret;
}

#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}

#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 entry_size, device_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = abi->dte_esz;
		device_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = abi->cte_esz;
		device_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= device_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;
}

static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}

static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	its->enabled = !!(val & GITS_CTLR_ENABLE);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
	.uaccess_its_write = uwr,				\
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}

static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_iidr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_creadr, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};

/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}

static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	if (!its->initialized)
		return -EBUSY;

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
		return -ENXIO;

	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}

#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))

static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.has_its = true;
	its->initialized = false;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
}

static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;
	struct its_device *dev;
	struct its_ite *ite;
	struct list_head *dev_cur, *dev_temp;
	struct list_head *cur, *temp;

	/*
	 * We may end up here without the lists ever having been initialized.
	 * Check this and bail out early to avoid dereferencing a NULL pointer.
	 */
	if (!its->device_list.next)
		return;

	mutex_lock(&its->its_lock);
	list_for_each_safe(dev_cur, dev_temp, &its->device_list) {
		dev = container_of(dev_cur, struct its_device, dev_list);
		list_for_each_safe(cur, temp, &dev->itt_head) {
			ite = (container_of(cur, struct its_ite, ite_list));
			its_free_ite(kvm, ite);
		}
		list_del(dev_cur);
		kfree(dev);
	}

	list_for_each_safe(cur, temp, &its->collection_list) {
		list_del(cur);
		kfree(container_of(cur, struct its_collection, coll_list));
	}
	mutex_unlock(&its->its_lock);

	kfree(its);
}

int vgic_its_has_attr_regs(struct kvm_device *dev,
			   struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	gpa_t offset = attr->attr;
	int align;

	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

	if (offset & align)
		return -EINVAL;

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region)
		return -ENXIO;

	return 0;
}

int vgic_its_attr_regs_access(struct kvm_device *dev,
			      struct kvm_device_attr *attr,
			      u64 *reg, bool is_write)
{
	const struct vgic_register_region *region;
	struct vgic_its *its;
	gpa_t addr, offset;
	unsigned int len;
	int align, ret = 0;

	its = dev->private;
	offset = attr->attr;

	/*
	 * Although the spec supports upper/lower 32-bit accesses to
	 * 64-bit ITS registers, the userspace ABI requires 64-bit
	 * accesses to all 64-bit wide registers. We therefore only
	 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
	 * registers
	 */
	if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
		align = 0x3;
	else
		align = 0x7;

	if (offset & align)
		return -EINVAL;

	mutex_lock(&dev->kvm->lock);

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -ENXIO;
		goto out;
	}

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region) {
		ret = -ENXIO;
		goto out;
	}

	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	addr = its->vgic_its_base + offset;

	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;

	if (is_write) {
		if (region->uaccess_its_write)
			ret = region->uaccess_its_write(dev->kvm, its, addr,
							len, *reg);
		else
			region->its_write(dev->kvm, its, addr, len, *reg);
	} else {
		*reg = region->its_read(dev->kvm, its, addr, len);
	}
	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	return -ENXIO;
}

/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	return -ENXIO;
}

static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);
	return 0;
}

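/*
 * As an example, with the v0 ABI's 8-byte collection table entries,
 * GIC_ENCODE_SZ(abi->cte_esz, 5) evaluates to 7, so the guest-visible
 * GITS_BASER Entry Size field reads back as 7, meaning 8 bytes, on the
 * assumption that the field encodes the entry size minus one.
 */
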
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}

static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		its->vgic_its_base = addr;

		return 0;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			its->initialized = true;

			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}

static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}

static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}

/*
 * Registers all ITSes with the kvm_io_bus framework.
 * To follow the existing VGIC initialization sequence, this has to be
 * done as late as possible, just before the first VCPU runs.
 */
int vgic_register_its_iodevs(struct kvm *kvm)
{
	struct kvm_device *dev;
	int ret = 0;

	list_for_each_entry(dev, &kvm->devices, vm_node) {
		if (dev->ops != &kvm_arm_vgic_its_ops)
			continue;

		ret = vgic_register_its_iodev(kvm, dev->private);
		if (ret)
			return ret;
		/*
		 * We don't need to care about tearing down previously
		 * registered ITSes, as the kvm_io_bus framework removes
		 * them for us if the VM gets destroyed.
		 */
	}

	return ret;
}