/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu);

/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;

	spin_lock(&dist->lpi_list_lock);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 */
	ret = update_lpi_config(kvm, irq, NULL);
	if (ret)
		return ERR_PTR(ret);

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret)
		return ERR_PTR(ret);

	return irq;
}

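/*
 * An ITS device entry: one per mapped DeviceID, tracking the guest ITT
 * address, the number of EventID bits and the list of ITEs mapped
 * through this device.
 */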
struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	gpa_t itt_addr;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 event_id;
};

/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *	stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *	especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;
	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {.cte_esz = 8, .dte_esz = 8, .ite_esz = 8,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)

inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}

int vgic_its_set_abi(struct vgic_its *its, int rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}

/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)

/*
 * We only implement 48 bits of PA at the moment, although the ITS
 * supports more. Let's be restrictive here.
 */
#define BASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define CBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)

/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;
	unsigned long flags;

	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
			     &prop, 1);

	if (ret)
		return ret;

	spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		vgic_queue_irq_unlock(kvm, irq, flags);
	} else {
		spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

	return 0;
}

/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count = dist->lpi_list_count, i = 0;

	/*
	 * We use the current value of the list length, which may change
	 * after the kmalloc. We don't care, because the guest shouldn't
	 * change anything while the command handling is still running,
	 * and in the worst case we would miss a new IRQ, which one wouldn't
	 * expect to be covered by this command anyway.
	 */
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	spin_lock(&dist->lpi_list_lock);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		if (irq->target_vcpu != vcpu)
			continue;
		intids[i++] = irq->intid;
	}
	spin_unlock(&dist->lpi_list_lock);

	*intid_ptr = intids;
	return i;
}

/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this
 * LPI is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (!ite->collection || coll != ite->collection)
			continue;

		update_affinity_ite(kvm, ite);
	}
}

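/*
 * The IDbits field of GICR_PROPBASER encodes (number of ID bits - 1);
 * the result is capped at INTERRUPT_ID_BITS_ITS, giving the upper bound
 * on the interrupt IDs the guest may use for LPIs.
 */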
static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}

/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;
	unsigned long flags;

	nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;
		u8 pendmask;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
					     &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}

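/*
 * GITS_TYPER advertises physical LPI support, our DevBits/IDBits limits
 * and the ITE size of the currently selected ABI.
 */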
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}

static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}

static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}

static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}

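/*
 * Resolve a DeviceID/EventID pair to the vgic_irq it is mapped to.
 * Returns -EBUSY if the ITS or the target VCPU's LPIs are disabled,
 * or an ITS error code if the translation is not (fully) mapped.
 */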
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	*irq = ite->irq;
	return 0;
}

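/*
 * Translate the doorbell address encoded in an MSI to the ITS device
 * that owns it, by walking the KVM MMIO bus.
 */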
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;

	if (!vgic_has_its(kvm))
		return ERR_PTR(-ENODEV);

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return ERR_PTR(-EINVAL);

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return ERR_PTR(-EINVAL);

	if (kvm_io_dev->ops != &kvm_io_gic_ops)
		return ERR_PTR(-EINVAL);

	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
	if (iodev->iodev_type != IODEV_ITS)
		return ERR_PTR(-EINVAL);

	return iodev->its;
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct vgic_irq *irq = NULL;
	unsigned long flags;
	int err;

	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
	if (err)
		return err;

	spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description this returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_its *its;
	int ret;

	its = vgic_msi_to_its(kvm, msi);
	if (IS_ERR(its))
		return PTR_ERR(its);

	mutex_lock(&its->its_lock);
	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
	mutex_unlock(&its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}

/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq)
		vgic_put_irq(kvm, ite->irq);

	kfree(ite);
}

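/*
 * ITS commands are 32 bytes long and accessed here as four little-endian
 * 64-bit words; the macros below extract the individual command fields.
 */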
static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)	(its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)

/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && ite->collection) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}

/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);

	return 0;
}

/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
			      gpa_t *eaddr)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
	int esz = GITS_BASER_ENTRY_SIZE(baser);
	int index;
	gfn_t gfn;

	switch (type) {
	case GITS_BASER_TYPE_DEVICE:
		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
			return false;
		break;
	case GITS_BASER_TYPE_COLLECTION:
		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
		if (id >= BIT_ULL(16))
			return false;
		break;
	default:
		return false;
	}

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = BASER_ADDRESS(baser) + id * esz;
		gfn = addr >> PAGE_SHIFT;

		if (eaddr)
			*eaddr = addr;
		return kvm_is_visible_gfn(its->dev->kvm, gfn);
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest(its->dev->kvm,
			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/*
	 * Mask the guest physical address and calculate the frame number.
	 * Any address beyond our supported 48 bits of PA will be caught
	 * by the actual check in the final step.
	 */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;
	gfn = indirect_ptr >> PAGE_SHIFT;

	if (eaddr)
		*eaddr = indirect_ptr;
	return kvm_is_visible_gfn(its->dev->kvm, gfn);
}

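/*
 * Allocate a new collection and add it to the ITS collection list, after
 * checking that the ID fits into the guest's collection table.
 */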
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}

static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}

/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
					  struct its_collection *collection,
					  u32 event_id)
{
	struct its_ite *ite;

	ite = kzalloc(sizeof(*ite), GFP_KERNEL);
	if (!ite)
		return ERR_PTR(-ENOMEM);

	ite->event_id	= event_id;
	ite->collection = collection;

	list_add_tail(&ite->ite_list, &device->itt_head);
	return ite;
}

/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct kvm_vcpu *vcpu = NULL;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	struct vgic_irq *irq;
	int lpi_nr;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (event_id >= BIT_ULL(device->num_eventid_bits))
		return E_ITS_MAPTI_ID_OOR;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = vgic_its_alloc_ite(device, collection, event_id);
	if (IS_ERR(ite)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return PTR_ERR(ite);
	}

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return 0;
}

/* Requires the its_lock to be held. */
static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	list_del(&device->dev_list);
	kfree(device);
}

/* its lock must be held */
static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_device *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
		vgic_its_free_device(kvm, cur);
}

/* its lock must be held */
static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
{
	struct its_collection *cur, *temp;

	list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
		vgic_its_free_collection(its, cur->collection_id);
}

/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
						u32 device_id, gpa_t itt_addr,
						u8 num_eventid_bits)
{
	struct its_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->device_id = device_id;
	device->itt_addr = itt_addr;
	device->num_eventid_bits = num_eventid_bits;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);
	return device;
}

/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
		return E_ITS_MAPD_DEVICE_OOR;

	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
		return E_ITS_MAPD_ITTSIZE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_free_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = vgic_its_alloc_device(its, device_id, itt_addr,
				       num_eventid_bits);
	if (IS_ERR(device))
		return PTR_ERR(device);

	return 0;
}

/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}

/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	return 0;
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, ite->irq, NULL);
}

/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(vcpu, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	return 0;
}

/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		spin_lock(&irq->irq_lock);

		if (irq->target_vcpu == vcpu1)
			irq->target_vcpu = vcpu2;

		spin_unlock(&irq->irq_lock);
	}

	spin_unlock(&dist->lpi_list_lock);

	return 0;
}

/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}

/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}

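/*
 * Sanitise a guest-written GITS_BASER value: fix up the shareability and
 * cacheability attributes, drop PA bits above our supported 48 bits and
 * force the only supported (64K) page size.
 */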
static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
	reg &= ~GENMASK_ULL(15, 12);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}

static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/*
	 * Sanitise the physical address to be 64k aligned.
	 * Also limit the physical addresses to 48 bits.
	 */
	reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));

	return reg;
}

static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}

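/*
 * The command queue size in CBASER is given in 4K pages (bits [7:0] encode
 * the number of pages minus one); each command is 32 bytes, and CREADR/
 * CWRITER hold 32-byte aligned offsets into that buffer.
 */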
#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))

/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
					 cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}

/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}

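/*
 * Userspace (re)write of GITS_CREADR: only allowed while the ITS is
 * disabled and for offsets within the command buffer.
 */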
static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len,
					      unsigned long val)
{
	u32 cmd_offset;
	int ret = 0;

	mutex_lock(&its->cmd_lock);

	if (its->enabled) {
		ret = -EBUSY;
		goto out;
	}

	cmd_offset = ITS_CMD_OFFSET(val);
	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		ret = -EINVAL;
		goto out;
	}

	its->creadr = cmd_offset;
out:
	mutex_unlock(&its->cmd_lock);
	return ret;
}

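/*
 * We expose two GITS_BASER<n> registers: index 0 is the device table and
 * index 1 the collection table; all other indices are RAZ/WI.
 */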
#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}

#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 entry_size, table_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = abi->dte_esz;
		table_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = abi->cte_esz;
		table_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= table_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;

	if (!(reg & GITS_BASER_VALID)) {
		/* Take the its_lock to prevent a race with a save/restore */
		mutex_lock(&its->its_lock);
		switch (table_type) {
		case GITS_BASER_TYPE_DEVICE:
			vgic_its_free_device_list(kvm, its);
			break;
		case GITS_BASER_TYPE_COLLECTION:
			vgic_its_free_collection_list(kvm, its);
			break;
		}
		mutex_unlock(&its->its_lock);
	}
}

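/*
 * GITS_CTLR: report the Enabled bit and signal quiescence once the
 * command queue has been fully processed.
 */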
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}

static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	its->enabled = !!(val & GITS_CTLR_ENABLE);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
	.uaccess_its_write = uwr,				\
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}

static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_iidr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_creadr, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};

/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}

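/*
 * Register the ITS MMIO frame on the KVM IO bus once userspace has set
 * its base address.
 */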
static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
				   u64 addr)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	mutex_lock(&kvm->slots_lock);
	if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -EBUSY;
		goto out;
	}

	its->vgic_its_base = addr;
	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
out:
	mutex_unlock(&kvm->slots_lock);

	return ret;
}

#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))

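/*
 * Create the ITS KVM device: initialise the locks, lists and reset values
 * of the registers, and select the most recent ABI revision.
 */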
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.msis_require_devid = true;
	dev->kvm->arch.vgic.has_its = true;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
}

static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;

	mutex_lock(&its->its_lock);

	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);

	mutex_unlock(&its->its_lock);
	kfree(its);
}

int vgic_its_has_attr_regs(struct kvm_device *dev,
			   struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	gpa_t offset = attr->attr;
	int align;

	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

	if (offset & align)
		return -EINVAL;

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region)
		return -ENXIO;

	return 0;
}

int vgic_its_attr_regs_access(struct kvm_device *dev,
			      struct kvm_device_attr *attr,
			      u64 *reg, bool is_write)
{
	const struct vgic_register_region *region;
	struct vgic_its *its;
	gpa_t addr, offset;
	unsigned int len;
	int align, ret = 0;

	its = dev->private;
	offset = attr->attr;

	/*
	 * Although the spec supports upper/lower 32-bit accesses to
	 * 64-bit ITS registers, the userspace ABI requires 64-bit
	 * accesses to all 64-bit wide registers. We therefore only
	 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
	 * registers.
	 */
	if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
		align = 0x3;
	else
		align = 0x7;

	if (offset & align)
		return -EINVAL;

	mutex_lock(&dev->kvm->lock);

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -ENXIO;
		goto out;
	}

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region) {
		ret = -ENXIO;
		goto out;
	}

	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	addr = its->vgic_its_base + offset;

	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;

	if (is_write) {
		if (region->uaccess_its_write)
			ret = region->uaccess_its_write(dev->kvm, its, addr,
							len, *reg);
		else
			region->its_write(dev->kvm, its, addr, len, *reg);
	} else {
		*reg = region->its_read(dev->kvm, its, addr, len);
	}
	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

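/*
 * For table save/restore, device and event IDs are delta encoded: compute
 * the distance to the next entry in the list, clamped to the maximum the
 * table entry can hold (0 means this is the last entry).
 */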
57a9a117
EA
1765static u32 compute_next_devid_offset(struct list_head *h,
1766 struct its_device *dev)
920a7a8f
EA
1767{
1768 struct its_device *next;
1769 u32 next_offset;
1770
1771 if (list_is_last(&dev->dev_list, h))
1772 return 0;
1773 next = list_next_entry(dev, dev_list);
1774 next_offset = next->device_id - dev->device_id;
1775
1776 return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
1777}
1778
eff484e0 1779static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
920a7a8f
EA
1780{
1781 struct its_ite *next;
1782 u32 next_offset;
1783
1784 if (list_is_last(&ite->ite_list, h))
1785 return 0;
1786 next = list_next_entry(ite, ite_list);
1787 next_offset = next->event_id - ite->event_id;
1788
1789 return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
1790}
1791
1792/**
1793 * entry_fn_t - Callback called on a table entry restore path
1794 * @its: its handle
1795 * @id: id of the entry
1796 * @entry: pointer to the entry
 1797 * @opaque: pointer to opaque data
1798 *
1799 * Return: < 0 on error, 0 if last element was identified, id offset to next
1800 * element otherwise
1801 */
1802typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
1803 void *opaque);
1804
1805/**
 1806 * scan_its_table - Scan a contiguous table in guest RAM and apply a function
1807 * to each entry
1808 *
1809 * @its: its handle
1810 * @base: base gpa of the table
1811 * @size: size of the table in bytes
1812 * @esz: entry size in bytes
1813 * @start_id: the ID of the first entry in the table
 1814 * (non-zero for second level tables)
1815 * @fn: function to apply on each entry
1816 *
1817 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
1818 * (the last element may not be found on second level tables)
1819 */
1820static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
1821 int start_id, entry_fn_t fn, void *opaque)
1822{
1823 void *entry = kzalloc(esz, GFP_KERNEL);
1824 struct kvm *kvm = its->dev->kvm;
1825 unsigned long len = size;
1826 int id = start_id;
1827 gpa_t gpa = base;
1828 int ret;
 1829
	/* Bail out early if the temporary entry buffer could not be allocated */
	if (!entry)
		return -ENOMEM;

1830 while (len > 0) {
1831 int next_offset;
1832 size_t byte_offset;
1833
1834 ret = kvm_read_guest(kvm, gpa, entry, esz);
1835 if (ret)
1836 goto out;
1837
1838 next_offset = fn(its, id, entry, opaque);
1839 if (next_offset <= 0) {
1840 ret = next_offset;
1841 goto out;
1842 }
1843
1844 byte_offset = next_offset * esz;
1845 id += next_offset;
1846 gpa += byte_offset;
1847 len -= byte_offset;
1848 }
1849 ret = 1;
1850
1851out:
1852 kfree(entry);
1853 return ret;
1854}
1855
1856/**
1857 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
1858 */
1859static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
1860 struct its_ite *ite, gpa_t gpa, int ite_esz)
1861{
1862 struct kvm *kvm = its->dev->kvm;
1863 u32 next_offset;
1864 u64 val;
1865
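	/*
	 * A saved ITE is a single little-endian 64-bit word packing the
	 * offset to the next event ID, the physical LPI INTID and the
	 * collection ID into the KVM_ITS_ITE_* fields below.
	 */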
1866 next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
1867 val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
7c7d2fa1 1868 ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
1869 ite->collection->collection_id;
1870 val = cpu_to_le64(val);
1871 return kvm_write_guest(kvm, gpa, &val, ite_esz);
1872}
1873
1874/**
1875 * vgic_its_restore_ite - restore an interrupt translation entry
1876 * @event_id: id used for indexing
1877 * @ptr: pointer to the ITE entry
1878 * @opaque: pointer to the its_device
1879 */
1880static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
1881 void *ptr, void *opaque)
1882{
1883 struct its_device *dev = (struct its_device *)opaque;
1884 struct its_collection *collection;
1885 struct kvm *kvm = its->dev->kvm;
1886 struct kvm_vcpu *vcpu = NULL;
1887 u64 val;
1888 u64 *p = (u64 *)ptr;
1889 struct vgic_irq *irq;
1890 u32 coll_id, lpi_id;
1891 struct its_ite *ite;
1892 u32 offset;
1893
1894 val = *p;
1895
1896 val = le64_to_cpu(val);
1897
1898 coll_id = val & KVM_ITS_ITE_ICID_MASK;
1899 lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;
1900
1901 if (!lpi_id)
1902 return 1; /* invalid entry, no choice but to scan next entry */
1903
1904 if (lpi_id < VGIC_MIN_LPI)
1905 return -EINVAL;
1906
1907 offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
1908 if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
1909 return -EINVAL;
1910
1911 collection = find_collection(its, coll_id);
1912 if (!collection)
1913 return -EINVAL;
1914
7c7d2fa1 1915 ite = vgic_its_alloc_ite(dev, collection, event_id);
1916 if (IS_ERR(ite))
1917 return PTR_ERR(ite);
1918
1919 if (its_is_collection_mapped(collection))
1920 vcpu = kvm_get_vcpu(kvm, collection->target_addr);
1921
1922 irq = vgic_add_lpi(kvm, lpi_id, vcpu);
1923 if (IS_ERR(irq))
1924 return PTR_ERR(irq);
1925 ite->irq = irq;
1926
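	/*
	 * Returning the next-event-ID offset lets scan_its_table() jump
	 * straight to the following ITE; a zero offset marks the last entry.
	 */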
1927 return offset;
1928}
1929
1930static int vgic_its_ite_cmp(void *priv, struct list_head *a,
1931 struct list_head *b)
1932{
1933 struct its_ite *itea = container_of(a, struct its_ite, ite_list);
1934 struct its_ite *iteb = container_of(b, struct its_ite, ite_list);
1935
1936 if (itea->event_id < iteb->event_id)
1937 return -1;
1938 else
1939 return 1;
1940}
1941
1942static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
1943{
1944 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
1945 gpa_t base = device->itt_addr;
1946 struct its_ite *ite;
1947 int ret;
1948 int ite_esz = abi->ite_esz;
1949
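	/*
	 * Sort the ITT by ascending event ID so that every saved entry
	 * encodes a positive offset to its successor.
	 */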
1950 list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);
1951
1952 list_for_each_entry(ite, &device->itt_head, ite_list) {
1953 gpa_t gpa = base + ite->event_id * ite_esz;
1954
1955 ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
1956 if (ret)
1957 return ret;
1958 }
1959 return 0;
1960}
1961
1962static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
1963{
1964 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
1965 gpa_t base = dev->itt_addr;
1966 int ret;
1967 int ite_esz = abi->ite_esz;
1968 size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;
1969
1970 ret = scan_its_table(its, base, max_size, ite_esz, 0,
1971 vgic_its_restore_ite, dev);
1972
1973 return ret;
1974}
1975
1976/**
1977 * vgic_its_save_dte - Save a device table entry at a given GPA
1978 *
1979 * @its: ITS handle
1980 * @dev: ITS device
1981 * @ptr: GPA
1982 */
1983static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
1984 gpa_t ptr, int dte_esz)
1985{
1986 struct kvm *kvm = its->dev->kvm;
1987 u64 val, itt_addr_field;
1988 u32 next_offset;
1989
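	/*
	 * A saved DTE is a single little-endian 64-bit word packing a valid
	 * bit, the offset to the next device ID, the ITT base address
	 * (shifted right by 8 bits) and the number of event ID bits minus
	 * one into the KVM_ITS_DTE_* fields below.
	 */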
1990 itt_addr_field = dev->itt_addr >> 8;
1991 next_offset = compute_next_devid_offset(&its->device_list, dev);
1992 val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
1993 ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
1994 (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
1995 (dev->num_eventid_bits - 1));
1996 val = cpu_to_le64(val);
1997 return kvm_write_guest(kvm, ptr, &val, dte_esz);
1998}
1999
2000/**
2001 * vgic_its_restore_dte - restore a device table entry
2002 *
2003 * @its: its handle
2004 * @id: device id the DTE corresponds to
2005 * @ptr: kernel VA where the 8 byte DTE is located
2006 * @opaque: unused
2007 *
2008 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
2009 * next dte otherwise
2010 */
2011static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
2012 void *ptr, void *opaque)
2013{
2014 struct its_device *dev;
2015 gpa_t itt_addr;
2016 u8 num_eventid_bits;
2017 u64 entry = *(u64 *)ptr;
2018 bool valid;
2019 u32 offset;
2020 int ret;
2021
2022 entry = le64_to_cpu(entry);
2023
2024 valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
2025 num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
2026 itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
2027 >> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;
2028
2029 if (!valid)
2030 return 1;
2031
2032 /* dte entry is valid */
2033 offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;
2034
2035 dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
2036 if (IS_ERR(dev))
2037 return PTR_ERR(dev);
2038
2039 ret = vgic_its_restore_itt(its, dev);
2040 if (ret) {
2041 vgic_its_free_device(its->dev->kvm, dev);
57a9a117 2042 return ret;
a2b19e6e 2043 }
2044
2045 return offset;
2046}
2047
2048static int vgic_its_device_cmp(void *priv, struct list_head *a,
2049 struct list_head *b)
2050{
2051 struct its_device *deva = container_of(a, struct its_device, dev_list);
2052 struct its_device *devb = container_of(b, struct its_device, dev_list);
2053
2054 if (deva->device_id < devb->device_id)
2055 return -1;
2056 else
2057 return 1;
2058}
2059
2060/**
2061 * vgic_its_save_device_tables - Save the device table and all ITT
2062 * into guest RAM
2063 *
 2064 * L1/L2 handling is hidden by the vgic_its_check_id() helper, which
 2065 * directly returns the GPA of the device entry
2066 */
2067static int vgic_its_save_device_tables(struct vgic_its *its)
2068{
2069 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2070 struct its_device *dev;
2071 int dte_esz = abi->dte_esz;
2072 u64 baser;
2073
2074 baser = its->baser_device_table;
2075
2076 list_sort(NULL, &its->device_list, vgic_its_device_cmp);
2077
2078 list_for_each_entry(dev, &its->device_list, dev_list) {
2079 int ret;
2080 gpa_t eaddr;
2081
2082 if (!vgic_its_check_id(its, baser,
2083 dev->device_id, &eaddr))
2084 return -EINVAL;
2085
2086 ret = vgic_its_save_itt(its, dev);
2087 if (ret)
2088 return ret;
2089
2090 ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
2091 if (ret)
2092 return ret;
2093 }
2094 return 0;
2095}
2096
2097/**
2098 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
2099 *
2100 * @its: its handle
2101 * @id: index of the entry in the L1 table
2102 * @addr: kernel VA
2103 * @opaque: unused
2104 *
 2105 * L1 table entries are scanned in steps of 1 entry
2106 * Return < 0 if error, 0 if last dte was found when scanning the L2
2107 * table, +1 otherwise (meaning next L1 entry must be scanned)
2108 */
2109static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
2110 void *opaque)
2111{
2112 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2113 int l2_start_id = id * (SZ_64K / abi->dte_esz);
2114 u64 entry = *(u64 *)addr;
2115 int dte_esz = abi->dte_esz;
2116 gpa_t gpa;
2117 int ret;
2118
2119 entry = le64_to_cpu(entry);
2120
2121 if (!(entry & KVM_ITS_L1E_VALID_MASK))
2122 return 1;
2123
2124 gpa = entry & KVM_ITS_L1E_ADDR_MASK;
2125
2126 ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
2127 l2_start_id, vgic_its_restore_dte, NULL);
2128
2129 if (ret <= 0)
2130 return ret;
2131
2132 return 1;
2133}
2134
2135/**
2136 * vgic_its_restore_device_tables - Restore the device table and all ITT
2137 * from guest RAM to internal data structs
2138 */
2139static int vgic_its_restore_device_tables(struct vgic_its *its)
2140{
2141 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2142 u64 baser = its->baser_device_table;
2143 int l1_esz, ret;
2144 int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2145 gpa_t l1_gpa;
2146
2147 if (!(baser & GITS_BASER_VALID))
2148 return 0;
2149
2150 l1_gpa = BASER_ADDRESS(baser);
2151
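	/*
	 * An indirect device table has 64-bit first level entries, each
	 * pointing to a 64K page of DTEs (see handle_l1_dte()); a flat
	 * table is scanned directly using the ABI's DTE size.
	 */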
2152 if (baser & GITS_BASER_INDIRECT) {
2153 l1_esz = GITS_LVL1_ENTRY_SIZE;
2154 ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2155 handle_l1_dte, NULL);
2156 } else {
2157 l1_esz = abi->dte_esz;
2158 ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2159 vgic_its_restore_dte, NULL);
2160 }
2161
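	/*
	 * A positive return means the scan ran off the end of the table
	 * without identifying the last entry, which is invalid here.
	 */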
2162 if (ret > 0)
2163 ret = -EINVAL;
2164
2165 return ret;
2166}
2167
2168static int vgic_its_save_cte(struct vgic_its *its,
2169 struct its_collection *collection,
2170 gpa_t gpa, int esz)
2171{
2172 u64 val;
2173
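	/*
	 * A saved CTE is a single little-endian 64-bit word packing a valid
	 * bit, the target redistributor (the vcpu index in this ABI) and the
	 * collection ID into the KVM_ITS_CTE_* fields below.
	 */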
2174 val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
2175 ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
2176 collection->collection_id);
2177 val = cpu_to_le64(val);
2178 return kvm_write_guest(its->dev->kvm, gpa, &val, esz);
2179}
2180
2181static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
2182{
2183 struct its_collection *collection;
2184 struct kvm *kvm = its->dev->kvm;
2185 u32 target_addr, coll_id;
2186 u64 val;
2187 int ret;
2188
2189 BUG_ON(esz > sizeof(val));
2190 ret = kvm_read_guest(kvm, gpa, &val, esz);
2191 if (ret)
2192 return ret;
2193 val = le64_to_cpu(val);
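	/* An invalid CTE terminates the table: tell the caller to stop scanning */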
2194 if (!(val & KVM_ITS_CTE_VALID_MASK))
2195 return 0;
2196
2197 target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
2198 coll_id = val & KVM_ITS_CTE_ICID_MASK;
2199
2200 if (target_addr >= atomic_read(&kvm->online_vcpus))
2201 return -EINVAL;
2202
2203 collection = find_collection(its, coll_id);
2204 if (collection)
2205 return -EEXIST;
2206 ret = vgic_its_alloc_collection(its, &collection, coll_id);
2207 if (ret)
2208 return ret;
2209 collection->target_addr = target_addr;
2210 return 1;
2211}
2212
2213/**
2214 * vgic_its_save_collection_table - Save the collection table into
2215 * guest RAM
2216 */
2217static int vgic_its_save_collection_table(struct vgic_its *its)
2218{
2219 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2220 struct its_collection *collection;
2221 u64 val;
2222 gpa_t gpa;
2223 size_t max_size, filled = 0;
2224 int ret, cte_esz = abi->cte_esz;
2225
2226 gpa = BASER_ADDRESS(its->baser_coll_table);
2227 if (!gpa)
2228 return 0;
2229
2230 max_size = GITS_BASER_NR_PAGES(its->baser_coll_table) * SZ_64K;
2231
2232 list_for_each_entry(collection, &its->collection_list, coll_list) {
2233 ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
2234 if (ret)
2235 return ret;
2236 gpa += cte_esz;
2237 filled += cte_esz;
2238 }
2239
2240 if (filled == max_size)
2241 return 0;
2242
2243 /*
2244 * table is not fully filled, add a last dummy element
2245 * with valid bit unset
2246 */
2247 val = 0;
2248 BUG_ON(cte_esz > sizeof(val));
2249 ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz);
2250 return ret;
2251}
2252
2253/**
2254 * vgic_its_restore_collection_table - reads the collection table
2255 * in guest memory and restores the ITS internal state. Requires the
 2256 * BASER registers to have been restored beforehand.
2257 */
2258static int vgic_its_restore_collection_table(struct vgic_its *its)
2259{
2260 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2261 int cte_esz = abi->cte_esz;
2262 size_t max_size, read = 0;
2263 gpa_t gpa;
2264 int ret;
2265
2266 if (!(its->baser_coll_table & GITS_BASER_VALID))
2267 return 0;
2268
2269 gpa = BASER_ADDRESS(its->baser_coll_table);
2270
2271 max_size = GITS_BASER_NR_PAGES(its->baser_coll_table) * SZ_64K;
2272
2273 while (read < max_size) {
2274 ret = vgic_its_restore_cte(its, gpa, cte_esz);
2275 if (ret <= 0)
2276 break;
2277 gpa += cte_esz;
2278 read += cte_esz;
2279 }
2280 return ret;
2281}
2282
2283/**
 2284 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
2285 * according to v0 ABI
2286 */
2287static int vgic_its_save_tables_v0(struct vgic_its *its)
2288{
2289 int ret;
2290
2291 ret = vgic_its_save_device_tables(its);
2292 if (ret)
3eb4271b 2293 return ret;
3b65808f 2294
3eb4271b 2295 return vgic_its_save_collection_table(its);
2296}
2297
2298/**
2299 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
2300 * to internal data structs according to V0 ABI
2301 *
2302 */
2303static int vgic_its_restore_tables_v0(struct vgic_its *its)
2304{
2305 int ret;
2306
2307 ret = vgic_its_restore_collection_table(its);
2308 if (ret)
3eb4271b 2309 return ret;
3b65808f 2310
3eb4271b 2311 return vgic_its_restore_device_tables(its);
2312}
2313
2314static int vgic_its_commit_v0(struct vgic_its *its)
2315{
2316 const struct vgic_its_abi *abi;
2317
2318 abi = vgic_its_get_abi(its);
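	/*
	 * Advertise the ABI's collection and device table entry sizes in the
	 * GITS_BASER ENTRY_SIZE fields.
	 */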
2319 its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2320 its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2321
2322 its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
2323 << GITS_BASER_ENTRY_SIZE_SHIFT);
2324
2325 its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
2326 << GITS_BASER_ENTRY_SIZE_SHIFT);
2327 return 0;
2328}
2329
2330static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
2331{
2332 /* We need to keep the ABI specific field values */
2333 its->baser_coll_table &= ~GITS_BASER_VALID;
2334 its->baser_device_table &= ~GITS_BASER_VALID;
2335 its->cbaser = 0;
2336 its->creadr = 0;
2337 its->cwriter = 0;
2338 its->enabled = 0;
2339 vgic_its_free_device_list(kvm, its);
2340 vgic_its_free_collection_list(kvm, its);
2341}
2342
2343static int vgic_its_has_attr(struct kvm_device *dev,
2344 struct kvm_device_attr *attr)
2345{
2346 switch (attr->group) {
2347 case KVM_DEV_ARM_VGIC_GRP_ADDR:
2348 switch (attr->attr) {
2349 case KVM_VGIC_ITS_ADDR_TYPE:
2350 return 0;
2351 }
2352 break;
2353 case KVM_DEV_ARM_VGIC_GRP_CTRL:
2354 switch (attr->attr) {
2355 case KVM_DEV_ARM_VGIC_CTRL_INIT:
2356 return 0;
2357 case KVM_DEV_ARM_ITS_CTRL_RESET:
2358 return 0;
2359 case KVM_DEV_ARM_ITS_SAVE_TABLES:
2360 return 0;
2361 case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2362 return 0;
2363 }
2364 break;
2365 case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
2366 return vgic_its_has_attr_regs(dev, attr);
2367 }
2368 return -ENXIO;
2369}
2370
2371static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
2372{
2373 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2374 int ret = 0;
2375
2376 if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
2377 return 0;
2378
2379 mutex_lock(&kvm->lock);
2380 mutex_lock(&its->its_lock);
2381
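	/*
	 * Stop all vcpus so that the ITS and LPI state cannot change while
	 * the tables are saved or restored, or while the ITS is reset.
	 */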
2382 if (!lock_all_vcpus(kvm)) {
2383 mutex_unlock(&its->its_lock);
2384 mutex_unlock(&kvm->lock);
2385 return -EBUSY;
2386 }
2387
2388 switch (attr) {
2389 case KVM_DEV_ARM_ITS_CTRL_RESET:
2390 vgic_its_reset(kvm, its);
2391 break;
2392 case KVM_DEV_ARM_ITS_SAVE_TABLES:
2393 ret = abi->save_tables(its);
2394 break;
2395 case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2396 ret = abi->restore_tables(its);
2397 break;
2398 }
2399
2400 unlock_all_vcpus(kvm);
2401 mutex_unlock(&its->its_lock);
2402 mutex_unlock(&kvm->lock);
2403 return ret;
2404}
2405
2406static int vgic_its_set_attr(struct kvm_device *dev,
2407 struct kvm_device_attr *attr)
2408{
2409 struct vgic_its *its = dev->private;
2410 int ret;
2411
2412 switch (attr->group) {
2413 case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2414 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2415 unsigned long type = (unsigned long)attr->attr;
2416 u64 addr;
2417
2418 if (type != KVM_VGIC_ITS_ADDR_TYPE)
2419 return -ENODEV;
2420
2421 if (copy_from_user(&addr, uaddr, sizeof(addr)))
2422 return -EFAULT;
2423
2424 ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
2425 addr, SZ_64K);
2426 if (ret)
2427 return ret;
2428
30e1b684 2429 return vgic_register_its_iodev(dev->kvm, its, addr);
1085fdc6 2430 }
2431 case KVM_DEV_ARM_VGIC_GRP_CTRL:
2432 return vgic_its_ctrl(dev->kvm, its, attr->attr);
2433 case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2434 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2435 u64 reg;
2436
2437 if (get_user(reg, uaddr))
2438 return -EFAULT;
2439
2440 return vgic_its_attr_regs_access(dev, attr, &reg, true);
2441 }
2442 }
2443 return -ENXIO;
2444}
2445
2446static int vgic_its_get_attr(struct kvm_device *dev,
2447 struct kvm_device_attr *attr)
2448{
2449 switch (attr->group) {
2450 case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2451 struct vgic_its *its = dev->private;
2452 u64 addr = its->vgic_its_base;
2453 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2454 unsigned long type = (unsigned long)attr->attr;
2455
2456 if (type != KVM_VGIC_ITS_ADDR_TYPE)
2457 return -ENODEV;
2458
2459 if (copy_to_user(uaddr, &addr, sizeof(addr)))
2460 return -EFAULT;
2461 break;
2462 }
2463 case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2464 u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2465 u64 reg;
2466 int ret;
2467
2468 ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
2469 if (ret)
2470 return ret;
2471 return put_user(reg, uaddr);
2472 }
2473 default:
2474 return -ENXIO;
2475 }
2476
2477 return 0;
2478}
2479
2480static struct kvm_device_ops kvm_arm_vgic_its_ops = {
2481 .name = "kvm-arm-vgic-its",
2482 .create = vgic_its_create,
2483 .destroy = vgic_its_destroy,
2484 .set_attr = vgic_its_set_attr,
2485 .get_attr = vgic_its_get_attr,
2486 .has_attr = vgic_its_has_attr,
2487};
2488
2489int kvm_vgic_register_its_device(void)
2490{
2491 return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
2492 KVM_DEV_TYPE_ARM_VGIC_ITS);
2493}