/*
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu);
/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;

	spin_lock(&dist->lpi_list_lock);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 */
	ret = update_lpi_config(kvm, irq, NULL);
	if (ret)
		return ERR_PTR(ret);

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret)
		return ERR_PTR(ret);

	return irq;
}
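
/*
 * Illustrative usage sketch, NOT part of the original file: the get/put
 * discipline the comment above expects from callers of vgic_add_lpi().
 * The function name and trimmed error handling are hypothetical.
 */
static __maybe_unused void vgic_add_lpi_usage_sketch(struct kvm *kvm, u32 intid,
						     struct kvm_vcpu *vcpu)
{
	struct vgic_irq *irq = vgic_add_lpi(kvm, intid, vcpu);

	if (IS_ERR(irq))
		return;		/* allocation or config table read failed */

	/* ... use irq, taking irq->irq_lock where required ... */

	vgic_put_irq(kvm, irq);	/* drop the reference taken above */
}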
struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	gpa_t itt_addr;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 event_id;
};
/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;

	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {.cte_esz = 8, .dte_esz = 8, .ite_esz = 8,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)
inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}
int vgic_its_set_abi(struct vgic_its *its, int rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}
/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}
/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}
/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)
/*
 * We only implement 48 bits of PA at the moment, although the ITS
 * supports more. Let's be restrictive here.
 */
#define BASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define CBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)
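
/*
 * Worked example, added for illustration only (not in the original file):
 * the masks above keep bits [47:16] / [47:12] of the programmed address,
 * so anything a guest writes outside the supported 48-bit PA range is
 * silently dropped, and the base stays 64K / 4K aligned respectively.
 */
static __maybe_unused void baser_mask_example(void)
{
	u64 raw  = 0xffff8000deadbeefULL;

	WARN_ON(BASER_ADDRESS(raw)  != 0x00008000dead0000ULL);
	WARN_ON(CBASER_ADDRESS(raw) != 0x00008000deadb000ULL);
}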
/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}
#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
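
/*
 * Illustrative example, not in the original file: an LPI configuration
 * byte of 0xa5 decodes as priority 0xa4 (top six bits) with the enable
 * bit set (LPI_PROP_ENABLED is bit 0, per arm-gic-v3.h).
 */
static __maybe_unused void lpi_prop_decode_example(void)
{
	u8 prop = 0xa5;

	WARN_ON(LPI_PROP_PRIORITY(prop) != 0xa4);
	WARN_ON(!LPI_PROP_ENABLE_BIT(prop));
}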
/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;

	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
			     &prop, 1);
	if (ret)
		return ret;

	spin_lock(&irq->irq_lock);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		vgic_queue_irq_unlock(kvm, irq);
	} else {
		spin_unlock(&irq->irq_lock);
	}

	return 0;
}
/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count = dist->lpi_list_count, i = 0;

	/*
	 * We use the current value of the list length, which may change
	 * after the kmalloc. We don't care, because the guest shouldn't
	 * change anything while the command handling is still running,
	 * and in the worst case we would miss a new IRQ, which one wouldn't
	 * expect to be covered by this command anyway.
	 */
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	spin_lock(&dist->lpi_list_lock);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		if (irq->target_vcpu != vcpu)
			continue;
		intids[i++] = irq->intid;
	}
	spin_unlock(&dist->lpi_list_lock);

	*intid_ptr = intids;
	return i;
}
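
/*
 * Illustrative usage sketch, NOT part of the original file: callers own
 * the array handed back through @intid_ptr and must kfree() it. The
 * function name below is hypothetical.
 */
static __maybe_unused void copy_lpi_list_usage_sketch(struct kvm_vcpu *vcpu)
{
	u32 *intids;
	int nr_irqs, i;

	nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
	if (nr_irqs < 0)
		return;		/* only -ENOMEM is possible here */

	for (i = 0; i < nr_irqs; i++) {
		/* ... look up and handle intids[i] ... */
	}

	kfree(intids);
}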
/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection of an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);
}
/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (!ite->collection || coll != ite->collection)
			continue;

		update_affinity_ite(kvm, ite);
	}
}
static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}
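
/*
 * Worked example, illustration only (not in the original file): the low
 * five bits of GICR_PROPBASER encode "IDbits minus one", so a guest
 * programming IDbits = 15 advertises 2^16 interrupt IDs, making LPIs
 * 8192..65535 reachable.
 */
static __maybe_unused void max_lpis_example(void)
{
	u64 propbaser = 15;	/* only the IDbits field set */

	WARN_ON(max_lpis_propbaser(propbaser) != (1U << 16));
}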
/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;
	u8 pendmask;

	nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
					     &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		spin_lock(&irq->irq_lock);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}
static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}
static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}
static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}
/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	spin_lock(&ite->irq->irq_lock);
	ite->irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, ite->irq);

	return 0;
}
static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
{
	struct vgic_io_device *iodev;

	if (dev->ops != &kvm_io_gic_ops)
		return NULL;

	iodev = container_of(dev, struct vgic_io_device, dev);

	if (iodev->iodev_type != IODEV_ITS)
		return NULL;

	return iodev;
}
/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;
	int ret;

	if (!vgic_has_its(kvm))
		return -ENODEV;

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return -EINVAL;

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return -EINVAL;

	iodev = vgic_get_its_iodev(kvm_io_dev);
	if (!iodev)
		return -EINVAL;

	mutex_lock(&iodev->its->its_lock);
	ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
	mutex_unlock(&iodev->its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	return 1;
}
/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq)
		vgic_put_irq(kvm, ite->irq);

	kfree(ite);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}
#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)	(its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
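
/*
 * Worked example, illustration only (not in the original file): decoding
 * a hand-built MAPTI command with the accessors above. A command is four
 * little-endian 64-bit words; MAPTI carries the DeviceID in word 0, the
 * EventID and pINTID in word 1 and the ICID in word 2.
 */
static __maybe_unused void its_cmd_decode_example(void)
{
	u64 cmd[4] = {
		cpu_to_le64((0x12ULL << 32) | GITS_CMD_MAPTI),
		cpu_to_le64((8192ULL << 32) | 7),	/* pINTID 8192, EventID 7 */
		cpu_to_le64(3),				/* ICID 3 */
		0,
	};

	WARN_ON(its_cmd_get_command(cmd) != GITS_CMD_MAPTI);
	WARN_ON(its_cmd_get_deviceid(cmd) != 0x12);
	WARN_ON(its_cmd_get_id(cmd) != 7);
	WARN_ON(its_cmd_get_physical_id(cmd) != 8192);
	WARN_ON(its_cmd_get_collection(cmd) != 3);
}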
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && ite->collection) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}
/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);

	return 0;
}
/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
			      gpa_t *eaddr)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
	int esz = GITS_BASER_ENTRY_SIZE(baser);
	int index;
	gfn_t gfn;

	switch (type) {
	case GITS_BASER_TYPE_DEVICE:
		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
			return false;
		break;
	case GITS_BASER_TYPE_COLLECTION:
		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
		if (id >= BIT_ULL(16))
			return false;
		break;
	default:
		return false;
	}

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = BASER_ADDRESS(baser) + id * esz;
		gfn = addr >> PAGE_SHIFT;

		if (eaddr)
			*eaddr = addr;
		return kvm_is_visible_gfn(its->dev->kvm, gfn);
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest(its->dev->kvm,
			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/*
	 * Mask the guest physical address and calculate the frame number.
	 * Any address beyond our supported 48 bits of PA will be caught
	 * by the actual check in the final step.
	 */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;
	gfn = indirect_ptr >> PAGE_SHIFT;

	if (eaddr)
		*eaddr = indirect_ptr;
	return kvm_is_visible_gfn(its->dev->kvm, gfn);
}
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}
static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}
/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
					  struct its_collection *collection,
					  u32 event_id)
{
	struct its_ite *ite;

	ite = kzalloc(sizeof(*ite), GFP_KERNEL);
	if (!ite)
		return ERR_PTR(-ENOMEM);

	ite->event_id	= event_id;
	ite->collection = collection;

	list_add_tail(&ite->ite_list, &device->itt_head);
	return ite;
}
/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct kvm_vcpu *vcpu = NULL;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	struct vgic_irq *irq;
	int lpi_nr;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (event_id >= BIT_ULL(device->num_eventid_bits))
		return E_ITS_MAPTI_ID_OOR;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = vgic_its_alloc_ite(device, collection, event_id);
	if (IS_ERR(ite)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return PTR_ERR(ite);
	}

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return 0;
}
/* Requires the its_lock to be held. */
static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	list_del(&device->dev_list);
	kfree(device);
}
/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
						u32 device_id, gpa_t itt_addr,
						u8 num_eventid_bits)
{
	struct its_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	device->device_id = device_id;
	device->itt_addr = itt_addr;
	device->num_eventid_bits = num_eventid_bits;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);
	return device;
}
/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
		return E_ITS_MAPD_DEVICE_OOR;

	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
		return E_ITS_MAPD_ITTSIZE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_unmap_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = vgic_its_alloc_device(its, device_id, itt_addr,
				       num_eventid_bits);
	if (IS_ERR(device))
		return PTR_ERR(device);

	return 0;
}
/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}
/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	return 0;
}
/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, ite->irq, NULL);
}
/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(vcpu, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	return 0;
}
/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		spin_lock(&irq->irq_lock);

		if (irq->target_vcpu == vcpu1)
			irq->target_vcpu = vcpu2;

		spin_unlock(&irq->irq_lock);
	}

	spin_unlock(&dist->lpi_list_lock);

	return 0;
}
/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}
/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}
static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
	reg &= ~GENMASK_ULL(15, 12);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}
static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/*
	 * Sanitise the physical address to be 64k aligned.
	 * Also limit the physical addresses to 48 bits.
	 */
	reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));

	return reg;
}
static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}
static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}
#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
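
/*
 * Worked example, illustration only (not in the original file): CBASER's
 * Size field encodes the number of 4K pages minus one, so a value of 1
 * yields an 8 KiB ring holding 256 32-byte commands; CREADR and CWRITER
 * wrap at that size.
 */
static __maybe_unused void its_cmd_ring_example(void)
{
	u64 cbaser = 1;		/* only the Size field set: two 4K pages */
	u32 ring = ITS_CMD_BUFFER_SIZE(cbaser);

	WARN_ON(ring != 2 * SZ_4K);
	WARN_ON(ring / ITS_CMD_SIZE != 256);
}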
/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
					 cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}
/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}
static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}
static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len,
					      unsigned long val)
{
	u32 cmd_offset;
	int ret = 0;

	mutex_lock(&its->cmd_lock);

	if (its->enabled) {
		ret = -EBUSY;
		goto out;
	}

	cmd_offset = ITS_CMD_OFFSET(val);
	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		ret = -EINVAL;
		goto out;
	}

	its->creadr = cmd_offset;
out:
	mutex_unlock(&its->cmd_lock);
	return ret;
}
#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}
#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 entry_size, device_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = abi->dte_esz;
		device_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = abi->cte_esz;
		device_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= device_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;
}
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}
static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	its->enabled = !!(val & GITS_CTLR_ENABLE);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}
#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

#define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
	.uaccess_its_write = uwr,				\
}
static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}
static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_iidr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi,
		vgic_mmio_uaccess_write_its_creadr, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};
/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}
static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
				   u64 addr)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	mutex_lock(&kvm->slots_lock);
	if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -EBUSY;
		goto out;
	}

	its->vgic_its_base = addr;
	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
out:
	mutex_unlock(&kvm->slots_lock);

	return ret;
}
#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.msis_require_devid = true;
	dev->kvm->arch.vgic.has_its = true;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
}
static void vgic_its_free_device(struct kvm *kvm, struct its_device *dev)
{
	struct its_ite *ite, *tmp;

	list_for_each_entry_safe(ite, tmp, &dev->itt_head, ite_list)
		its_free_ite(kvm, ite);
	list_del(&dev->dev_list);
	kfree(dev);
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;
	struct list_head *cur, *temp;

	/*
	 * We may end up here without the lists ever having been initialized.
	 * Check this and bail out early to avoid dereferencing a NULL pointer.
	 */
	if (!its->device_list.next)
		return;

	mutex_lock(&its->its_lock);
	list_for_each_safe(cur, temp, &its->device_list) {
		struct its_device *dev;

		dev = list_entry(cur, struct its_device, dev_list);
		vgic_its_free_device(kvm, dev);
	}

	list_for_each_safe(cur, temp, &its->collection_list) {
		struct its_collection *coll;

		coll = list_entry(cur, struct its_collection, coll_list);
		list_del(cur);
		kfree(coll);
	}
	mutex_unlock(&its->its_lock);

	kfree(its);
}
int vgic_its_has_attr_regs(struct kvm_device *dev,
			   struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	gpa_t offset = attr->attr;
	int align;

	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;

	if (offset & align)
		return -EINVAL;

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region)
		return -ENXIO;

	return 0;
}
int vgic_its_attr_regs_access(struct kvm_device *dev,
			      struct kvm_device_attr *attr,
			      u64 *reg, bool is_write)
{
	const struct vgic_register_region *region;
	struct vgic_its *its;
	gpa_t addr, offset;
	unsigned int len;
	int align, ret = 0;

	its = dev->private;
	offset = attr->attr;

	/*
	 * Although the spec supports upper/lower 32-bit accesses to
	 * 64-bit ITS registers, the userspace ABI requires 64-bit
	 * accesses to all 64-bit wide registers. We therefore only
	 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and GITS ID
	 * registers.
	 */
	if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
		align = 0x3;
	else
		align = 0x7;

	if (offset & align)
		return -EINVAL;

	mutex_lock(&dev->kvm->lock);

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
		ret = -ENXIO;
		goto out;
	}

	region = vgic_find_mmio_region(its_registers,
				       ARRAY_SIZE(its_registers),
				       offset);
	if (!region) {
		ret = -ENXIO;
		goto out;
	}

	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	addr = its->vgic_its_base + offset;

	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;

	if (is_write) {
		if (region->uaccess_its_write)
			ret = region->uaccess_its_write(dev->kvm, its, addr,
							len, *reg);
		else
			region->its_write(dev->kvm, its, addr, len, *reg);
	} else {
		*reg = region->its_read(dev->kvm, its, addr, len);
	}
	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}
static u32 compute_next_devid_offset(struct list_head *h,
				     struct its_device *dev)
{
	struct its_device *next;
	u32 next_offset;

	if (list_is_last(&dev->dev_list, h))
		return 0;
	next = list_next_entry(dev, dev_list);
	next_offset = next->device_id - dev->device_id;

	return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
}

static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
{
	struct its_ite *next;
	u32 next_offset;

	if (list_is_last(&ite->ite_list, h))
		return 0;
	next = list_next_entry(ite, ite_list);
	next_offset = next->event_id - ite->event_id;

	return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
}
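
/*
 * Illustrative example, not in the original file: the "next" offset
 * stored in a saved entry is the distance to the next allocated ID,
 * clamped to the width of the field, so a gap of 100000 device IDs is
 * reported as the maximum encodable offset (BIT(14) - 1).
 */
static __maybe_unused void next_offset_clamp_example(void)
{
	u32 gap = 100000;

	WARN_ON(min_t(u32, gap, VITS_DTE_MAX_DEVID_OFFSET) !=
		VITS_DTE_MAX_DEVID_OFFSET);
}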
/**
 * entry_fn_t - Callback called on a table entry restore path
 * @its: its handle
 * @id: id of the entry
 * @entry: pointer to the entry
 * @opaque: pointer to an opaque data
 *
 * Return: < 0 on error, 0 if last element was identified, id offset to next
 * element otherwise
 */
typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
			  void *opaque);
/**
 * scan_its_table - Scan a contiguous table in guest RAM and apply a function
 * to each entry
 *
 * @its: its handle
 * @base: base gpa of the table
 * @size: size of the table in bytes
 * @esz: entry size in bytes
 * @start_id: the ID of the first entry in the table
 * (non zero for 2d level tables)
 * @fn: function to apply on each entry
 *
 * Return: < 0 on error, 0 if last element was identified, 1 otherwise
 * (the last element may not be found on second level tables)
 */
static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
			  int start_id, entry_fn_t fn, void *opaque)
{
	void *entry = kzalloc(esz, GFP_KERNEL);
	struct kvm *kvm = its->dev->kvm;
	unsigned long len = size;
	int id = start_id;
	gpa_t gpa = base;
	int ret;

	if (!entry)
		return -ENOMEM;

	while (len > 0) {
		int next_offset;
		size_t byte_offset;

		ret = kvm_read_guest(kvm, gpa, entry, esz);
		if (ret)
			goto out;

		next_offset = fn(its, id, entry, opaque);
		if (next_offset <= 0) {
			ret = next_offset;
			goto out;
		}

		byte_offset = next_offset * esz;
		id += next_offset;
		gpa += byte_offset;
		len -= byte_offset;
	}
	ret = 1;

out:
	kfree(entry);
	return ret;
}
/**
 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 */
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
			     struct its_ite *ite, gpa_t gpa, int ite_esz)
{
	struct kvm *kvm = its->dev->kvm;
	u32 next_offset;
	u64 val;

	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
		ite->collection->collection_id;
	val = cpu_to_le64(val);
	return kvm_write_guest(kvm, gpa, &val, ite_esz);
}
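
/*
 * Illustrative sketch, not in the original file, of the v0 ABI ITE
 * layout produced above: | next [63:48] | pINTID [47:16] | ICID [15:0] |.
 * Decoding uses the same KVM_ITS_ITE_* masks the restore path relies on.
 */
static __maybe_unused void ite_layout_example(void)
{
	u64 val = ((u64)1 << KVM_ITS_ITE_NEXT_SHIFT) |
		  ((u64)8192 << KVM_ITS_ITE_PINTID_SHIFT) | 3;

	WARN_ON((val & KVM_ITS_ITE_ICID_MASK) != 3);
	WARN_ON(((val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT)
		!= 8192);
	WARN_ON((val >> KVM_ITS_ITE_NEXT_SHIFT) != 1);
}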
/**
 * vgic_its_restore_ite - restore an interrupt translation entry
 * @event_id: id used for indexing
 * @ptr: pointer to the ITE entry
 * @opaque: pointer to the its_device
 */
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
				void *ptr, void *opaque)
{
	struct its_device *dev = (struct its_device *)opaque;
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	struct kvm_vcpu *vcpu = NULL;
	u64 val;
	u64 *p = (u64 *)ptr;
	struct vgic_irq *irq;
	u32 coll_id, lpi_id;
	struct its_ite *ite;
	u32 offset;

	val = *p;

	val = le64_to_cpu(val);

	coll_id = val & KVM_ITS_ITE_ICID_MASK;
	lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;

	if (!lpi_id)
		return 1; /* invalid entry, no choice but to scan next entry */

	if (lpi_id < VGIC_MIN_LPI)
		return -EINVAL;

	offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
	if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (!collection)
		return -EINVAL;

	ite = vgic_its_alloc_ite(dev, collection, event_id);
	if (IS_ERR(ite))
		return PTR_ERR(ite);

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
	if (IS_ERR(irq))
		return PTR_ERR(irq);
	ite->irq = irq;

	return offset;
}
static int vgic_its_ite_cmp(void *priv, struct list_head *a,
			    struct list_head *b)
{
	struct its_ite *itea = container_of(a, struct its_ite, ite_list);
	struct its_ite *iteb = container_of(b, struct its_ite, ite_list);

	if (itea->event_id < iteb->event_id)
		return -1;
	else
		return 1;
}
static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	gpa_t base = device->itt_addr;
	struct its_ite *ite;
	int ret;
	int ite_esz = abi->ite_esz;

	list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);

	list_for_each_entry(ite, &device->itt_head, ite_list) {
		gpa_t gpa = base + ite->event_id * ite_esz;

		ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
		if (ret)
			return ret;
	}
	return 0;
}
*its
, struct its_device
*dev
)
1945 const struct vgic_its_abi
*abi
= vgic_its_get_abi(its
);
1946 gpa_t base
= dev
->itt_addr
;
1948 int ite_esz
= abi
->ite_esz
;
1949 size_t max_size
= BIT_ULL(dev
->num_eventid_bits
) * ite_esz
;
1951 ret
= scan_its_table(its
, base
, max_size
, ite_esz
, 0,
1952 vgic_its_restore_ite
, dev
);
1958 * vgic_its_save_dte - Save a device table entry at a given GPA
1964 static int vgic_its_save_dte(struct vgic_its
*its
, struct its_device
*dev
,
1965 gpa_t ptr
, int dte_esz
)
1967 struct kvm
*kvm
= its
->dev
->kvm
;
1968 u64 val
, itt_addr_field
;
1971 itt_addr_field
= dev
->itt_addr
>> 8;
1972 next_offset
= compute_next_devid_offset(&its
->device_list
, dev
);
1973 val
= (1ULL << KVM_ITS_DTE_VALID_SHIFT
|
1974 ((u64
)next_offset
<< KVM_ITS_DTE_NEXT_SHIFT
) |
1975 (itt_addr_field
<< KVM_ITS_DTE_ITTADDR_SHIFT
) |
1976 (dev
->num_eventid_bits
- 1));
1977 val
= cpu_to_le64(val
);
1978 return kvm_write_guest(kvm
, ptr
, &val
, dte_esz
);
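
/*
 * Illustrative sketch, not in the original file, of the v0 ABI DTE
 * encoding above: the valid bit at 63, the next-offset field, the ITT
 * address shifted down by 8, and "number of event ID bits minus one"
 * in the size field. Values here are made up for demonstration.
 */
static __maybe_unused void dte_layout_example(void)
{
	gpa_t itt_addr = 0x40000100ULL;	/* 256-byte aligned ITT */
	u64 val = (1ULL << KVM_ITS_DTE_VALID_SHIFT) |
		  ((itt_addr >> 8) << KVM_ITS_DTE_ITTADDR_SHIFT) | (16 - 1);

	WARN_ON(!(val >> KVM_ITS_DTE_VALID_SHIFT));
	WARN_ON((((val & KVM_ITS_DTE_ITTADDR_MASK) >>
		  KVM_ITS_DTE_ITTADDR_SHIFT) << 8) != itt_addr);
	WARN_ON(((val & KVM_ITS_DTE_SIZE_MASK) + 1) != 16);
}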
/**
 * vgic_its_restore_dte - restore a device table entry
 *
 * @its: its handle
 * @id: device id the DTE corresponds to
 * @ptr: kernel VA where the 8 byte DTE is located
 * @opaque: unused
 *
 * Return: < 0 on error, 0 if the dte is the last one, id offset to the
 * next dte otherwise
 */
static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
				void *ptr, void *opaque)
{
	struct its_device *dev;
	gpa_t itt_addr;
	u8 num_eventid_bits;
	u64 entry = *(u64 *)ptr;
	bool valid;
	u32 offset;
	int ret;

	entry = le64_to_cpu(entry);

	valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
	num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
	itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
			>> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;

	if (!valid)
		return 1;

	/* dte entry is valid */
	offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;

	dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	ret = vgic_its_restore_itt(its, dev);
	if (ret) {
		vgic_its_free_device(its->dev->kvm, dev);
		return ret;
	}

	return offset;
}
static int vgic_its_device_cmp(void *priv, struct list_head *a,
			       struct list_head *b)
{
	struct its_device *deva = container_of(a, struct its_device, dev_list);
	struct its_device *devb = container_of(b, struct its_device, dev_list);

	if (deva->device_id < devb->device_id)
		return -1;
	else
		return 1;
}
/**
 * vgic_its_save_device_tables - Save the device table and all ITT
 * into guest RAM
 *
 * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
 * returns the GPA of the device entry
 */
static int vgic_its_save_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	struct its_device *dev;
	int dte_esz = abi->dte_esz;
	u64 baser;

	baser = its->baser_device_table;

	list_sort(NULL, &its->device_list, vgic_its_device_cmp);

	list_for_each_entry(dev, &its->device_list, dev_list) {
		int ret;
		gpa_t eaddr;

		if (!vgic_its_check_id(its, baser,
				       dev->device_id, &eaddr))
			return -EINVAL;

		ret = vgic_its_save_itt(its, dev);
		if (ret)
			return ret;

		ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * handle_l1_dte - callback used for L1 device table entries (2 stage case)
 *
 * @its: its handle
 * @id: index of the entry in the L1 table
 * @addr: kernel VA
 * @opaque: unused
 *
 * L1 table entries are scanned by steps of 1 entry
 * Return < 0 if error, 0 if last dte was found when scanning the L2
 * table, +1 otherwise (meaning next L1 entry must be scanned)
 */
static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
			 void *opaque)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int l2_start_id = id * (SZ_64K / abi->dte_esz);
	u64 entry = *(u64 *)addr;
	int dte_esz = abi->dte_esz;
	gpa_t gpa;
	int ret;

	entry = le64_to_cpu(entry);

	if (!(entry & KVM_ITS_L1E_VALID_MASK))
		return 1;

	gpa = entry & KVM_ITS_L1E_ADDR_MASK;

	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
			     l2_start_id, vgic_its_restore_dte, NULL);

	if (ret <= 0)
		return ret;

	return 1;
}
/**
 * vgic_its_restore_device_tables - Restore the device table and all ITT
 * from guest RAM to internal data structs
 */
static int vgic_its_restore_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	int l1_esz, ret;
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	gpa_t l1_gpa;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	l1_gpa = BASER_ADDRESS(baser);

	if (baser & GITS_BASER_INDIRECT) {
		l1_esz = GITS_LVL1_ENTRY_SIZE;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     handle_l1_dte, NULL);
	} else {
		l1_esz = abi->dte_esz;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     vgic_its_restore_dte, NULL);
	}

	if (ret > 0)
		ret = -EINVAL;

	return ret;
}
*its
,
2150 struct its_collection
*collection
,
2155 val
= (1ULL << KVM_ITS_CTE_VALID_SHIFT
|
2156 ((u64
)collection
->target_addr
<< KVM_ITS_CTE_RDBASE_SHIFT
) |
2157 collection
->collection_id
);
2158 val
= cpu_to_le64(val
);
2159 return kvm_write_guest(its
->dev
->kvm
, gpa
, &val
, esz
);
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
{
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	u32 target_addr, coll_id;
	u64 val;
	int ret;

	BUG_ON(esz > sizeof(val));
	ret = kvm_read_guest(kvm, gpa, &val, esz);
	if (ret)
		return ret;
	val = le64_to_cpu(val);
	if (!(val & KVM_ITS_CTE_VALID_MASK))
		return 0;

	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
	coll_id = val & KVM_ITS_CTE_ICID_MASK;

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (collection)
		return -EEXIST;
	ret = vgic_its_alloc_collection(its, &collection, coll_id);
	if (ret)
		return ret;
	collection->target_addr = target_addr;
	return 1;
}
/**
 * vgic_its_save_collection_table - Save the collection table into
 * guest RAM
 */
static int vgic_its_save_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	struct its_collection *collection;
	u64 val;
	gpa_t gpa;
	size_t max_size, filled = 0;
	int ret, cte_esz = abi->cte_esz;

	gpa = BASER_ADDRESS(its->baser_coll_table);
	if (!gpa)
		return 0;

	max_size = GITS_BASER_NR_PAGES(its->baser_coll_table) * SZ_64K;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
		if (ret)
			return ret;
		gpa += cte_esz;
		filled += cte_esz;
	}

	if (filled == max_size)
		return 0;

	/*
	 * table is not fully filled, add a last dummy element
	 * with valid bit unset
	 */
	val = 0;
	BUG_ON(cte_esz > sizeof(val));
	ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz);
	return ret;
}
/**
 * vgic_its_restore_collection_table - reads the collection table
 * in guest memory and restores the ITS internal state. Requires the
 * BASER registers to be restored before.
 */
static int vgic_its_restore_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int cte_esz = abi->cte_esz;
	size_t max_size, read = 0;
	gpa_t gpa;
	int ret;

	if (!(its->baser_coll_table & GITS_BASER_VALID))
		return 0;

	gpa = BASER_ADDRESS(its->baser_coll_table);

	max_size = GITS_BASER_NR_PAGES(its->baser_coll_table) * SZ_64K;

	while (read < max_size) {
		ret = vgic_its_restore_cte(its, gpa, cte_esz);
		if (ret <= 0)
			break;
		gpa += cte_esz;
		read += cte_esz;
	}
	return ret;
}
/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	struct kvm *kvm = its->dev->kvm;
	int ret;

	mutex_lock(&kvm->lock);
	mutex_lock(&its->its_lock);

	if (!lock_all_vcpus(kvm)) {
		mutex_unlock(&its->its_lock);
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	ret = vgic_its_save_device_tables(its);
	if (ret)
		goto out;

	ret = vgic_its_save_collection_table(its);

out:
	unlock_all_vcpus(kvm);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->lock);
	return ret;
}
/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to V0 ABI
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	struct kvm *kvm = its->dev->kvm;
	int ret;

	mutex_lock(&kvm->lock);
	mutex_lock(&its->its_lock);

	if (!lock_all_vcpus(kvm)) {
		mutex_unlock(&its->its_lock);
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	ret = vgic_its_restore_collection_table(its);
	if (ret)
		goto out;

	ret = vgic_its_restore_device_tables(its);
out:
	unlock_all_vcpus(kvm);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->lock);

	return ret;
}
static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);
	return 0;
}
static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return 0;
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}
static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		return vgic_register_its_iodev(dev->kvm, its, addr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		const struct vgic_its_abi *abi = vgic_its_get_abi(its);

		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			/* Nothing to do */
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return abi->save_tables(its);
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return abi->restore_tables(its);
		}
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}
static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}
static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}