/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;

	spin_lock(&dist->lpi_list_lock);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	return irq;
}

struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 lpi;
	u32 event_id;
};

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}

/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)

/*
 * We only implement 48 bits of PA at the moment, although the ITS
 * supports more. Let's be restrictive here.
 */
#define BASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define CBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))
#define PENDBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define PROPBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))

#define GIC_LPI_OFFSET		8192

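/*
 * Illustration (derived from the handlers below, not part of the original
 * source): LPI INTIDs start at GIC_LPI_OFFSET (8192). The configuration
 * byte for INTID n therefore lives at PROPBASER address + (n - 8192), and
 * its pending bit at PENDBASER address + n / 8, bit n % 8, which is the
 * arithmetic update_lpi_config() and its_sync_lpi_pending_table() use.
 */
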
/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu)
{
	u64 propbase = PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;

	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
			     &prop, 1);

	if (ret)
		return ret;

	spin_lock(&irq->irq_lock);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		vgic_queue_irq_unlock(kvm, irq);
	} else {
		spin_unlock(&irq->irq_lock);
	}

	return 0;
}

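/*
 * Illustration: each LPI has one property byte in the guest's table; bit 0
 * is the enable bit and bits 7:2 the priority, as the LPI_PROP_* macros
 * above encode. A property byte of 0xa1, for example, yields priority 0xa0
 * with the LPI enabled.
 */
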
/*
 * Create a snapshot of the current LPI list, so that we can enumerate all
 * LPIs without holding any lock.
 * Returns the array length and puts the kmalloc'ed array into intid_ptr.
 */
static int vgic_copy_lpi_list(struct kvm *kvm, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count = dist->lpi_list_count, i = 0;

	/*
	 * We use the current value of the list length, which may change
	 * after the kmalloc. We don't care, because the guest shouldn't
	 * change anything while the command handling is still running,
	 * and in the worst case we would miss a new IRQ, which one wouldn't
	 * expect to be covered by this command anyway.
	 */
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	spin_lock(&dist->lpi_list_lock);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		intids[i] = irq->intid;
		if (++i == irq_count)
			break;
	}
	spin_unlock(&dist->lpi_list_lock);

	*intid_ptr = intids;
	return irq_count;
}

/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (!ite->collection || coll != ite->collection)
			continue;

		update_affinity_ite(kvm, ite);
	}
}

static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}

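/*
 * Illustration: PROPBASER bits 4:0 hold "IDbits minus one", so a value of
 * 13 advertises 14 ID bits and max_lpis_propbaser() returns 16384. Larger
 * values are clamped to INTERRUPT_ID_BITS_ITS (16 bits, i.e. 65536 IDs),
 * matching what GITS_TYPER advertises further down.
 */
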
/*
 * Scan the whole LPI pending table and sync the pending bit in there
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;
		u8 pendmask;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
					     &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		spin_lock(&irq->irq_lock);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}

static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
	reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}

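/*
 * Illustration: GITS_TYPER encodes Devbits and IDbits as "number of bits
 * minus one", so the 0x0f values above advertise 16 DeviceID bits and
 * 16 EventID bits to the guest.
 */
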
static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}

static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	spin_lock(&ite->irq->irq_lock);
	ite->irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, ite->irq);

	return 0;
}

static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
{
	struct vgic_io_device *iodev;

	if (dev->ops != &kvm_io_gic_ops)
		return NULL;

	iodev = container_of(dev, struct vgic_io_device, dev);

	if (iodev->iodev_type != IODEV_ITS)
		return NULL;

	return iodev;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;
	int ret;

	if (!vgic_has_its(kvm))
		return -ENODEV;

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return -EINVAL;

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return -EINVAL;

	iodev = vgic_get_its_iodev(kvm_io_dev);
	if (!iodev)
		return -EINVAL;

	mutex_lock(&iodev->its->its_lock);
	ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
	mutex_unlock(&iodev->its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}

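/*
 * Illustration (assumptions about the userspace flow, not original text):
 * with KVM_SIGNAL_MSI, msi->address_hi/lo carry the doorbell address the
 * guest device would write to (the GITS_TRANSLATER frame of one ITS),
 * msi->devid the DeviceID and msi->data the EventID; the code above simply
 * resolves that address to the right ITS and feeds the pair into
 * vgic_its_trigger_msi().
 */
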
/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq)
		vgic_put_irq(kvm, ite->irq);

	kfree(ite);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)

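/*
 * Illustration: an ITS command is four little-endian 64-bit words. Per the
 * accessors above, word 0 carries the command code in bits 7:0 and the
 * DeviceID in bits 63:32; word 1 the EventID in bits 31:0 and (for MAPTI)
 * the physical INTID in bits 63:32; word 2 the collection ID in bits 15:0,
 * the target address in bits 47:16 and the valid bit in bit 63.
 */
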
/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && ite->collection) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}

/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	spin_lock(&ite->irq->irq_lock);
	ite->irq->target_vcpu = vcpu;
	spin_unlock(&ite->irq->irq_lock);

	return 0;
}

/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	int index;
	u64 indirect_ptr;
	gfn_t gfn;
	int esz = GITS_BASER_ENTRY_SIZE(baser);

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = BASER_ADDRESS(baser) + id * esz;
		gfn = addr >> PAGE_SHIFT;

		return kvm_is_visible_gfn(its->dev->kvm, gfn);
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest(its->dev->kvm,
			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/*
	 * Mask the guest physical address and calculate the frame number.
	 * Any address beyond our supported 48 bits of PA will be caught
	 * by the actual check in the final step.
	 */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;
	gfn = indirect_ptr >> PAGE_SHIFT;

	return kvm_is_visible_gfn(its->dev->kvm, gfn);
}

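/*
 * Illustration: with the fixed 64K ITS page size and the 8-byte entries
 * this emulation advertises, a flat table page covers 8192 IDs. In the
 * indirect case one 64K level-1 page holds 8192 64-bit level-2 pointers,
 * so e.g. ID 10000 lands in level-1 slot 1 (10000 / 8192), at entry 1808
 * (10000 % 8192) of the level-2 page that slot points to.
 */
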
static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}

static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}

/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	int lpi_nr;
	struct vgic_irq *irq;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = kzalloc(sizeof(struct its_ite), GFP_KERNEL);
	if (!ite) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return -ENOMEM;
	}

	ite->event_id = event_id;
	list_add_tail(&ite->ite_list, &device->itt_head);

	ite->collection = collection;
	ite->lpi = lpi_nr;

	irq = vgic_add_lpi(kvm, lpi_nr);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	update_affinity_ite(kvm, ite);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irqs.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 */
	update_lpi_config(kvm, ite->irq, NULL);

	return 0;
}

/* Requires the its_lock to be held. */
static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	list_del(&device->dev_list);
	kfree(device);
}

/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id))
		return E_ITS_MAPD_DEVICE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_unmap_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = kzalloc(sizeof(struct its_device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->device_id = device_id;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);

	return 0;
}

/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}

/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	ite->irq->pending_latch = false;

	return 0;
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, ite->irq, NULL);
}

/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(kvm, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	return 0;
}

/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
 * This command affects all LPIs in the system that target that redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		spin_lock(&irq->irq_lock);

		if (irq->target_vcpu == vcpu1)
			irq->target_vcpu = vcpu2;

		spin_unlock(&irq->irq_lock);
	}

	spin_unlock(&dist->lpi_list_lock);

	return 0;
}

/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}

/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}

static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
	reg &= ~GENMASK_ULL(15, 12);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}

static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/*
	 * Sanitise the physical address to be 64k aligned.
	 * Also limit the physical addresses to 48 bits.
	 */
	reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));

	return reg;
}

static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}

#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))

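/*
 * Illustration: GITS_CBASER bits 7:0 hold the queue size as a number of
 * 4K pages minus one, so a Size field of 0 gives a 4KiB ring holding 128
 * of the 32-byte commands. CREADR/CWRITER advance in ITS_CMD_SIZE steps
 * and wrap at ITS_CMD_BUFFER_SIZE(cbaser), which is what
 * vgic_its_process_commands() below implements.
 */
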
/* Must be called with the cmd_lock held. */
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
{
	gpa_t cbaser;
	u64 cmd_buf[4];

	/* Commands are only processed when the ITS is enabled. */
	if (!its->enabled)
		return;

	cbaser = CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
					 cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}
}

/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	u64 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}
	its->cwriter = reg;

	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}

#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}

#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u64 entry_size, device_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = 8;
		device_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = 8;
		device_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= device_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;
}

static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}

static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	mutex_lock(&its->cmd_lock);

	its->enabled = !!(val & GITS_CTLR_ENABLE);

	/*
	 * Try to process any pending commands. This function bails out early
	 * if the ITS is disabled or no commands have been queued.
	 */
	vgic_its_process_commands(kvm, its);

	mutex_unlock(&its->cmd_lock);
}

#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}

static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};

/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}

static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	if (!its->initialized)
		return -EBUSY;

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
		return -ENXIO;

	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}

#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 ((8ULL - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)			| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))

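/*
 * Illustration: INITIAL_BASER_VALUE advertises inner-shareable, cacheable
 * tables with the only supported page size (64K) and an entry size of
 * 8 bytes; the Entry_Size field encodes "bytes minus one", hence the
 * (8ULL - 1) above.
 */
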
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.has_its = true;
	its->initialized = false;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return 0;
}

static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;
	struct its_device *dev;
	struct its_ite *ite;
	struct list_head *dev_cur, *dev_temp;
	struct list_head *cur, *temp;

	/*
	 * We may end up here without the lists ever having been initialized.
	 * Check this and bail out early to avoid dereferencing a NULL pointer.
	 */
	if (!its->device_list.next)
		return;

	mutex_lock(&its->its_lock);
	list_for_each_safe(dev_cur, dev_temp, &its->device_list) {
		dev = container_of(dev_cur, struct its_device, dev_list);
		list_for_each_safe(cur, temp, &dev->itt_head) {
			ite = (container_of(cur, struct its_ite, ite_list));
			its_free_ite(kvm, ite);
		}
		list_del(dev_cur);
		kfree(dev);
	}

	list_for_each_safe(cur, temp, &its->collection_list) {
		list_del(cur);
		kfree(container_of(cur, struct its_collection, coll_list));
	}
	mutex_unlock(&its->its_lock);

	kfree(its);
}

static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
		break;
	}
	return -ENXIO;
}

static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		its->vgic_its_base = addr;

		return 0;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			its->initialized = true;

			return 0;
		}
		break;
	}
	return -ENXIO;
}

static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	default:
		return -ENXIO;
	}
	}

	return 0;
}

static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}

/*
 * Registers all ITSes with the kvm_io_bus framework.
 * To follow the existing VGIC initialization sequence, this has to be
 * done as late as possible, just before the first VCPU runs.
 */
int vgic_register_its_iodevs(struct kvm *kvm)
{
	struct kvm_device *dev;
	int ret = 0;

	list_for_each_entry(dev, &kvm->devices, vm_node) {
		if (dev->ops != &kvm_arm_vgic_its_ops)
			continue;

		ret = vgic_register_its_iodev(kvm, dev->private);
		if (ret)
			return ret;
		/*
		 * We don't need to care about tearing down previously
		 * registered ITSes, as the kvm_io_bus framework removes
		 * them for us if the VM gets destroyed.
		 */
	}

	return ret;
}