/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)
static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI
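/*
 * For illustration, assuming the common case of lpi_id_bits == 16:
 * PROPBASE covers 2^16 one-byte entries (exactly 64kB), while
 * PENDBASE needs 2^16 bits == 8kB, rounded up to the architecturally
 * required 64kB alignment.
 */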
/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
        u64			target_address;
        u16			col_id;
};
/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
        void		*base;
        u64		val;
        u32		order;
        u32		psz;
};

struct its_device;
/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
        raw_spinlock_t		lock;
        struct mutex		dev_alloc_lock;
        struct list_head	entry;
        void __iomem		*base;
        phys_addr_t		phys_base;
        struct its_cmd_block	*cmd_base;
        struct its_cmd_block	*cmd_write;
        struct its_baser	tables[GITS_BASER_NR_REGS];
        struct its_collection	*collections;
        struct fwnode_handle	*fwnode_handle;
        u64			(*get_msi_base)(struct its_device *its_dev);
        struct list_head	its_device_list;
        u64			flags;
        unsigned long		list_nr;
        u32			ite_size;
        u32			device_ids;
        int			numa_node;
        unsigned int		msi_domain_flags;
        u32			pre_its_base;	/* for Socionext Synquacer */
        bool			is_v4;
        int			vlpi_redist_offset;
};
#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS	(16)
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
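/* Example: with 4kB pages, PAGE_ORDER_TO_SIZE(4) == 64kB. */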
struct event_lpi_map {
        unsigned long		*lpi_map;
        u16			*col_map;
        irq_hw_number_t		lpi_base;
        int			nr_lpis;
        struct mutex		vlpi_lock;
        struct its_vm		*vm;
        struct its_vlpi_map	*vlpi_maps;
        int			nr_vlpis;
};
/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its LPIs
 * are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
        struct list_head	entry;
        struct its_node		*its;
        struct event_lpi_map	event_map;
        void			*itt;
        u32			nr_ites;
        u32			device_id;
        bool			shared;
};
static struct {
        raw_spinlock_t		lock;
        struct its_device	*dev;
        struct its_vpe		**vpes;
        int			next_victim;
} vpe_proxy;
static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
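/*
 * Note on the fixed SZ_128K offset above: on GICv4 each redistributor
 * exposes two 64kB frames (RD_base, then SGI_base) before the VLPI
 * frame, so VLPI_base sits 128kB above RD_base.
 */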
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
                                               u32 event)
{
        struct its_node *its = its_dev->its;

        return its->collections + its_dev->event_map.col_map[event];
}
static struct its_collection *valid_col(struct its_collection *col)
{
        if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
                return NULL;

        return col;
}
static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
        if (valid_col(its->collections + vpe->col_idx))
                return vpe;

        return NULL;
}
/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
        union {
                struct {
                        struct its_device *dev;
                        u32 event_id;
                } its_inv_cmd;

                struct {
                        struct its_device *dev;
                        u32 event_id;
                } its_clear_cmd;

                struct {
                        struct its_device *dev;
                        u32 event_id;
                } its_int_cmd;

                struct {
                        struct its_device *dev;
                        int valid;
                } its_mapd_cmd;

                struct {
                        struct its_collection *col;
                        int valid;
                } its_mapc_cmd;

                struct {
                        struct its_device *dev;
                        u32 phys_id;
                        u32 event_id;
                } its_mapti_cmd;

                struct {
                        struct its_device *dev;
                        struct its_collection *col;
                        u32 event_id;
                } its_movi_cmd;

                struct {
                        struct its_device *dev;
                        u32 event_id;
                } its_discard_cmd;

                struct {
                        struct its_collection *col;
                } its_invall_cmd;

                struct {
                        struct its_vpe *vpe;
                } its_vinvall_cmd;

                struct {
                        struct its_vpe *vpe;
                        struct its_collection *col;
                        bool valid;
                } its_vmapp_cmd;

                struct {
                        struct its_device *dev;
                        struct its_vpe *vpe;
                        u32 virt_id;
                        u32 event_id;
                        bool db_enabled;
                } its_vmapti_cmd;

                struct {
                        struct its_device *dev;
                        struct its_vpe *vpe;
                        u32 event_id;
                        bool db_enabled;
                } its_vmovi_cmd;

                struct {
                        struct its_vpe *vpe;
                        struct its_collection *col;
                        u16 seq_num;
                        u16 its_list;
                } its_vmovp_cmd;
        };
};
/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
        u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
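/*
 * Worked out: a command block is 4 x u64 == 32 bytes, so the 64kB
 * queue holds ITS_CMD_QUEUE_NR_ENTRIES == 2048 commands.
 */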
typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
                                                    struct its_cmd_block *,
                                                    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
                                              struct its_cmd_block *,
                                              struct its_cmd_desc *);
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
        u64 mask = GENMASK_ULL(h, l);

        *raw_cmd &= ~mask;
        *raw_cmd |= (val << l) & mask;
}
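/*
 * Each its_encode_*() helper below is a thin wrapper placing one
 * field of the 32-byte command. For example, the device ID lives in
 * bits [63:32] of doubleword 0:
 *
 *	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
 */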
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
        its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
        its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
        its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
        its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
        its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
        its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
        its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
        its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
        its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
        its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
        its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
        its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
        its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
        its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
        its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
        its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
        its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
        /* Let's fixup BE commands */
        cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
        cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
        cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
        cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
static struct its_collection *its_build_mapd_cmd(struct its_node *its,
                                                 struct its_cmd_block *cmd,
                                                 struct its_cmd_desc *desc)
{
        unsigned long itt_addr;
        u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

        itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
        itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

        its_encode_cmd(cmd, GITS_CMD_MAPD);
        its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
        its_encode_size(cmd, size - 1);
        its_encode_itt(cmd, itt_addr);
        its_encode_valid(cmd, desc->its_mapd_cmd.valid);

        its_fixup_cmd(cmd);

        return NULL;
}
static struct its_collection *its_build_mapc_cmd(struct its_node *its,
                                                 struct its_cmd_block *cmd,
                                                 struct its_cmd_desc *desc)
{
        its_encode_cmd(cmd, GITS_CMD_MAPC);
        its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
        its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
        its_encode_valid(cmd, desc->its_mapc_cmd.valid);

        its_fixup_cmd(cmd);

        return desc->its_mapc_cmd.col;
}
static struct its_collection *its_build_mapti_cmd(struct its_node *its,
                                                  struct its_cmd_block *cmd,
                                                  struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_mapti_cmd.dev,
                               desc->its_mapti_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_MAPTI);
        its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
        its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
        its_encode_collection(cmd, col->col_id);

        its_fixup_cmd(cmd);

        return valid_col(col);
}
static struct its_collection *its_build_movi_cmd(struct its_node *its,
                                                 struct its_cmd_block *cmd,
                                                 struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_movi_cmd.dev,
                               desc->its_movi_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_MOVI);
        its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
        its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

        its_fixup_cmd(cmd);

        return valid_col(col);
}
static struct its_collection *its_build_discard_cmd(struct its_node *its,
                                                    struct its_cmd_block *cmd,
                                                    struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_discard_cmd.dev,
                               desc->its_discard_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_DISCARD);
        its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

        its_fixup_cmd(cmd);

        return valid_col(col);
}
static struct its_collection *its_build_inv_cmd(struct its_node *its,
                                                struct its_cmd_block *cmd,
                                                struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_inv_cmd.dev,
                               desc->its_inv_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_INV);
        its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

        its_fixup_cmd(cmd);

        return valid_col(col);
}
static struct its_collection *its_build_int_cmd(struct its_node *its,
                                                struct its_cmd_block *cmd,
                                                struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_int_cmd.dev,
                               desc->its_int_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_INT);
        its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_int_cmd.event_id);

        its_fixup_cmd(cmd);

        return valid_col(col);
}
static struct its_collection *its_build_clear_cmd(struct its_node *its,
                                                  struct its_cmd_block *cmd,
                                                  struct its_cmd_desc *desc)
{
        struct its_collection *col;

        col = dev_event_to_col(desc->its_clear_cmd.dev,
                               desc->its_clear_cmd.event_id);

        its_encode_cmd(cmd, GITS_CMD_CLEAR);
        its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

        its_fixup_cmd(cmd);

        return valid_col(col);
}
static struct its_collection *its_build_invall_cmd(struct its_node *its,
                                                   struct its_cmd_block *cmd,
                                                   struct its_cmd_desc *desc)
{
        its_encode_cmd(cmd, GITS_CMD_INVALL);
        its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

        its_fixup_cmd(cmd);

        return NULL;
}
static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
                                             struct its_cmd_block *cmd,
                                             struct its_cmd_desc *desc)
{
        its_encode_cmd(cmd, GITS_CMD_VINVALL);
        its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

        its_fixup_cmd(cmd);

        return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}
static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
                                           struct its_cmd_block *cmd,
                                           struct its_cmd_desc *desc)
{
        unsigned long vpt_addr;
        u64 target;

        vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
        target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

        its_encode_cmd(cmd, GITS_CMD_VMAPP);
        its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
        its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
        its_encode_target(cmd, target);
        its_encode_vpt_addr(cmd, vpt_addr);
        its_encode_vpt_size(cmd, LPI_NRBITS - 1);

        its_fixup_cmd(cmd);

        return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
                                            struct its_cmd_block *cmd,
                                            struct its_cmd_desc *desc)
{
        u32 db;

        if (desc->its_vmapti_cmd.db_enabled)
                db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
        else
                db = 1023;

        its_encode_cmd(cmd, GITS_CMD_VMAPTI);
        its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
        its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
        its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
        its_encode_db_phys_id(cmd, db);
        its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

        its_fixup_cmd(cmd);

        return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}
static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
                                           struct its_cmd_block *cmd,
                                           struct its_cmd_desc *desc)
{
        u32 db;

        if (desc->its_vmovi_cmd.db_enabled)
                db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
        else
                db = 1023;

        its_encode_cmd(cmd, GITS_CMD_VMOVI);
        its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
        its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
        its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
        its_encode_db_phys_id(cmd, db);
        its_encode_db_valid(cmd, true);

        its_fixup_cmd(cmd);

        return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}
static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
                                           struct its_cmd_block *cmd,
                                           struct its_cmd_desc *desc)
{
        u64 target;

        target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
        its_encode_cmd(cmd, GITS_CMD_VMOVP);
        its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
        its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
        its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
        its_encode_target(cmd, target);

        its_fixup_cmd(cmd);

        return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}
static u64 its_cmd_ptr_to_offset(struct its_node *its,
                                 struct its_cmd_block *ptr)
{
        return (ptr - its->cmd_base) * sizeof(*ptr);
}
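/*
 * GITS_CREADR/GITS_CWRITER hold byte offsets from the base of the
 * command queue, hence the scaling by sizeof(*ptr) == 32 above;
 * e.g. the fourth slot (index 3) corresponds to offset 96.
 */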
static int its_queue_full(struct its_node *its)
{
        int widx;
        int ridx;

        widx = its->cmd_write - its->cmd_base;
        ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

        /* This is incredibly unlikely to happen, unless the ITS locks up. */
        if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
                return 1;

        return 0;
}
static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
        struct its_cmd_block *cmd;
        u32 count = 1000000;	/* 1s! */

        while (its_queue_full(its)) {
                count--;
                if (!count) {
                        pr_err_ratelimited("ITS queue not draining\n");
                        return NULL;
                }
                cpu_relax();
                udelay(1);
        }

        cmd = its->cmd_write++;

        /* Handle queue wrapping */
        if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
                its->cmd_write = its->cmd_base;

        /* Clear command  */
        cmd->raw_cmd[0] = 0;
        cmd->raw_cmd[1] = 0;
        cmd->raw_cmd[2] = 0;
        cmd->raw_cmd[3] = 0;

        return cmd;
}
static struct its_cmd_block *its_post_commands(struct its_node *its)
{
        u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

        writel_relaxed(wr, its->base + GITS_CWRITER);

        return its->cmd_write;
}
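/*
 * Taken together, its_allocate_entry() and its_post_commands()
 * implement a single-producer ring: the driver advances cmd_write
 * (wrapping at ITS_CMD_QUEUE_NR_ENTRIES) and publishes the write
 * offset via GITS_CWRITER, while the ITS advances GITS_CREADR as it
 * consumes commands.
 */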
static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
        /*
         * Make sure the commands written to memory are observable by
         * the ITS.
         */
        if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
                gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
        else
                dsb(ishst);
}
static int its_wait_for_range_completion(struct its_node *its,
                                         struct its_cmd_block *from,
                                         struct its_cmd_block *to)
{
        u64 rd_idx, from_idx, to_idx;
        u32 count = 1000000;	/* 1s! */

        from_idx = its_cmd_ptr_to_offset(its, from);
        to_idx = its_cmd_ptr_to_offset(its, to);

        while (1) {
                rd_idx = readl_relaxed(its->base + GITS_CREADR);

                /* Direct case */
                if (from_idx < to_idx && rd_idx >= to_idx)
                        break;

                /* Wrapped case */
                if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
                        break;

                count--;
                if (!count) {
                        pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
                                           from_idx, to_idx, rd_idx);
                        return -1;
                }
                cpu_relax();
                udelay(1);
        }

        return 0;
}
/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
          buildtype builder,						\
          struct its_cmd_desc *desc)					\
{									\
        struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
        synctype *sync_obj;						\
        unsigned long flags;						\
									\
        raw_spin_lock_irqsave(&its->lock, flags);			\
									\
        cmd = its_allocate_entry(its);					\
        if (!cmd) {		/* We're soooooo screwed... */		\
                raw_spin_unlock_irqrestore(&its->lock, flags);		\
                return;							\
        }								\
        sync_obj = builder(its, cmd, desc);				\
        its_flush_cmd(its, cmd);					\
									\
        if (sync_obj) {							\
                sync_cmd = its_allocate_entry(its);			\
                if (!sync_cmd)						\
                        goto post;					\
									\
                buildfn(its, sync_cmd, sync_obj);			\
                its_flush_cmd(its, sync_cmd);				\
        }								\
									\
post:									\
        next_cmd = its_post_commands(its);				\
        raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
        if (its_wait_for_range_completion(its, cmd, next_cmd))		\
                pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}
static void its_build_sync_cmd(struct its_node *its,
                               struct its_cmd_block *sync_cmd,
                               struct its_collection *sync_col)
{
        its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
        its_encode_target(sync_cmd, sync_col->target_address);

        its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
                             struct its_collection, its_build_sync_cmd)
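/*
 * Sketch of the expansion above (illustrative only):
 *
 *	void its_send_single_command(struct its_node *its,
 *				     its_cmd_builder_t builder,
 *				     struct its_cmd_desc *desc);
 *
 * i.e. one sender per sync type: the command built by "builder" is
 * followed by a SYNC targeting the collection it returns (or a VSYNC
 * targeting a vPE in the its_send_single_vcommand variant below).
 */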
static void its_build_vsync_cmd(struct its_node *its,
                                struct its_cmd_block *sync_cmd,
                                struct its_vpe *sync_vpe)
{
        its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
        its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

        its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
                             struct its_vpe, its_build_vsync_cmd)
static void its_send_int(struct its_device *dev, u32 event_id)
{
        struct its_cmd_desc desc;

        desc.its_int_cmd.dev = dev;
        desc.its_int_cmd.event_id = event_id;

        its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
        struct its_cmd_desc desc;

        desc.its_clear_cmd.dev = dev;
        desc.its_clear_cmd.event_id = event_id;

        its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
        struct its_cmd_desc desc;

        desc.its_inv_cmd.dev = dev;
        desc.its_inv_cmd.event_id = event_id;

        its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}
static void its_send_mapd(struct its_device *dev, int valid)
{
        struct its_cmd_desc desc;

        desc.its_mapd_cmd.dev = dev;
        desc.its_mapd_cmd.valid = !!valid;

        its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
                          int valid)
{
        struct its_cmd_desc desc;

        desc.its_mapc_cmd.col = col;
        desc.its_mapc_cmd.valid = !!valid;

        its_send_single_command(its, its_build_mapc_cmd, &desc);
}
static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
        struct its_cmd_desc desc;

        desc.its_mapti_cmd.dev = dev;
        desc.its_mapti_cmd.phys_id = irq_id;
        desc.its_mapti_cmd.event_id = id;

        its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
                          struct its_collection *col, u32 id)
{
        struct its_cmd_desc desc;

        desc.its_movi_cmd.dev = dev;
        desc.its_movi_cmd.col = col;
        desc.its_movi_cmd.event_id = id;

        its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}
static void its_send_discard(struct its_device *dev, u32 id)
{
        struct its_cmd_desc desc;

        desc.its_discard_cmd.dev = dev;
        desc.its_discard_cmd.event_id = id;

        its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
        struct its_cmd_desc desc;

        desc.its_invall_cmd.col = col;

        its_send_single_command(its, its_build_invall_cmd, &desc);
}
static void its_send_vmapti(struct its_device *dev, u32 id)
{
        struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
        struct its_cmd_desc desc;

        desc.its_vmapti_cmd.vpe = map->vpe;
        desc.its_vmapti_cmd.dev = dev;
        desc.its_vmapti_cmd.virt_id = map->vintid;
        desc.its_vmapti_cmd.event_id = id;
        desc.its_vmapti_cmd.db_enabled = map->db_enabled;

        its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
        struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
        struct its_cmd_desc desc;

        desc.its_vmovi_cmd.vpe = map->vpe;
        desc.its_vmovi_cmd.dev = dev;
        desc.its_vmovi_cmd.event_id = id;
        desc.its_vmovi_cmd.db_enabled = map->db_enabled;

        its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}
static void its_send_vmapp(struct its_node *its,
                           struct its_vpe *vpe, bool valid)
{
        struct its_cmd_desc desc;

        desc.its_vmapp_cmd.vpe = vpe;
        desc.its_vmapp_cmd.valid = valid;
        desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

        its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}
static void its_send_vmovp(struct its_vpe *vpe)
{
        struct its_cmd_desc desc;
        struct its_node *its;
        unsigned long flags;
        int col_id = vpe->col_idx;

        desc.its_vmovp_cmd.vpe = vpe;
        desc.its_vmovp_cmd.its_list = (u16)its_list_map;

        if (!its_list_map) {
                its = list_first_entry(&its_nodes, struct its_node, entry);
                desc.its_vmovp_cmd.seq_num = 0;
                desc.its_vmovp_cmd.col = &its->collections[col_id];
                its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
                return;
        }

        /*
         * Yet another marvel of the architecture. If using the
         * its_list "feature", we need to make sure that all ITSs
         * receive all VMOVP commands in the same order. The only way
         * to guarantee this is to make vmovp a serialization point.
         */
        raw_spin_lock_irqsave(&vmovp_lock, flags);

        desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;

        /* Emit VMOVPs */
        list_for_each_entry(its, &its_nodes, entry) {
                if (!its->is_v4)
                        continue;

                if (!vpe->its_vm->vlpi_count[its->list_nr])
                        continue;

                desc.its_vmovp_cmd.col = &its->collections[col_id];
                its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
        }

        raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
        struct its_cmd_desc desc;

        desc.its_vinvall_cmd.vpe = vpe;

        its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}
/*
 * irqchip functions - assumes MSI, mostly.
 */

static inline u32 its_get_event_id(struct irq_data *d)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        return d->hwirq - its_dev->event_map.lpi_base;
}
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
        irq_hw_number_t hwirq;
        void *va;
        u8 *cfg;

        if (irqd_is_forwarded_to_vcpu(d)) {
                struct its_device *its_dev = irq_data_get_irq_chip_data(d);
                u32 event = its_get_event_id(d);
                struct its_vlpi_map *map;

                va = page_address(its_dev->event_map.vm->vprop_page);
                map = &its_dev->event_map.vlpi_maps[event];
                hwirq = map->vintid;

                /* Remember the updated property */
                map->properties &= ~clr;
                map->properties |= set | LPI_PROP_GROUP1;
        } else {
                va = gic_rdists->prop_table_va;
                hwirq = d->hwirq;
        }

        cfg = va + hwirq - 8192;
        *cfg &= ~clr;
        *cfg |= set | LPI_PROP_GROUP1;

        /*
         * Make the above write visible to the redistributors.
         * And yes, we're flushing exactly: One. Single. Byte.
         */
        if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
                gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
        else
                dsb(ishst);
}
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);

        lpi_write_config(d, clr, set);
        its_send_inv(its_dev, its_get_event_id(d));
}
static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);

        if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
                return;

        its_dev->event_map.vlpi_maps[event].db_enabled = enable;

        /*
         * More fun with the architecture:
         *
         * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
         * value or to 1023, depending on the enable bit. But that
         * would be issuing a mapping for an /existing/ DevID+EventID
         * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
         * to the /same/ vPE, using this opportunity to adjust the
         * doorbell. Mouahahahaha. We loves it, Precious.
         */
        its_send_vmovi(its_dev, event);
}
static void its_mask_irq(struct irq_data *d)
{
        if (irqd_is_forwarded_to_vcpu(d))
                its_vlpi_set_doorbell(d, false);

        lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
        if (irqd_is_forwarded_to_vcpu(d))
                its_vlpi_set_doorbell(d, true);

        lpi_update_config(d, 0, LPI_PROP_ENABLED);
}
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu;
        const struct cpumask *cpu_mask = cpu_online_mask;
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        struct its_collection *target_col;
        u32 id = its_get_event_id(d);

        /* A forwarded interrupt should use irq_set_vcpu_affinity */
        if (irqd_is_forwarded_to_vcpu(d))
                return -EINVAL;

        /* lpi cannot be routed to a redistributor that is on a foreign node */
        if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
                if (its_dev->its->numa_node >= 0) {
                        cpu_mask = cpumask_of_node(its_dev->its->numa_node);
                        if (!cpumask_intersects(mask_val, cpu_mask))
                                return -EINVAL;
                }
        }

        cpu = cpumask_any_and(mask_val, cpu_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        /* don't set the affinity when the target cpu is same as current one */
        if (cpu != its_dev->event_map.col_map[id]) {
                target_col = &its_dev->its->collections[cpu];
                its_send_movi(its_dev, target_col, id);
                its_dev->event_map.col_map[id] = cpu;
                irq_data_update_effective_affinity(d, cpumask_of(cpu));
        }

        return IRQ_SET_MASK_OK_DONE;
}
static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
        struct its_node *its = its_dev->its;

        return its->phys_base + GITS_TRANSLATER;
}
static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        struct its_node *its;
        u64 addr;

        its = its_dev->its;
        addr = its->get_msi_base(its_dev);

        msg->address_lo		= lower_32_bits(addr);
        msg->address_hi		= upper_32_bits(addr);
        msg->data		= its_get_event_id(d);

        iommu_dma_map_msi_msg(d->irq, msg);
}
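/*
 * The composed MSI is a 32-bit write of the event ID to this ITS's
 * GITS_TRANSLATER register; the ITS then translates the (DeviceID,
 * EventID) pair through the device's ITT into an LPI on the mapped
 * collection.
 */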
static int its_irq_set_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which,
                                     bool state)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);

        if (which != IRQCHIP_STATE_PENDING)
                return -EINVAL;

        if (state)
                its_send_int(its_dev, event);
        else
                its_send_clear(its_dev, event);

        return 0;
}
static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
        unsigned long flags;

        /* Not using the ITS list? Everything is always mapped. */
        if (!its_list_map)
                return;

        raw_spin_lock_irqsave(&vmovp_lock, flags);

        /*
         * If the VM wasn't mapped yet, iterate over the vpes and get
         * them mapped now.
         */
        vm->vlpi_count[its->list_nr]++;

        if (vm->vlpi_count[its->list_nr] == 1) {
                int i;

                for (i = 0; i < vm->nr_vpes; i++) {
                        struct its_vpe *vpe = vm->vpes[i];
                        struct irq_data *d = irq_get_irq_data(vpe->irq);

                        /* Map the VPE to the first possible CPU */
                        vpe->col_idx = cpumask_first(cpu_online_mask);
                        its_send_vmapp(its, vpe, true);
                        its_send_vinvall(its, vpe);
                        irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
                }
        }

        raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
        unsigned long flags;

        /* Not using the ITS list? Everything is always mapped. */
        if (!its_list_map)
                return;

        raw_spin_lock_irqsave(&vmovp_lock, flags);

        if (!--vm->vlpi_count[its->list_nr]) {
                int i;

                for (i = 0; i < vm->nr_vpes; i++)
                        its_send_vmapp(its, vm->vpes[i], false);
        }

        raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
        int ret = 0;

        if (!info->map)
                return -EINVAL;

        mutex_lock(&its_dev->event_map.vlpi_lock);

        if (!its_dev->event_map.vm) {
                struct its_vlpi_map *maps;

                maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
                               GFP_KERNEL);
                if (!maps) {
                        ret = -ENOMEM;
                        goto out;
                }

                its_dev->event_map.vm = info->map->vm;
                its_dev->event_map.vlpi_maps = maps;
        } else if (its_dev->event_map.vm != info->map->vm) {
                ret = -EINVAL;
                goto out;
        }

        /* Get our private copy of the mapping information */
        its_dev->event_map.vlpi_maps[event] = *info->map;

        if (irqd_is_forwarded_to_vcpu(d)) {
                /* Already mapped, move it around */
                its_send_vmovi(its_dev, event);
        } else {
                /* Ensure all the VPEs are mapped on this ITS */
                its_map_vm(its_dev->its, info->map->vm);

                /*
                 * Flag the interrupt as forwarded so that we can
                 * start poking the virtual property table.
                 */
                irqd_set_forwarded_to_vcpu(d);

                /* Write out the property to the prop table */
                lpi_write_config(d, 0xff, info->map->properties);

                /* Drop the physical mapping */
                its_send_discard(its_dev, event);

                /* and install the virtual one */
                its_send_vmapti(its_dev, event);

                /* Increment the number of VLPIs */
                its_dev->event_map.nr_vlpis++;
        }

out:
        mutex_unlock(&its_dev->event_map.vlpi_lock);
        return ret;
}
static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
        int ret = 0;

        mutex_lock(&its_dev->event_map.vlpi_lock);

        if (!its_dev->event_map.vm ||
            !its_dev->event_map.vlpi_maps[event].vm) {
                ret = -EINVAL;
                goto out;
        }

        /* Copy our mapping information to the incoming request */
        *info->map = its_dev->event_map.vlpi_maps[event];

out:
        mutex_unlock(&its_dev->event_map.vlpi_lock);
        return ret;
}
static int its_vlpi_unmap(struct irq_data *d)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
        int ret = 0;

        mutex_lock(&its_dev->event_map.vlpi_lock);

        if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
                ret = -EINVAL;
                goto out;
        }

        /* Drop the virtual mapping */
        its_send_discard(its_dev, event);

        /* and restore the physical one */
        irqd_clr_forwarded_to_vcpu(d);
        its_send_mapti(its_dev, d->hwirq, event);
        lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
                                    LPI_PROP_ENABLED |
                                    LPI_PROP_GROUP1));

        /* Potentially unmap the VM from this ITS */
        its_unmap_vm(its_dev->its, its_dev->event_map.vm);

        /*
         * Drop the refcount and make the device available again if
         * this was the last VLPI.
         */
        if (!--its_dev->event_map.nr_vlpis) {
                its_dev->event_map.vm = NULL;
                kfree(its_dev->event_map.vlpi_maps);
        }

out:
        mutex_unlock(&its_dev->event_map.vlpi_lock);
        return ret;
}
static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);

        if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
                return -EINVAL;

        if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
                lpi_update_config(d, 0xff, info->config);
        else
                lpi_write_config(d, 0xff, info->config);
        its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

        return 0;
}
static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        struct its_cmd_info *info = vcpu_info;

        /* Need a v4 ITS */
        if (!its_dev->its->is_v4)
                return -EINVAL;

        /* Unmap request? */
        if (!info)
                return its_vlpi_unmap(d);

        switch (info->cmd_type) {
        case MAP_VLPI:
                return its_vlpi_map(d, info);

        case GET_VLPI:
                return its_vlpi_get(d, info);

        case PROP_UPDATE_VLPI:
        case PROP_UPDATE_AND_INV_VLPI:
                return its_vlpi_prop_update(d, info);

        default:
                return -EINVAL;
        }
}
static struct irq_chip its_irq_chip = {
        .name			= "ITS",
        .irq_mask		= its_mask_irq,
        .irq_unmask		= its_unmask_irq,
        .irq_eoi		= irq_chip_eoi_parent,
        .irq_set_affinity	= its_set_affinity,
        .irq_compose_msi_msg	= its_irq_compose_msi_msg,
        .irq_set_irqchip_state	= its_irq_set_irqchip_state,
        .irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
};
/*
 * How we allocate LPIs:
 *
 * lpi_range_list contains ranges of LPIs that are available to
 * allocate from. To allocate LPIs, just pick the first range that
 * fits the required allocation, and reduce it by the required
 * amount. Once empty, remove the range from the list.
 *
 * To free a range of LPIs, add a free range to the list, sort it and
 * merge the result if the new range happens to be adjacent to an
 * already free block.
 *
 * The consequence of the above is that allocation cost is low, but
 * freeing is expensive. We assume that freeing rarely occurs.
 */
#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
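/*
 * Example (illustrative): starting from a single range [8192:65535],
 * allocating 32 LPIs returns base 8192 and leaves [8224:65535];
 * freeing those 32 back inserts [8192:8223], which merge_lpi_ranges()
 * below folds into its adjacent neighbour.
 */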
static DEFINE_MUTEX(lpi_range_lock);
static LIST_HEAD(lpi_range_list);

struct lpi_range {
        struct list_head	entry;
        u32			base_id;
        u32			span;
};
static struct lpi_range *mk_lpi_range(u32 base, u32 span)
{
        struct lpi_range *range;

        range = kzalloc(sizeof(*range), GFP_KERNEL);
        if (range) {
                INIT_LIST_HEAD(&range->entry);
                range->base_id = base;
                range->span = span;
        }

        return range;
}
static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct lpi_range *ra, *rb;

        ra = container_of(a, struct lpi_range, entry);
        rb = container_of(b, struct lpi_range, entry);

        return ra->base_id - rb->base_id;
}
static void merge_lpi_ranges(void)
{
        struct lpi_range *range, *tmp;

        list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
                if (!list_is_last(&range->entry, &lpi_range_list) &&
                    (tmp->base_id == (range->base_id + range->span))) {
                        tmp->base_id = range->base_id;
                        tmp->span += range->span;
                        list_del(&range->entry);
                        kfree(range);
                }
        }
}
static int alloc_lpi_range(u32 nr_lpis, u32 *base)
{
        struct lpi_range *range, *tmp;
        int err = -ENOSPC;

        mutex_lock(&lpi_range_lock);

        list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
                if (range->span >= nr_lpis) {
                        *base = range->base_id;
                        range->base_id += nr_lpis;
                        range->span -= nr_lpis;

                        if (range->span == 0) {
                                list_del(&range->entry);
                                kfree(range);
                        }

                        err = 0;
                        break;
                }
        }

        mutex_unlock(&lpi_range_lock);

        pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
        return err;
}
static int free_lpi_range(u32 base, u32 nr_lpis)
{
        struct lpi_range *new;
        int err = 0;

        mutex_lock(&lpi_range_lock);

        new = mk_lpi_range(base, nr_lpis);
        if (!new) {
                err = -ENOMEM;
                goto out;
        }

        list_add(&new->entry, &lpi_range_list);
        list_sort(NULL, &lpi_range_list, lpi_range_cmp);
        merge_lpi_ranges();
out:
        mutex_unlock(&lpi_range_lock);
        return err;
}
static int __init its_lpi_init(u32 id_bits)
{
        u32 lpis = (1UL << id_bits) - 8192;
        u32 numlpis;
        int err;

        numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);

        if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
                lpis = numlpis;
                pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
                        lpis);
        }

        /*
         * Initializing the allocator is just the same as freeing the
         * full range of LPIs.
         */
        err = free_lpi_range(8192, lpis);
        pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
        return err;
}
static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
{
        unsigned long *bitmap = NULL;
        int err = 0;

        do {
                err = alloc_lpi_range(nr_irqs, base);
                if (!err)
                        break;

                nr_irqs /= 2;
        } while (nr_irqs > 0);

        if (err)
                goto out;

        bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
        if (!bitmap)
                goto out;

        *nr_ids = nr_irqs;

out:
        if (!bitmap)
                *base = *nr_ids = 0;

        return bitmap;
}
static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
{
        WARN_ON(free_lpi_range(base, nr_ids));
        kfree(bitmap);
}
static void gic_reset_prop_table(void *va)
{
        /* Priority 0xa0, Group-1, disabled */
        memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);

        /* Make sure the GIC will observe the written configuration */
        gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
}
static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
        struct page *prop_page;

        prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
        if (!prop_page)
                return NULL;

        gic_reset_prop_table(page_address(prop_page));

        return prop_page;
}

static void its_free_prop_table(struct page *prop_page)
{
        free_pages((unsigned long)page_address(prop_page),
                   get_order(LPI_PROPBASE_SZ));
}
static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
{
        phys_addr_t start, end, addr_end;
        u64 i;

        /*
         * We don't bother checking for a kdump kernel as by
         * construction, the LPI tables are out of this kernel's
         * memory map.
         */
        if (is_kdump_kernel())
                return true;

        addr_end = addr + size - 1;

        for_each_reserved_mem_region(i, &start, &end) {
                if (addr >= start && addr_end <= end)
                        return true;
        }

        /* Not found, not a good sign... */
        pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
                &addr, &addr_end);
        add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
        return false;
}
static int gic_reserve_range(phys_addr_t addr, unsigned long size)
{
        if (efi_enabled(EFI_CONFIG_TABLES))
                return efi_mem_reserve_persistent(addr, size);

        return 0;
}
static int __init its_setup_lpi_prop_table(void)
{
        if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
                u64 val;

                val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
                lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;

                gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
                gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
                                                     LPI_PROPBASE_SZ,
                                                     MEMREMAP_WB);
                gic_reset_prop_table(gic_rdists->prop_table_va);
        } else {
                struct page *page;

                lpi_id_bits = min_t(u32,
                                    GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
                                    ITS_MAX_LPI_NRBITS);
                page = its_allocate_prop_table(GFP_NOWAIT);
                if (!page) {
                        pr_err("Failed to allocate PROPBASE\n");
                        return -ENOMEM;
                }

                gic_rdists->prop_table_pa = page_to_phys(page);
                gic_rdists->prop_table_va = page_address(page);
                WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
                                          LPI_PROPBASE_SZ));
        }

        pr_info("GICv3: using LPI property table @%pa\n",
                &gic_rdists->prop_table_pa);

        return its_lpi_init(lpi_id_bits);
}
static const char *its_base_type_string[] = {
        [GITS_BASER_TYPE_DEVICE]	= "Devices",
        [GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
        [GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
        [GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
        [GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
        [GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
        [GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};
static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
        u32 idx = baser - its->tables;

        return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

static void its_write_baser(struct its_node *its, struct its_baser *baser,
                            u64 val)
{
        u32 idx = baser - its->tables;

        gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
        baser->val = its_read_baser(its, baser);
}
static int its_setup_baser(struct its_node *its, struct its_baser *baser,
                           u64 cache, u64 shr, u32 psz, u32 order,
                           bool indirect)
{
        u64 val = its_read_baser(its, baser);
        u64 esz = GITS_BASER_ENTRY_SIZE(val);
        u64 type = GITS_BASER_TYPE(val);
        u64 baser_phys, tmp;
        u32 alloc_pages;
        struct page *page;
        void *base;

retry_alloc_baser:
        alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
        if (alloc_pages > GITS_BASER_PAGES_MAX) {
                pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
                        &its->phys_base, its_base_type_string[type],
                        alloc_pages, GITS_BASER_PAGES_MAX);
                alloc_pages = GITS_BASER_PAGES_MAX;
                order = get_order(GITS_BASER_PAGES_MAX * psz);
        }

        page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
        if (!page)
                return -ENOMEM;

        base = (void *)page_address(page);
        baser_phys = virt_to_phys(base);

        /* Check if the physical address of the memory is above 48bits */
        if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {

                /* 52bit PA is supported only when PageSize=64K */
                if (psz != SZ_64K) {
                        pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
                        free_pages((unsigned long)base, order);
                        return -ENXIO;
                }

                /* Convert 52bit PA to 48bit field */
                baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
        }

retry_baser:
        val = (baser_phys					 |
                (type << GITS_BASER_TYPE_SHIFT)			 |
                ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
                ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
                cache						 |
                shr						 |
                GITS_BASER_VALID);

        val |=	indirect ? GITS_BASER_INDIRECT : 0x0;

        switch (psz) {
        case SZ_4K:
                val |= GITS_BASER_PAGE_SIZE_4K;
                break;
        case SZ_16K:
                val |= GITS_BASER_PAGE_SIZE_16K;
                break;
        case SZ_64K:
                val |= GITS_BASER_PAGE_SIZE_64K;
                break;
        }

        its_write_baser(its, baser, val);
        tmp = baser->val;

        if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
                /*
                 * Shareability didn't stick. Just use
                 * whatever the read reported, which is likely
                 * to be the only thing this redistributor
                 * supports. If that's zero, make it
                 * non-cacheable as well.
                 */
                shr = tmp & GITS_BASER_SHAREABILITY_MASK;
                if (!shr) {
                        cache = GITS_BASER_nC;
                        gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
                }
                goto retry_baser;
        }

        if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
                /*
                 * Page size didn't stick. Let's try a smaller
                 * size and retry. If we reach 4K, then
                 * something is horribly wrong...
                 */
                free_pages((unsigned long)base, order);
                baser->base = NULL;

                switch (psz) {
                case SZ_16K:
                        psz = SZ_4K;
                        goto retry_alloc_baser;
                case SZ_64K:
                        psz = SZ_16K;
                        goto retry_alloc_baser;
                }
        }

        if (val != tmp) {
                pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
                       &its->phys_base, its_base_type_string[type],
                       val, tmp);
                free_pages((unsigned long)base, order);
                return -ENXIO;
        }

        baser->order = order;
        baser->base = base;
        baser->psz = psz;
        tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

        pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
                &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
                its_base_type_string[type],
                (unsigned long)virt_to_phys(base),
                indirect ? "indirect" : "flat", (int)esz,
                psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

        return 0;
}
static bool its_parse_indirect_baser(struct its_node *its,
                                     struct its_baser *baser,
                                     u32 psz, u32 *order, u32 ids)
{
        u64 tmp = its_read_baser(its, baser);
        u64 type = GITS_BASER_TYPE(tmp);
        u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
        u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
        u32 new_order = *order;
        bool indirect = false;

        /* No need to enable Indirection if memory requirement < (psz*2)bytes */
        if ((esz << ids) > (psz * 2)) {
                /*
                 * Find out whether hw supports a single or two-level
                 * table by reading bit at offset '62' after writing
                 * '1' to it.
                 */
                its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
                indirect = !!(baser->val & GITS_BASER_INDIRECT);

                if (indirect) {
                        /*
                         * The size of the lvl2 table is equal to ITS
                         * page size which is 'psz'. For computing lvl1
                         * table size, subtract the ID bits that sparse
                         * the lvl2 table from 'ids' which is reported
                         * by ITS hardware times lvl1 table entry size.
                         */
                        ids -= ilog2(psz / (int)esz);
                        esz = GITS_LVL1_ENTRY_SIZE;
                }
        }

        /*
         * Allocate as many entries as required to fit the
         * range of device IDs that the ITS can grok... The ID
         * space being incredibly sparse, this results in a
         * massive waste of memory if two-level device table
         * feature is not supported by hardware.
         */
        new_order = max_t(u32, get_order(esz << ids), new_order);
        if (new_order >= MAX_ORDER) {
                new_order = MAX_ORDER - 1;
                ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
                pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
                        &its->phys_base, its_base_type_string[type],
                        its->device_ids, ids);
        }

        *order = new_order;

        return indirect;
}
static void its_free_tables(struct its_node *its)
{
        int i;

        for (i = 0; i < GITS_BASER_NR_REGS; i++) {
                if (its->tables[i].base) {
                        free_pages((unsigned long)its->tables[i].base,
                                   its->tables[i].order);
                        its->tables[i].base = NULL;
                }
        }
}
static int its_alloc_tables(struct its_node *its)
{
        u64 shr = GITS_BASER_InnerShareable;
        u64 cache = GITS_BASER_RaWaWb;
        u32 psz = SZ_64K;
        int err, i;

        if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
                /* erratum 24313: ignore memory access type */
                cache = GITS_BASER_nCnB;

        for (i = 0; i < GITS_BASER_NR_REGS; i++) {
                struct its_baser *baser = its->tables + i;
                u64 val = its_read_baser(its, baser);
                u64 type = GITS_BASER_TYPE(val);
                u32 order = get_order(psz);
                bool indirect = false;

                switch (type) {
                case GITS_BASER_TYPE_NONE:
                        continue;

                case GITS_BASER_TYPE_DEVICE:
                        indirect = its_parse_indirect_baser(its, baser,
                                                            psz, &order,
                                                            its->device_ids);
                        break;

                case GITS_BASER_TYPE_VCPU:
                        indirect = its_parse_indirect_baser(its, baser,
                                                            psz, &order,
                                                            ITS_MAX_VPEID_BITS);
                        break;
                }

                err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
                if (err < 0) {
                        its_free_tables(its);
                        return err;
                }

                /* Update settings which will be used for next BASERn */
                psz = baser->psz;
                cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
                shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
        }

        return 0;
}
static int its_alloc_collections(struct its_node *its)
{
        int i;

        its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
                                   GFP_KERNEL);
        if (!its->collections)
                return -ENOMEM;

        for (i = 0; i < nr_cpu_ids; i++)
                its->collections[i].target_address = ~0ULL;

        return 0;
}
static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
        struct page *pend_page;

        pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
                                get_order(LPI_PENDBASE_SZ));
        if (!pend_page)
                return NULL;

        /* Make sure the GIC will observe the zero-ed page */
        gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);

        return pend_page;
}

static void its_free_pending_table(struct page *pt)
{
        free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
}
/*
 * Booting with kdump and LPIs enabled is generally fine. Any other
 * case is wrong in the absence of firmware/EFI support.
 */
static bool enabled_lpis_allowed(void)
{
        phys_addr_t addr;
        u64 val;

        if (is_kdump_kernel())
                return true;

        /* Check whether the property table is in a reserved region */
        val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
        addr = val & GENMASK_ULL(51, 12);

        return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
}
static int __init allocate_lpi_tables(void)
{
        u64 val;
        int err, cpu;

        /*
         * If LPIs are enabled while we run this from the boot CPU,
         * flag the RD tables as pre-allocated if the stars do align.
         */
        val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
        if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
                gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
                                      RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
                pr_info("GICv3: Using preallocated redistributor tables\n");
        }

        err = its_setup_lpi_prop_table();
        if (err)
                return err;

        /*
         * We allocate all the pending tables anyway, as we may have a
         * mix of RDs that have had LPIs enabled, and some that
         * don't. We'll free the unused ones as each CPU comes online.
         */
        for_each_possible_cpu(cpu) {
                struct page *pend_page;

                pend_page = its_allocate_pending_table(GFP_NOWAIT);
                if (!pend_page) {
                        pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
                        return -ENOMEM;
                }

                gic_data_rdist_cpu(cpu)->pend_page = pend_page;
        }

        return 0;
}
static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
{
        u32 count = 1000000;	/* 1s! */
        bool clean;
        u64 val;

        val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
        val &= ~GICR_VPENDBASER_Valid;
        gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);

        do {
                val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
                clean = !(val & GICR_VPENDBASER_Dirty);
                if (!clean) {
                        count--;
                        cpu_relax();
                        udelay(1);
                }
        } while (!clean && count);

        return val;
}
static void its_cpu_init_lpis(void)
{
        void __iomem *rbase = gic_data_rdist_rd_base();
        struct page *pend_page;
        phys_addr_t paddr;
        u64 val, tmp;

        if (gic_data_rdist()->lpi_enabled)
                return;

        val = readl_relaxed(rbase + GICR_CTLR);
        if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
            (val & GICR_CTLR_ENABLE_LPIS)) {
                /*
                 * Check that we get the same property table on all
                 * RDs. If we don't, this is hopeless.
                 */
                paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
                paddr &= GENMASK_ULL(51, 12);
                if (WARN_ON(gic_rdists->prop_table_pa != paddr))
                        add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

                paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
                paddr &= GENMASK_ULL(51, 16);

                WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
                its_free_pending_table(gic_data_rdist()->pend_page);
                gic_data_rdist()->pend_page = NULL;

                goto out;
        }

        pend_page = gic_data_rdist()->pend_page;
        paddr = page_to_phys(pend_page);
        WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));

        /* set PROPBASE */
        val = (gic_rdists->prop_table_pa |
               GICR_PROPBASER_InnerShareable |
               GICR_PROPBASER_RaWaWb |
               ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

        gicr_write_propbaser(val, rbase + GICR_PROPBASER);
        tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);

        if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
                if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
                        /*
                         * The HW reports non-shareable, we must
                         * remove the cacheability attributes as
                         * well.
                         */
                        val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
                                 GICR_PROPBASER_CACHEABILITY_MASK);
                        val |= GICR_PROPBASER_nC;
                        gicr_write_propbaser(val, rbase + GICR_PROPBASER);
                }
                pr_info_once("GIC: using cache flushing for LPI property table\n");
                gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
        }

        /* set PENDBASE */
        val = (page_to_phys(pend_page) |
               GICR_PENDBASER_InnerShareable |
               GICR_PENDBASER_RaWaWb);

        gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
        tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);

        if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
                /*
                 * The HW reports non-shareable, we must remove the
                 * cacheability attributes as well.
                 */
                val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
                         GICR_PENDBASER_CACHEABILITY_MASK);
                val |= GICR_PENDBASER_nC;
                gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
        }

        /* Enable LPIs */
        val = readl_relaxed(rbase + GICR_CTLR);
        val |= GICR_CTLR_ENABLE_LPIS;
        writel_relaxed(val, rbase + GICR_CTLR);

        if (gic_rdists->has_vlpis) {
                void __iomem *vlpi_base = gic_data_rdist_vlpi_base();

                /*
                 * It's possible for a CPU to receive VLPIs before it is
                 * scheduled as a vPE, especially for the first CPU, and a
                 * VLPI with INTID larger than 2^(IDbits+1) will be considered
                 * as out of range and dropped by the GIC.
                 * So we initialize IDbits to a known value to avoid VLPI drops.
                 */
                val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
                pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
                         smp_processor_id(), val);
                gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);

                /*
                 * Also clear Valid bit of GICR_VPENDBASER, in case some
                 * ancient programming gets left in and has possibility of
                 * corrupting memory.
                 */
                val = its_clear_vpend_valid(vlpi_base);
                WARN_ON(val & GICR_VPENDBASER_Dirty);
        }

        /* Make sure the GIC has seen the above */
        dsb(sy);
out:
        gic_data_rdist()->lpi_enabled = true;
        pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
                smp_processor_id(),
                gic_data_rdist()->pend_page ? "allocated" : "reserved",
                &paddr);
}
*its
)
2219 int cpu
= smp_processor_id();
2222 /* avoid cross node collections and its mapping */
2223 if (its
->flags
& ITS_FLAGS_WORKAROUND_CAVIUM_23144
) {
2224 struct device_node
*cpu_node
;
2226 cpu_node
= of_get_cpu_node(cpu
, NULL
);
2227 if (its
->numa_node
!= NUMA_NO_NODE
&&
2228 its
->numa_node
!= of_node_to_nid(cpu_node
))
2233 * We now have to bind each collection to its target
2236 if (gic_read_typer(its
->base
+ GITS_TYPER
) & GITS_TYPER_PTA
) {
2238 * This ITS wants the physical address of the
2241 target
= gic_data_rdist()->phys_base
;
2243 /* This ITS wants a linear CPU number. */
2244 target
= gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER
);
2245 target
= GICR_TYPER_CPU_NUMBER(target
) << 16;
2248 /* Perform collection mapping */
2249 its
->collections
[cpu
].target_address
= target
;
2250 its
->collections
[cpu
].col_id
= cpu
;
2252 its_send_mapc(its
, &its
->collections
[cpu
], 1);
2253 its_send_invall(its
, &its
->collections
[cpu
]);
static void its_cpu_init_collections(void)
{
        struct its_node *its;

        raw_spin_lock(&its_lock);

        list_for_each_entry(its, &its_nodes, entry)
                its_cpu_init_collection(its);

        raw_spin_unlock(&its_lock);
}
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
        struct its_device *its_dev = NULL, *tmp;
        unsigned long flags;

        raw_spin_lock_irqsave(&its->lock, flags);

        list_for_each_entry(tmp, &its->its_device_list, entry) {
                if (tmp->device_id == dev_id) {
                        its_dev = tmp;
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&its->lock, flags);

        return its_dev;
}
static struct its_baser *its_get_baser(struct its_node *its, u32 type)
{
        int i;

        for (i = 0; i < GITS_BASER_NR_REGS; i++) {
                if (GITS_BASER_TYPE(its->tables[i].val) == type)
                        return &its->tables[i];
        }

        return NULL;
}
static bool its_alloc_table_entry(struct its_node *its,
                                  struct its_baser *baser, u32 id)
{
        struct page *page;
        u32 esz, idx;
        __le64 *table;

        /* Don't allow device id that exceeds single, flat table limit */
        esz = GITS_BASER_ENTRY_SIZE(baser->val);
        if (!(baser->val & GITS_BASER_INDIRECT))
                return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));

        /* Compute 1st level table index & check if that exceeds table limit */
        idx = id >> ilog2(baser->psz / esz);
        if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
                return false;

        table = baser->base;

        /* Allocate memory for 2nd level table */
        if (!table[idx]) {
                page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
                                        get_order(baser->psz));
                if (!page)
                        return false;

                /* Flush Lvl2 table to PoC if hw doesn't support coherency */
                if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
                        gic_flush_dcache_to_poc(page_address(page), baser->psz);

                table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);

                /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
                if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
                        gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);

                /* Ensure updated table contents are visible to ITS hardware */
                dsb(sy);
        }

        return true;
}
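/*
 * Worked example for the two-level case above, assuming a 64kB ITS
 * page (psz) and 8-byte entries (esz): each level-2 page covers
 * 8192 IDs, so ID 20000 indexes level-1 slot 2 (20000 >> 13), whose
 * level-2 page is allocated on demand.
 */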
static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
{
        struct its_baser *baser;

        baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);

        /* Don't allow device id that exceeds ITS hardware limit */
        if (!baser)
                return (ilog2(dev_id) < its->device_ids);

        return its_alloc_table_entry(its, baser, dev_id);
}
static bool its_alloc_vpe_table(u32 vpe_id)
{
	struct its_node *its;

	/*
	 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
	 * could try and only do it on ITSs corresponding to devices
	 * that have interrupts targeted at this VPE, but the
	 * complexity becomes crazy (and you have tons of memory
	 * anyway).
	 */
	list_for_each_entry(its, &its_nodes, entry) {
		struct its_baser *baser;

		if (!its->is_v4)
			continue;

		baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
		if (!baser)
			return false;

		if (!its_alloc_table_entry(its, baser, vpe_id))
			return false;
	}

	return true;
}
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
					    int nvecs, bool alloc_lpis)
{
	struct its_device *dev;
	unsigned long *lpi_map = NULL;
	unsigned long flags;
	u16 *col_map = NULL;
	void *itt;
	int lpi_base;
	int nr_lpis;
	int nr_ites;
	int sz;

	if (!its_alloc_device_table(its, dev_id))
		return NULL;

	if (WARN_ON(!is_power_of_2(nvecs)))
		nvecs = roundup_pow_of_two(nvecs);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/*
	 * Even if the device wants a single LPI, the ITT must be
	 * sized as a power of two (and you need at least one bit...).
	 */
	nr_ites = max(2, nvecs);
	sz = nr_ites * its->ite_size;
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
	itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
	if (alloc_lpis) {
		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
		if (lpi_map)
			col_map = kcalloc(nr_lpis, sizeof(*col_map),
					  GFP_KERNEL);
	} else {
		col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
		nr_lpis = 0;
		lpi_base = 0;
	}

	if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
		kfree(dev);
		kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
		return NULL;
	}

	gic_flush_dcache_to_poc(itt, sz);

	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
	dev->event_map.lpi_base = lpi_base;
	dev->event_map.nr_lpis = nr_lpis;
	mutex_init(&dev->event_map.vlpi_lock);
	dev->device_id = dev_id;
	INIT_LIST_HEAD(&dev->entry);

	raw_spin_lock_irqsave(&its->lock, flags);
	list_add(&dev->entry, &its->its_device_list);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Map device to its ITT */
	its_send_mapd(dev, 1);

	return dev;
}
static void its_free_device(struct its_device *its_dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->itt);
	kfree(its_dev);
}
static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
{
	int idx;

	/* Find a free LPI region in lpi_map and allocate them. */
	idx = bitmap_find_free_region(dev->event_map.lpi_map,
				      dev->event_map.nr_lpis,
				      get_count_order(nvecs));
	if (idx < 0)
		return -ENOSPC;

	*hwirq = dev->event_map.lpi_base + idx;
	set_bit(idx, dev->event_map.lpi_map);

	return 0;
}
static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *info)
{
	struct its_node *its;
	struct its_device *its_dev;
	struct msi_domain_info *msi_info;
	u32 dev_id;
	int err = 0;

	/*
	 * We ignore "dev" entirely, and rely on the dev_id that has
	 * been passed via the scratchpad. This limits this domain's
	 * usefulness to upper layers that definitely know that they
	 * are built on top of the ITS.
	 */
	dev_id = info->scratchpad[0].ul;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	if (!gic_rdists->has_direct_lpi &&
	    vpe_proxy.dev &&
	    vpe_proxy.dev->its == its &&
	    dev_id == vpe_proxy.dev->device_id) {
		/* Bad luck. Get yourself a better implementation */
		WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
			  dev_id);
		return -EINVAL;
	}

	mutex_lock(&its->dev_alloc_lock);
	its_dev = its_find_device(its, dev_id);
	if (its_dev) {
		/*
		 * We already have seen this ID, probably through
		 * another alias (PCI bridge of some sort). No need to
		 * create the device.
		 */
		its_dev->shared = true;
		pr_debug("Reusing ITT for devID %x\n", dev_id);
		goto out;
	}

	its_dev = its_create_device(its, dev_id, nvec, true);
	if (!its_dev) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
	mutex_unlock(&its->dev_alloc_lock);
	info->scratchpad[0].ptr = its_dev;
	return err;
}

static struct msi_domain_ops its_msi_domain_ops = {
	.msi_prepare	= its_msi_prepare,
};
static int its_irq_gic_domain_alloc(struct irq_domain *domain,
				    unsigned int virq,
				    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;

	if (irq_domain_get_of_node(domain->parent)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
		fwspec.param[1] = hwirq;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
}
static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct its_device *its_dev = info->scratchpad[0].ptr;
	irq_hw_number_t hwirq;
	int err;
	int i;

	err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
	if (err)
		return err;

	for (i = 0; i < nr_irqs; i++) {
		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq + i, &its_irq_chip, its_dev);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
		pr_debug("ID:%d pID:%d vID:%d\n",
			 (int)(hwirq + i - its_dev->event_map.lpi_base),
			 (int)(hwirq + i), virq + i);
	}

	return 0;
}
static int its_irq_domain_activate(struct irq_domain *domain,
				   struct irq_data *d, bool reserve)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	const struct cpumask *cpu_mask = cpu_online_mask;
	int cpu;

	/* get the cpu_mask of local node */
	if (its_dev->its->numa_node >= 0)
		cpu_mask = cpumask_of_node(its_dev->its->numa_node);

	/* Bind the LPI to the first possible CPU */
	cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids) {
		if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
			return -EINVAL;

		cpu = cpumask_first(cpu_online_mask);
	}

	its_dev->event_map.col_map[event] = cpu;
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	/* Map the GIC IRQ and event to the device */
	its_send_mapti(its_dev, d->hwirq, event);
	return 0;
}
static void its_irq_domain_deactivate(struct irq_domain *domain,
				      struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Stop the delivery of interrupts */
	its_send_discard(its_dev, event);
}
static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its = its_dev->its;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		u32 event = its_get_event_id(data);

		/* Mark interrupt index as unused */
		clear_bit(event, its_dev->event_map.lpi_map);

		/* Nuke the entry in the domain */
		irq_domain_reset_irq_data(data);
	}

	mutex_lock(&its->dev_alloc_lock);

	/*
	 * If all interrupts have been freed, start mopping the
	 * floor. This is conditioned on the device not being shared.
	 */
	if (!its_dev->shared &&
	    bitmap_empty(its_dev->event_map.lpi_map,
			 its_dev->event_map.nr_lpis)) {
		its_lpi_free(its_dev->event_map.lpi_map,
			     its_dev->event_map.lpi_base,
			     its_dev->event_map.nr_lpis);
		kfree(its_dev->event_map.col_map);

		/* Unmap device/itt */
		its_send_mapd(its_dev, 0);
		its_free_device(its_dev);
	}

	mutex_unlock(&its->dev_alloc_lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
static const struct irq_domain_ops its_domain_ops = {
	.alloc			= its_irq_domain_alloc,
	.free			= its_irq_domain_free,
	.activate		= its_irq_domain_activate,
	.deactivate		= its_irq_domain_deactivate,
};
/*
 * If a GICv4 doesn't implement Direct LPIs (which is extremely
 * likely), the only way to perform an invalidate is to use a fake
 * device to issue an INV command, implying that the LPI has first
 * been mapped to some event on that device. Since this is not exactly
 * cheap, we try to keep that mapping around as long as possible, and
 * only issue an UNMAP if we're short on available slots.
 *
 * Broken by design(tm).
 */
static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
{
	/* Already unmapped? */
	if (vpe->vpe_proxy_event == -1)
		return;

	its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
	vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;

	/*
	 * We don't track empty slots at all, so let's move the
	 * next_victim pointer if we can quickly reuse that slot
	 * instead of nuking an existing entry. Not clear that this is
	 * always a win though, and this might just generate a ripple
	 * effect... Let's just hope VPEs don't migrate too often.
	 */
	if (vpe_proxy.vpes[vpe_proxy.next_victim])
		vpe_proxy.next_victim = vpe->vpe_proxy_event;

	vpe->vpe_proxy_event = -1;
}
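/*
 * Proxy slot lifecycle, in short: its_vpe_db_proxy_map_locked() below
 * installs the VPE's doorbell LPI as an event on the proxy device,
 * the unmap above discards it, and next_victim walks the proxy ITT
 * round-robin once all slots are in use.
 */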
static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
{
	if (!gic_rdists->has_direct_lpi) {
		unsigned long flags;

		raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
		its_vpe_db_proxy_unmap_locked(vpe);
		raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
	}
}
static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
{
	/* Already mapped? */
	if (vpe->vpe_proxy_event != -1)
		return;

	/* This slot was already allocated. Kick the other VPE out. */
	if (vpe_proxy.vpes[vpe_proxy.next_victim])
		its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);

	/* Map the new VPE instead */
	vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
	vpe->vpe_proxy_event = vpe_proxy.next_victim;
	vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;

	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
	its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
}
static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
{
	unsigned long flags;
	struct its_collection *target_col;

	if (gic_rdists->has_direct_lpi) {
		void __iomem *rdbase;

		rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
		gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
		while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
			cpu_relax();

		return;
	}

	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);

	its_vpe_db_proxy_map_locked(vpe);

	target_col = &vpe_proxy.dev->its->collections[to];
	its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;

	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
}
static int its_vpe_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val,
				bool force)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	int cpu = cpumask_first(mask_val);

	/*
	 * Changing affinity is mega expensive, so let's be as lazy as
	 * we can and only do it if we really have to. Also, if mapped
	 * into the proxy device, we need to move the doorbell
	 * interrupt to its new location.
	 */
	if (vpe->col_idx != cpu) {
		int from = vpe->col_idx;

		vpe->col_idx = cpu;
		its_send_vmovp(vpe);
		its_vpe_db_proxy_move(vpe, from, cpu);
	}

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
static void its_vpe_schedule(struct its_vpe *vpe)
{
	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
	u64 val;

	/* Schedule the VPE */
	val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
		GENMASK_ULL(51, 12);
	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
	val |= GICR_VPROPBASER_RaWb;
	val |= GICR_VPROPBASER_InnerShareable;
	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);

	val  = virt_to_phys(page_address(vpe->vpt_page)) &
		GENMASK_ULL(51, 16);
	val |= GICR_VPENDBASER_RaWaWb;
	val |= GICR_VPENDBASER_NonShareable;
	/*
	 * There is no good way of finding out if the pending table is
	 * empty as we can race against the doorbell interrupt very
	 * easily. So in the end, vpe->pending_last is only an
	 * indication that the vcpu has something pending, not one
	 * that the pending table is empty. A good implementation
	 * would be able to read its coarse map pretty quickly anyway,
	 * making this a tolerable issue.
	 */
	val |= GICR_VPENDBASER_PendingLast;
	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
	val |= GICR_VPENDBASER_Valid;
	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
}
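/*
 * Sizing note for the VPROPBASER setup above: the IDBITS field is
 * programmed as "number of ID bits minus one", so e.g. 16 LPI ID
 * bits (65536 interrupt IDs) are encoded as 15.
 */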
static void its_vpe_deschedule(struct its_vpe *vpe)
{
	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
	u64 val;

	val = its_clear_vpend_valid(vlpi_base);

	if (unlikely(val & GICR_VPENDBASER_Dirty)) {
		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
		vpe->idai = false;
		vpe->pending_last = true;
	} else {
		vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
		vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
	}
}
static void its_vpe_invall(struct its_vpe *vpe)
{
	struct its_node *its;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		/*
		 * Sending a VINVALL to a single ITS is enough, as all
		 * we need is to reach the redistributors.
		 */
		its_send_vinvall(its, vpe);
		return;
	}
}
static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	switch (info->cmd_type) {
	case SCHEDULE_VPE:
		its_vpe_schedule(vpe);
		return 0;

	case DESCHEDULE_VPE:
		its_vpe_deschedule(vpe);
		return 0;

	case INVALL_VPE:
		its_vpe_invall(vpe);
		return 0;

	default:
		return -EINVAL;
	}
}
static void its_vpe_send_cmd(struct its_vpe *vpe,
			     void (*cmd)(struct its_device *, u32))
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);

	its_vpe_db_proxy_map_locked(vpe);
	cmd(vpe_proxy.dev, vpe->vpe_proxy_event);

	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
}
static void its_vpe_send_inv(struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

	if (gic_rdists->has_direct_lpi) {
		void __iomem *rdbase;

		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
		gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
		while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
			cpu_relax();
	} else {
		its_vpe_send_cmd(vpe, its_send_inv);
	}
}
static void its_vpe_mask_irq(struct irq_data *d)
{
	/*
	 * We need to unmask the LPI, which is described by the parent
	 * irq_data. Instead of calling into the parent (which won't
	 * exactly do the right thing), let's simply use the
	 * parent_data pointer. Yes, I'm naughty.
	 */
	lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
	its_vpe_send_inv(d);
}

static void its_vpe_unmask_irq(struct irq_data *d)
{
	/* Same hack as above... */
	lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
	its_vpe_send_inv(d);
}
static int its_vpe_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (gic_rdists->has_direct_lpi) {
		void __iomem *rdbase;

		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
		if (state) {
			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
		} else {
			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
			while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
				cpu_relax();
		}
	} else {
		if (state)
			its_vpe_send_cmd(vpe, its_send_int);
		else
			its_vpe_send_cmd(vpe, its_send_clear);
	}

	return 0;
}
static struct irq_chip its_vpe_irq_chip = {
	.name			= "GICv4-vpe",
	.irq_mask		= its_vpe_mask_irq,
	.irq_unmask		= its_vpe_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_vpe_set_affinity,
	.irq_set_irqchip_state	= its_vpe_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_vpe_set_vcpu_affinity,
};
static int its_vpe_id_alloc(void)
{
	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
}

static void its_vpe_id_free(u16 id)
{
	ida_simple_remove(&its_vpeid_ida, id);
}
static int its_vpe_init(struct its_vpe *vpe)
{
	struct page *vpt_page;
	int vpe_id;

	/* Allocate vpe_id */
	vpe_id = its_vpe_id_alloc();
	if (vpe_id < 0)
		return vpe_id;

	/* Allocate VPT */
	vpt_page = its_allocate_pending_table(GFP_KERNEL);
	if (!vpt_page) {
		its_vpe_id_free(vpe_id);
		return -ENOMEM;
	}

	if (!its_alloc_vpe_table(vpe_id)) {
		its_vpe_id_free(vpe_id);
		/* Free the local vpt_page: vpe->vpt_page isn't set yet */
		its_free_pending_table(vpt_page);
		return -ENOMEM;
	}

	vpe->vpe_id = vpe_id;
	vpe->vpt_page = vpt_page;
	vpe->vpe_proxy_event = -1;

	return 0;
}
static void its_vpe_teardown(struct its_vpe *vpe)
{
	its_vpe_db_proxy_unmap(vpe);
	its_vpe_id_free(vpe->vpe_id);
	its_free_pending_table(vpe->vpt_page);
}
static void its_vpe_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq,
				    unsigned int nr_irqs)
{
	struct its_vm *vm = domain->host_data;
	int i;

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		struct its_vpe *vpe = irq_data_get_irq_chip_data(data);

		BUG_ON(vm != vpe->its_vm);

		clear_bit(data->hwirq, vm->db_bitmap);
		its_vpe_teardown(vpe);
		irq_domain_reset_irq_data(data);
	}

	if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
		its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
		its_free_prop_table(vm->vprop_page);
	}
}
static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *args)
{
	struct its_vm *vm = args;
	unsigned long *bitmap;
	struct page *vprop_page;
	int base, nr_ids, i, err = 0;

	BUG_ON(!vm);

	bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
	if (!bitmap)
		return -ENOMEM;

	if (nr_ids < nr_irqs) {
		its_lpi_free(bitmap, base, nr_ids);
		return -ENOMEM;
	}

	vprop_page = its_allocate_prop_table(GFP_KERNEL);
	if (!vprop_page) {
		its_lpi_free(bitmap, base, nr_ids);
		return -ENOMEM;
	}

	vm->db_bitmap = bitmap;
	vm->db_lpi_base = base;
	vm->nr_db_lpis = nr_ids;
	vm->vprop_page = vprop_page;

	for (i = 0; i < nr_irqs; i++) {
		vm->vpes[i]->vpe_db_lpi = base + i;
		err = its_vpe_init(vm->vpes[i]);
		if (err)
			break;
		err = its_irq_gic_domain_alloc(domain, virq + i,
					       vm->vpes[i]->vpe_db_lpi);
		if (err)
			break;
		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
					      &its_vpe_irq_chip, vm->vpes[i]);
		set_bit(i, bitmap);
	}

	if (err) {
		if (i > 0)
			/* Free the i entries that were set up successfully */
			its_vpe_irq_domain_free(domain, virq, i);

		its_lpi_free(bitmap, base, nr_ids);
		its_free_prop_table(vprop_page);
	}

	return err;
}
static int its_vpe_irq_domain_activate(struct irq_domain *domain,
				       struct irq_data *d, bool reserve)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/* If we use the list map, we issue VMAPP on demand... */
	if (its_list_map)
		return 0;

	/* Map the VPE to the first possible CPU */
	vpe->col_idx = cpumask_first(cpu_online_mask);

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		its_send_vmapp(its, vpe, true);
		its_send_vinvall(its, vpe);
	}

	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));

	return 0;
}
static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
					  struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/*
	 * If we use the list map, we unmap the VPE once no VLPIs are
	 * associated with the VM.
	 */
	if (its_list_map)
		return;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		its_send_vmapp(its, vpe, false);
	}
}
static const struct irq_domain_ops its_vpe_domain_ops = {
	.alloc			= its_vpe_irq_domain_alloc,
	.free			= its_vpe_irq_domain_free,
	.activate		= its_vpe_irq_domain_activate,
	.deactivate		= its_vpe_irq_domain_deactivate,
};
static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	/*
	 * GIC architecture specification requires the ITS to be both
	 * disabled and quiescent for writes to GITS_BASER<n> or
	 * GITS_CBASER to not have UNPREDICTABLE results.
	 */
	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}
static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
	struct its_node *its = data;

	/* erratum 22375: only alloc 8MB table size */
	its->device_ids = 0x14;		/* 20 bits, 8MB */
	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;

	return true;
}

static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;

	return true;
}

static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
	struct its_node *its = data;

	/* On QDF2400, the size of the ITE is 16Bytes */
	its->ite_size = 16;

	return true;
}
static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	/*
	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
	 * which maps 32-bit writes targeted at a separate window of
	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
	 * with device ID taken from bits [device_id_bits + 1:2] of
	 * the window offset.
	 */
	return its->pre_its_base + (its_dev->device_id << 2);
}
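/*
 * For instance (illustrative DeviceID), a device with DeviceID 5 gets
 * the doorbell address pre_its_base + (5 << 2) = pre_its_base + 0x14,
 * and the pre-ITS recovers DeviceID 5 from that window offset before
 * forwarding the write to GITS_TRANSLATER.
 */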
static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
{
	struct its_node *its = data;
	u32 pre_its_window[2];
	u32 ids;

	if (!fwnode_property_read_u32_array(its->fwnode_handle,
					    "socionext,synquacer-pre-its",
					    pre_its_window,
					    ARRAY_SIZE(pre_its_window))) {

		its->pre_its_base = pre_its_window[0];
		its->get_msi_base = its_irq_get_msi_base_pre_its;

		ids = ilog2(pre_its_window[1]) - 2;
		if (its->device_ids > ids)
			its->device_ids = ids;

		/* the pre-ITS breaks isolation, so disable MSI remapping */
		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
		return true;
	}
	return false;
}
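/*
 * Example of the computation above (illustrative window size): a 4MB
 * pre-ITS window gives ids = ilog2(SZ_4M) - 2 = 20 usable DeviceID
 * bits, since each DeviceID owns a 4-byte slot in the window.
 */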
static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
{
	struct its_node *its = data;

	/*
	 * Hip07 insists on using the wrong address for the VLPI
	 * page. Trick it into doing the right thing...
	 */
	its->vlpi_redist_offset = SZ_128K;
	return true;
}
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
	{
		.desc	= "ITS: Cavium erratum 23144",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_23144,
	},
#endif
#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
	{
		.desc	= "ITS: QDF2400 erratum 0065",
		.iidr	= 0x00001070,	/* QDF2400 ITS rev 1.x */
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_qdf2400_e0065,
	},
#endif
#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
	{
		/*
		 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
		 * implementation, but with a 'pre-ITS' added that requires
		 * special handling in software.
		 */
		.desc	= "ITS: Socionext Synquacer pre-ITS",
		.iidr	= 0x0001143b,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_socionext_synquacer,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161600802
	{
		.desc	= "ITS: Hip07 erratum 161600802",
		.iidr	= 0x00000004,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_hip07_161600802,
	},
#endif
	{
	}
};
static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);
}
static int its_save_disable(void)
{
	struct its_node *its;
	int err = 0;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;

		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
			continue;

		base = its->base;
		its->ctlr_save = readl_relaxed(base + GITS_CTLR);
		err = its_force_quiescent(base);
		if (err) {
			pr_err("ITS@%pa: failed to quiesce: %d\n",
			       &its->phys_base, err);
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
			goto err;
		}

		its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
	}

err:
	if (err) {
		list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
			void __iomem *base;

			if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
				continue;

			base = its->base;
			writel_relaxed(its->ctlr_save, base + GITS_CTLR);
		}
	}

	raw_spin_unlock(&its_lock);

	return err;
}
static void its_restore_enable(void)
{
	struct its_node *its;
	int ret;

	raw_spin_lock(&its_lock);
	list_for_each_entry(its, &its_nodes, entry) {
		void __iomem *base;
		int i;

		if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
			continue;

		base = its->base;

		/*
		 * Make sure that the ITS is disabled. If it fails to quiesce,
		 * don't restore it since writing to CBASER or BASER<n>
		 * registers is undefined according to the GIC v3 ITS
		 * Specification.
		 */
		ret = its_force_quiescent(base);
		if (ret) {
			pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
			       &its->phys_base, ret);
			continue;
		}

		gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);

		/*
		 * Writing CBASER resets CREADR to 0, so make CWRITER and
		 * cmd_write line up with it.
		 */
		its->cmd_write = its->cmd_base;
		gits_write_cwriter(0, base + GITS_CWRITER);

		/* Restore GITS_BASER from the value cache. */
		for (i = 0; i < GITS_BASER_NR_REGS; i++) {
			struct its_baser *baser = &its->tables[i];

			if (!(baser->val & GITS_BASER_VALID))
				continue;

			its_write_baser(its, baser, baser->val);
		}
		writel_relaxed(its->ctlr_save, base + GITS_CTLR);

		/*
		 * Reinit the collection if it's stored in the ITS. This is
		 * indicated by the col_id being less than the HCC field.
		 * CID < HCC as specified in the GIC v3 Documentation.
		 */
		if (its->collections[smp_processor_id()].col_id <
		    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
			its_cpu_init_collection(its);
	}
	raw_spin_unlock(&its_lock);
}
static struct syscore_ops its_syscore_ops = {
	.suspend = its_save_disable,
	.resume = its_restore_enable,
};
static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
	struct irq_domain *inner_domain;
	struct msi_domain_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
	if (!inner_domain) {
		kfree(info);
		return -ENOMEM;
	}

	inner_domain->parent = its_parent;
	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->flags |= its->msi_domain_flags;
	info->ops = &its_msi_domain_ops;
	info->data = its;
	inner_domain->host_data = info;

	return 0;
}
static int its_init_vpe_domain(void)
{
	struct its_node *its;
	u32 devid;
	int entries;

	if (gic_rdists->has_direct_lpi) {
		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
		return 0;
	}

	/* Any ITS will do, even if not v4 */
	its = list_first_entry(&its_nodes, struct its_node, entry);

	entries = roundup_pow_of_two(nr_cpu_ids);
	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
				 GFP_KERNEL);
	if (!vpe_proxy.vpes) {
		pr_err("ITS: Can't allocate GICv4 proxy device array\n");
		return -ENOMEM;
	}

	/* Use the last possible DevID */
	devid = GENMASK(its->device_ids - 1, 0);
	vpe_proxy.dev = its_create_device(its, devid, entries, false);
	if (!vpe_proxy.dev) {
		kfree(vpe_proxy.vpes);
		pr_err("ITS: Can't allocate GICv4 proxy device\n");
		return -ENOMEM;
	}

	BUG_ON(entries > vpe_proxy.dev->nr_ites);

	raw_spin_lock_init(&vpe_proxy.lock);
	vpe_proxy.next_victim = 0;
	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
		devid, vpe_proxy.dev->nr_ites);

	return 0;
}
static int __init its_compute_its_list_map(struct resource *res,
					   void __iomem *its_base)
{
	int its_number;
	u32 ctlr;

	/*
	 * This is assumed to be done early enough that we're
	 * guaranteed to be single-threaded, hence no
	 * locking. Should this change, we should address
	 * all of this in a more timely fashion.
	 */
	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
	if (its_number >= GICv4_ITS_LIST_MAX) {
		pr_err("ITS@%pa: No ITSList entry available!\n",
		       &res->start);
		return -EINVAL;
	}

	ctlr = readl_relaxed(its_base + GITS_CTLR);
	ctlr &= ~GITS_CTLR_ITS_NUMBER;
	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
	writel_relaxed(ctlr, its_base + GITS_CTLR);
	ctlr = readl_relaxed(its_base + GITS_CTLR);
	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
	}

	if (test_and_set_bit(its_number, &its_list_map)) {
		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
		       &res->start, its_number);
		return -EINVAL;
	}

	return its_number;
}
static int __init its_probe_one(struct resource *res,
				struct fwnode_handle *handle, int numa_node)
{
	struct its_node *its;
	void __iomem *its_base;
	u32 val, ctlr;
	u64 baser, tmp, typer;
	struct page *page;
	int err;

	its_base = ioremap(res->start, resource_size(res));
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	mutex_init(&its->dev_alloc_lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	typer = gic_read_typer(its_base + GITS_TYPER);
	its->base = its_base;
	its->phys_base = res->start;
	its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
	its->device_ids = GITS_TYPER_DEVBITS(typer);
	its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
	if (its->is_v4) {
		if (!(typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(res, its_base);
			if (err < 0)
				goto out_free_its;

			its->list_nr = err;

			pr_info("ITS@%pa: Using ITS number %d\n",
				&res->start, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
		}
	}

	its->numa_node = numa_node;

	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
				get_order(ITS_CMD_QUEUE_SZ));
	if (!page) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_base = (void *)page_address(page);
	its->cmd_write = its->cmd_base;
	its->fwnode_handle = handle;
	its->get_msi_base = its_irq_get_msi_base;
	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;

	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (its->is_v4)
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

	if (GITS_TYPER_HCC(typer))
		its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;

	err = its_init_domain(handle, its);
	if (err)
		goto out_free_tables;

	raw_spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	raw_spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
	return err;
}
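/*
 * CBASER sizing note for the code above: (ITS_CMD_QUEUE_SZ / SZ_4K - 1)
 * programs the Size field as "number of 4kB pages minus one"; with the
 * 64kB command queue used by this driver, that is 16 - 1 = 15.
 */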
static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}
static int redist_disable_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	u64 timeout = USEC_PER_SEC;
	u64 val;

	if (!gic_rdists_supports_plpis()) {
		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
		return -ENXIO;
	}

	val = readl_relaxed(rbase + GICR_CTLR);
	if (!(val & GICR_CTLR_ENABLE_LPIS))
		return 0;

	/*
	 * If coming via a CPU hotplug event, we don't need to disable
	 * LPIs before trying to re-enable them. They are already
	 * configured and all is well in the world.
	 *
	 * If running with preallocated tables, there is nothing to do.
	 */
	if (gic_data_rdist()->lpi_enabled ||
	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
		return 0;

	/*
	 * From that point on, we only try to do some damage control.
	 */
	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
		smp_processor_id());
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	/* Disable LPIs */
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure any change to GICR_CTLR is observable by the GIC */
	dsb(sy);

	/*
	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
	 * Error out if we time out waiting for RWP to clear.
	 */
	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
		if (!timeout) {
			pr_err("CPU%d: Timeout while disabling LPIs\n",
			       smp_processor_id());
			return -ETIMEDOUT;
		}
		udelay(1);
		timeout--;
	}

	/*
	 * After it has been written to 1, it is IMPLEMENTATION
	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
	 * cleared to 0. Error out if clearing the bit failed.
	 */
	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
		return -EBUSY;
	}

	return 0;
}
int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		int ret;

		ret = redist_disable_lpis();
		if (ret)
			return ret;

		its_cpu_init_lpis();
		its_cpu_init_collections();
	}

	return 0;
}
static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};
static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
	}
	return 0;
}
#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}
static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	return 0;
}

static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}
static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps) {
		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
		return;
	}

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif /* CONFIG_ACPI_NUMA */
static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}
static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	int err;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;

	err = allocate_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry)
		has_v4 |= its->is_v4;

	if (has_v4 & rdists->has_vlpis) {
		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);

	return 0;
}