/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H

#include <linux/bitops.h>
#include <linux/iommu.h>

/*
 * Public API for use by IOMMU drivers
 */
/*
 * Page table formats supported by the io-pgtable framework.  The value
 * selects which low-level allocator backs a set of page tables (see
 * alloc_io_pgtable_ops() and the io_pgtable_*_init_fns tables below).
 */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,		/* ARM LPAE, stage 1, 32-bit input addresses */
	ARM_32_LPAE_S2,		/* ARM LPAE, stage 2, 32-bit input addresses */
	ARM_64_LPAE_S1,		/* ARM LPAE, stage 1, 64-bit input addresses */
	ARM_64_LPAE_S2,		/* ARM LPAE, stage 2, 64-bit input addresses */
	ARM_V7S,		/* ARMv7 short-descriptor format */
	ARM_MALI_LPAE,		/* Arm Mali GPU variant of LPAE */
	AMD_IOMMU_V1,		/* AMD IOMMU v1 format */
	IO_PGTABLE_NUM_FMTS,	/* sentinel: number of supported formats */
};
21
/**
 * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all:  Synchronously invalidate the entire TLB context.
 * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
 *                  (sometimes referred to as the "walk cache") for a virtual
 *                  address range.
 * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
 *                  single page.  IOMMUs that cannot batch TLB invalidation
 *                  operations efficiently will typically issue them here, but
 *                  others may decide to update the iommu_iotlb_gather structure
 *                  and defer the invalidation until iommu_iotlb_sync() instead.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_flush_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
			       void *cookie);
	void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
			     unsigned long iova, size_t granule, void *cookie);
};
45
/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @coherent_walk: A flag to indicate whether or not page table walks made
 *                 by the IOMMU are coherent with the CPU caches.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
	 *	to support up to 35 bits PA where the bit32, bit33 and bit34 are
	 *	encoded in the bit9, bit4 and bit5 of the PTE respectively.
	 *
	 * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
	 *	on unmap, for DMA domains using the flush queue mechanism for
	 *	delayed invalidation.
	 *
	 * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
	 *	for use in the upper half of a split address space.
	 *
	 * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
	 *	attributes set in the TCR for a non-coherent page-table walker.
	 *
	 * NOTE(review): BIT(2) is deliberately left unassigned here — do not
	 * reuse it without checking the history of retired quirk bits.
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
	#define IO_PGTABLE_QUIRK_ARM_MTK_EXT	BIT(3)
	#define IO_PGTABLE_QUIRK_NON_STRICT	BIT(4)
	#define IO_PGTABLE_QUIRK_ARM_TTBR1	BIT(5)
	#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA	BIT(6)
	unsigned long quirks;
	unsigned long pgsize_bitmap;
	unsigned int ias;
	unsigned int oas;
	bool coherent_walk;
	const struct iommu_flush_ops *tlb;
	struct device *iommu_dev;

	/* Low-level data specific to the table format */
	union {
		struct {
			u64	ttbr;
			/* Fields mirroring the relevant TCR_EL1 bitfields */
			struct {
				u32	ips:3;
				u32	tg:2;
				u32	sh:2;
				u32	orgn:2;
				u32	irgn:2;
				u32	tsz:6;
			} tcr;
			u64	mair;
		} arm_lpae_s1_cfg;

		struct {
			u64	vttbr;
			/* Fields mirroring the relevant VTCR bitfields */
			struct {
				u32	ps:3;
				u32	tg:2;
				u32	sh:2;
				u32	orgn:2;
				u32	irgn:2;
				u32	sl:2;
				u32	tsz:6;
			} vtcr;
		} arm_lpae_s2_cfg;

		struct {
			u32	ttbr;
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;

		struct {
			u64	transtab;
			u64	memattr;
		} arm_mali_lpae_cfg;
	};
};
141
/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *gather);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};
160
/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration.  This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);
174
/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);
183

/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;	/* embedded; see io_pgtable_ops_to_pgtable() */
};
204
/*
 * Recover the containing struct io_pgtable from the embedded ops pointer
 * that alloc_io_pgtable_ops() hands out to callers.
 */
#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

507e4c9d
RM
207static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
208{
77e0992a
YW
209 if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all)
210 iop->cfg.tlb->tlb_flush_all(iop->cookie);
507e4c9d
RM
211}
212
10b7a7d9
WD
213static inline void
214io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
215 size_t size, size_t granule)
216{
77e0992a
YW
217 if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk)
218 iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
10b7a7d9
WD
219}
220
abfd6fe0 221static inline void
3951c41a
WD
222io_pgtable_tlb_add_page(struct io_pgtable *iop,
223 struct iommu_iotlb_gather * gather, unsigned long iova,
abfd6fe0 224 size_t granule)
507e4c9d 225{
77e0992a 226 if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page)
3951c41a 227 iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
507e4c9d
RM
228}
229
/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};

/*
 * Per-format alloc/free tables, one for each enum io_pgtable_fmt value,
 * defined by the respective low-level page table implementations.
 */
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;

fdb1d7be 250#endif /* __IO_PGTABLE_H */