]>
Commit | Line | Data |
---|---|---|
55716d26 | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
f8de50eb KA |
2 | /* |
3 | * Copyright (c) 2006, Intel Corporation. | |
4 | * | |
98bcef56 | 5 | * Copyright (C) 2006-2008 Intel Corporation |
6 | * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> | |
f8de50eb KA |
7 | */ |
8 | ||
9 | #ifndef _IOVA_H_ | |
10 | #define _IOVA_H_ | |
11 | ||
12 | #include <linux/types.h> | |
13 | #include <linux/kernel.h> | |
14 | #include <linux/rbtree.h> | |
fb418dab | 15 | #include <linux/atomic.h> |
f8de50eb KA |
16 | #include <linux/dma-mapping.h> |
17 | ||
f8de50eb KA |
18 | /* iova structure */ |
19 | struct iova { | |
20 | struct rb_node node; | |
9257b4a2 OP |
21 | unsigned long pfn_hi; /* Highest allocated pfn */ |
22 | unsigned long pfn_lo; /* Lowest allocated pfn */ | |
23 | }; | |
24 | ||
25 | struct iova_magazine; | |
26 | struct iova_cpu_rcache; | |
27 | ||
28 | #define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */ | |
29 | #define MAX_GLOBAL_MAGS 32 /* magazines per bin */ | |
30 | ||
31 | struct iova_rcache { | |
32 | spinlock_t lock; | |
33 | unsigned long depot_size; | |
34 | struct iova_magazine *depot[MAX_GLOBAL_MAGS]; | |
35 | struct iova_cpu_rcache __percpu *cpu_rcaches; | |
f8de50eb KA |
36 | }; |
37 | ||
struct iova_domain;

/* Call-Back from IOVA code into IOMMU drivers */
typedef void (*iova_flush_cb)(struct iova_domain *domain);

/* Destructor for per-entry data */
typedef void (*iova_entry_dtor)(unsigned long data);

/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
#define IOVA_FQ_TIMEOUT	10
51 | ||
42f87e71 JR |
52 | /* Flush Queue entry for defered flushing */ |
53 | struct iova_fq_entry { | |
54 | unsigned long iova_pfn; | |
55 | unsigned long pages; | |
56 | unsigned long data; | |
fb418dab | 57 | u64 counter; /* Flush counter when this entrie was added */ |
42f87e71 JR |
58 | }; |
59 | ||
60 | /* Per-CPU Flush Queue structure */ | |
61 | struct iova_fq { | |
62 | struct iova_fq_entry entries[IOVA_FQ_SIZE]; | |
63 | unsigned head, tail; | |
8109c2a2 | 64 | spinlock_t lock; |
42f87e71 JR |
65 | }; |
66 | ||
f8de50eb KA |
67 | /* holds all the iova translations for a domain */ |
68 | struct iova_domain { | |
f8de50eb KA |
69 | spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ |
70 | struct rb_root rbroot; /* iova domain rbtree root */ | |
e60aa7b5 RM |
71 | struct rb_node *cached_node; /* Save last alloced node */ |
72 | struct rb_node *cached32_node; /* Save last 32-bit alloced node */ | |
0fb5fe87 | 73 | unsigned long granule; /* pfn granularity for this domain */ |
1b722500 | 74 | unsigned long start_pfn; /* Lower limit for this domain */ |
f661197e | 75 | unsigned long dma_32bit_pfn; |
bee60e94 | 76 | unsigned long max32_alloc_size; /* Size of last failed allocation */ |
14bd9a60 JQ |
77 | struct iova_fq __percpu *fq; /* Flush Queue */ |
78 | ||
79 | atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that | |
80 | have been started */ | |
81 | ||
82 | atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that | |
83 | have been finished */ | |
84 | ||
bb68b2fb | 85 | struct iova anchor; /* rbtree lookup anchor */ |
9257b4a2 | 86 | struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ |
42f87e71 JR |
87 | |
88 | iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU | |
89 | TLBs */ | |
90 | ||
91 | iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for | |
92 | iova entry */ | |
93 | ||
9a005a80 JR |
94 | struct timer_list fq_timer; /* Timer to regularily empty the |
95 | flush-queues */ | |
96 | atomic_t fq_timer_on; /* 1 when timer is active, 0 | |
97 | when not */ | |
f8de50eb KA |
98 | }; |
99 | ||
a156ef99 JL |
100 | static inline unsigned long iova_size(struct iova *iova) |
101 | { | |
102 | return iova->pfn_hi - iova->pfn_lo + 1; | |
103 | } | |
104 | ||
0fb5fe87 RM |
105 | static inline unsigned long iova_shift(struct iova_domain *iovad) |
106 | { | |
107 | return __ffs(iovad->granule); | |
108 | } | |
109 | ||
110 | static inline unsigned long iova_mask(struct iova_domain *iovad) | |
111 | { | |
112 | return iovad->granule - 1; | |
113 | } | |
114 | ||
115 | static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) | |
116 | { | |
117 | return iova & iova_mask(iovad); | |
118 | } | |
119 | ||
120 | static inline size_t iova_align(struct iova_domain *iovad, size_t size) | |
121 | { | |
122 | return ALIGN(size, iovad->granule); | |
123 | } | |
124 | ||
125 | static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) | |
126 | { | |
127 | return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); | |
128 | } | |
129 | ||
130 | static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) | |
131 | { | |
132 | return iova >> iova_shift(iovad); | |
133 | } | |
134 | ||
b4d8c7ae | 135 | #if IS_ENABLED(CONFIG_IOMMU_IOVA) |
ae1ff3d6 SA |
136 | int iova_cache_get(void); |
137 | void iova_cache_put(void); | |
85b45456 | 138 | |
f8de50eb KA |
139 | struct iova *alloc_iova_mem(void); |
140 | void free_iova_mem(struct iova *iova); | |
141 | void free_iova(struct iova_domain *iovad, unsigned long pfn); | |
142 | void __free_iova(struct iova_domain *iovad, struct iova *iova); | |
143 | struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, | |
f76aec76 KA |
144 | unsigned long limit_pfn, |
145 | bool size_aligned); | |
9257b4a2 OP |
146 | void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, |
147 | unsigned long size); | |
19282101 JR |
148 | void queue_iova(struct iova_domain *iovad, |
149 | unsigned long pfn, unsigned long pages, | |
150 | unsigned long data); | |
9257b4a2 | 151 | unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, |
538d5b33 | 152 | unsigned long limit_pfn, bool flush_rcache); |
f8de50eb KA |
153 | struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, |
154 | unsigned long pfn_hi); | |
155 | void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); | |
0fb5fe87 | 156 | void init_iova_domain(struct iova_domain *iovad, unsigned long granule, |
aa3ac946 | 157 | unsigned long start_pfn); |
effa4678 | 158 | bool has_iova_flush_queue(struct iova_domain *iovad); |
42f87e71 JR |
159 | int init_iova_flush_queue(struct iova_domain *iovad, |
160 | iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); | |
f8de50eb KA |
161 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); |
162 | void put_iova_domain(struct iova_domain *iovad); | |
75f05569 JL |
163 | struct iova *split_and_remove_iova(struct iova_domain *iovad, |
164 | struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi); | |
9257b4a2 | 165 | void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); |
21aff52a TR |
166 | #else |
167 | static inline int iova_cache_get(void) | |
168 | { | |
169 | return -ENOTSUPP; | |
170 | } | |
171 | ||
172 | static inline void iova_cache_put(void) | |
173 | { | |
174 | } | |
175 | ||
176 | static inline struct iova *alloc_iova_mem(void) | |
177 | { | |
178 | return NULL; | |
179 | } | |
180 | ||
181 | static inline void free_iova_mem(struct iova *iova) | |
182 | { | |
183 | } | |
184 | ||
185 | static inline void free_iova(struct iova_domain *iovad, unsigned long pfn) | |
186 | { | |
187 | } | |
188 | ||
189 | static inline void __free_iova(struct iova_domain *iovad, struct iova *iova) | |
190 | { | |
191 | } | |
192 | ||
193 | static inline struct iova *alloc_iova(struct iova_domain *iovad, | |
194 | unsigned long size, | |
195 | unsigned long limit_pfn, | |
196 | bool size_aligned) | |
197 | { | |
198 | return NULL; | |
199 | } | |
200 | ||
201 | static inline void free_iova_fast(struct iova_domain *iovad, | |
202 | unsigned long pfn, | |
203 | unsigned long size) | |
204 | { | |
205 | } | |
206 | ||
19282101 JR |
207 | static inline void queue_iova(struct iova_domain *iovad, |
208 | unsigned long pfn, unsigned long pages, | |
209 | unsigned long data) | |
210 | { | |
211 | } | |
212 | ||
21aff52a TR |
213 | static inline unsigned long alloc_iova_fast(struct iova_domain *iovad, |
214 | unsigned long size, | |
538d5b33 TN |
215 | unsigned long limit_pfn, |
216 | bool flush_rcache) | |
21aff52a TR |
217 | { |
218 | return 0; | |
219 | } | |
220 | ||
221 | static inline struct iova *reserve_iova(struct iova_domain *iovad, | |
222 | unsigned long pfn_lo, | |
223 | unsigned long pfn_hi) | |
224 | { | |
225 | return NULL; | |
226 | } | |
227 | ||
228 | static inline void copy_reserved_iova(struct iova_domain *from, | |
229 | struct iova_domain *to) | |
230 | { | |
231 | } | |
232 | ||
233 | static inline void init_iova_domain(struct iova_domain *iovad, | |
234 | unsigned long granule, | |
aa3ac946 | 235 | unsigned long start_pfn) |
21aff52a TR |
236 | { |
237 | } | |
238 | ||
201c1db9 | 239 | static inline bool has_iova_flush_queue(struct iova_domain *iovad) |
effa4678 DS |
240 | { |
241 | return false; | |
242 | } | |
243 | ||
42f87e71 JR |
244 | static inline int init_iova_flush_queue(struct iova_domain *iovad, |
245 | iova_flush_cb flush_cb, | |
246 | iova_entry_dtor entry_dtor) | |
247 | { | |
248 | return -ENODEV; | |
249 | } | |
250 | ||
21aff52a TR |
251 | static inline struct iova *find_iova(struct iova_domain *iovad, |
252 | unsigned long pfn) | |
253 | { | |
254 | return NULL; | |
255 | } | |
256 | ||
257 | static inline void put_iova_domain(struct iova_domain *iovad) | |
258 | { | |
259 | } | |
260 | ||
261 | static inline struct iova *split_and_remove_iova(struct iova_domain *iovad, | |
262 | struct iova *iova, | |
263 | unsigned long pfn_lo, | |
264 | unsigned long pfn_hi) | |
265 | { | |
266 | return NULL; | |
267 | } | |
268 | ||
269 | static inline void free_cpu_cached_iovas(unsigned int cpu, | |
270 | struct iova_domain *iovad) | |
271 | { | |
272 | } | |
273 | #endif | |
f8de50eb KA |
274 | |
275 | #endif |