/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
/*
 * No-op fallback used when the architecture does not define
 * ARCH_HAS_FLUSH_ANON_PAGE and supply its own flush_anon_page()
 * in <asm/cacheflush.h>.
 */
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
/*
 * No-op fallbacks for architectures that need no kernel dcache
 * maintenance; arches that do define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 * and provide real implementations.
 */
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
/* Low-level kmap_local primitives implemented in mm/highmem.c. */
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(void *vaddr);
#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
/* No-op unless the architecture needs a TLB flush after kmap(). */
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
/* Default protection bits for kernel highmem mappings. */
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);

/*
 * Map a page into the kernel's address space.  May sleep, so it is only
 * usable from process context.  Lowmem pages are permanently mapped and
 * resolved via page_address(); highmem pages get a slot from kmap_high().
 */
static inline void *kmap(struct page *page)
{
	void *vaddr;

	might_sleep();
	vaddr = PageHighMem(page) ? kmap_high(page) : page_address(page);
	kmap_flush_tlb((unsigned long)vaddr);
	return vaddr;
}

void kunmap_high(struct page *page);

/*
 * Release a mapping obtained with kmap().  Only highmem pages carry a
 * dynamic mapping that must be dropped; lowmem pages need no work.
 */
static inline void kunmap(struct page *page)
{
	might_sleep();
	if (PageHighMem(page))
		kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 *
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
298fa1ad TG |
89 | |
90 | #ifndef CONFIG_KMAP_LOCAL | |
91 | void *kmap_atomic_high_prot(struct page *page, pgprot_t prot); | |
92 | void kunmap_atomic_high(void *kvaddr); | |
93 | ||
20b271df | 94 | static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot) |
78b6d91e IW |
95 | { |
96 | preempt_disable(); | |
97 | pagefault_disable(); | |
98 | if (!PageHighMem(page)) | |
99 | return page_address(page); | |
20b271df | 100 | return kmap_atomic_high_prot(page, prot); |
78b6d91e | 101 | } |
298fa1ad TG |
102 | |
103 | static inline void __kunmap_atomic(void *vaddr) | |
104 | { | |
105 | kunmap_atomic_high(vaddr); | |
106 | } | |
#else /* !CONFIG_KMAP_LOCAL */

/*
 * kmap_local based implementation of the atomic kmap API.  Preemption
 * and pagefaults are disabled here and re-enabled by kunmap_atomic().
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	preempt_disable();
	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(void *addr)
{
	kunmap_local_indexed(addr);
}

#endif /* CONFIG_KMAP_LOCAL */

/* Atomic map with the default kernel page protection. */
static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

/* Current number of highmem pages in the system. */
static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_inc(void)
{
	atomic_long_inc(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

/* Flush stale-but-cached kmap entries; implemented in mm/highmem.c. */
void kmap_flush_unused(void);

/* Resolve a kmap'ed virtual address back to its struct page. */
struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

/* Without highmem there are never any free highmem pages. */
static inline unsigned int nr_free_highpages(void) { return 0; }

/* Without highmem every kmap address is a plain lowmem address. */
static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline unsigned long totalhigh_pages(void) { return 0UL; }

/* Without highmem every page is permanently mapped; no slot needed. */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

/* Nothing to undo when highmem is not configured. */
static inline void kunmap_high(struct page *page)
{
}

static inline void kunmap(struct page *page)
{
	/* Arch hook: some architectures flush the mapping on unmap. */
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

/*
 * Still disables preemption and pagefaults so callers see the same
 * context rules as the CONFIG_HIGHMEM=y version; kunmap_atomic()
 * re-enables them in reverse order.
 */
static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}

/* @prot is ignored: without highmem there is nothing to remap. */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(void *addr)
{
	/*
	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
	 * handles re-enabling faults and preemption
	 */
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(addr);
#endif
}

/* No kmap pool to flush when highmem is not configured. */
#define kmap_flush_unused()	do {} while(0)

#endif /* CONFIG_HIGHMEM */

#if !defined(CONFIG_KMAP_LOCAL)
#if defined(CONFIG_HIGHMEM)

/* Per-CPU depth counter for the arch kmap_atomic slot stack. */
DECLARE_PER_CPU(int, __kmap_atomic_idx);

/* Reserve the next kmap_atomic slot index on this CPU. */
static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

/* Index of the most recently pushed slot on this CPU. */
static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

/* Release the most recently pushed slot. */
static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}
#endif
#endif

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap()
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
	pagefault_enable();					\
	preempt_enable();					\
} while (0)

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
/* Zero a page that is mapped into userspace at @vaddr. */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);

	clear_user_page(kaddr, vaddr, page);
	kunmap_atomic(kaddr);
}
#endif
1da177e4 LT |
272 | |
273 | #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | |
769848c0 MG |
274 | /** |
275 | * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags | |
276 | * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE | |
277 | * @vma: The VMA the page is to be allocated for | |
278 | * @vaddr: The virtual address the page will be inserted into | |
279 | * | |
280 | * This function will allocate a page for a VMA but the caller is expected | |
281 | * to specify via movableflags whether the page will be movable in the | |
282 | * future or not | |
283 | * | |
284 | * An architecture may override this function by defining | |
285 | * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own | |
286 | * implementation. | |
287 | */ | |
1da177e4 | 288 | static inline struct page * |
769848c0 MG |
289 | __alloc_zeroed_user_highpage(gfp_t movableflags, |
290 | struct vm_area_struct *vma, | |
291 | unsigned long vaddr) | |
1da177e4 | 292 | { |
769848c0 MG |
293 | struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags, |
294 | vma, vaddr); | |
1da177e4 LT |
295 | |
296 | if (page) | |
297 | clear_user_highpage(page, vaddr); | |
298 | ||
299 | return page; | |
300 | } | |
301 | #endif | |
302 | ||
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or reclaimed
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

/* Zero an entire page through a temporary atomic kernel mapping. */
static inline void clear_highpage(struct page *page)
{
	void *vaddr = kmap_atomic(page);

	clear_page(vaddr);
	kunmap_atomic(vaddr);
}

eebd2aa3 CL |
325 | static inline void zero_user_segments(struct page *page, |
326 | unsigned start1, unsigned end1, | |
327 | unsigned start2, unsigned end2) | |
328 | { | |
1ec9c5dd | 329 | void *kaddr = kmap_atomic(page); |
eebd2aa3 CL |
330 | |
331 | BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE); | |
332 | ||
333 | if (end1 > start1) | |
334 | memset(kaddr + start1, 0, end1 - start1); | |
335 | ||
336 | if (end2 > start2) | |
337 | memset(kaddr + start2, 0, end2 - start2); | |
338 | ||
1ec9c5dd | 339 | kunmap_atomic(kaddr); |
eebd2aa3 CL |
340 | flush_dcache_page(page); |
341 | } | |
342 | ||
343 | static inline void zero_user_segment(struct page *page, | |
344 | unsigned start, unsigned end) | |
345 | { | |
346 | zero_user_segments(page, start, end, 0, 0); | |
347 | } | |
348 | ||
349 | static inline void zero_user(struct page *page, | |
350 | unsigned start, unsigned size) | |
351 | { | |
352 | zero_user_segments(page, start, start + size, 0, 0); | |
353 | } | |
01f2705d | 354 | |
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
 * Copy a userspace page.  Both pages are mapped with atomic kmaps and
 * unmapped in reverse order of mapping.
 */
static inline void copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma)
{
	char *src, *dst;

	src = kmap_atomic(from);
	dst = kmap_atomic(to);
	copy_user_page(dst, src, vaddr, to);
	kunmap_atomic(dst);
	kunmap_atomic(src);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

/*
 * Copy the full contents of one page to another via atomic kmaps,
 * unmapping in reverse order of mapping.
 */
static inline void copy_highpage(struct page *to, struct page *from)
{
	char *src, *dst;

	src = kmap_atomic(from);
	dst = kmap_atomic(to);
	copy_page(dst, src);
	kunmap_atomic(dst);
	kunmap_atomic(src);
}

#endif

#endif /* _LINUX_HIGHMEM_H */