/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. If no mapping slot is
 * available, the function blocks until a slot is released via kunmap().
 */
static inline void *kmap(struct page *page);

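/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * summing the bytes of a possibly-highmem page via kmap()/kunmap().
 * Must be called from preemptible task context since kmap() might sleep.
 */
static inline unsigned int example_sum_page_bytes(struct page *page)
{
	unsigned char *addr = kmap(page);	/* may block until a map slot is free */
	unsigned int sum = 0;
	size_t i;

	for (i = 0; i < PAGE_SIZE; i++)
		sum += addr[i];
	kunmap(page);				/* release the global mapping slot */
	return sum;
}
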
/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *                     remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);

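/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * reading one byte through a short-lived local mapping. Usable from any
 * context, but the mapped address must not escape this function or be
 * handed to other contexts.
 */
static inline unsigned char example_read_page_byte(struct page *page,
						   size_t offset)
{
	unsigned char *addr = kmap_local_page(page);
	unsigned char val = addr[offset];

	kunmap_local(addr);
	return val;
}
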
/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Effectively a wrapper around kmap_local_page() which disables pagefaults
 * and preemption.
 *
 * Do not use in new code. Use kmap_local_page() instead.
 */
static inline void *kmap_atomic(struct page *page);

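/*
 * Illustrative conversion sketch (hypothetical helpers, not part of this
 * header): the deprecated kmap_atomic() pattern and its kmap_local_page()
 * replacement. Code that genuinely depends on disabled pagefaults or
 * preemption must disable them explicitly around the local mapping.
 */
static inline void example_clear_first_word_legacy(struct page *page)
{
	u32 *addr = kmap_atomic(page);	/* disables pagefaults and preemption */

	addr[0] = 0;
	kunmap_atomic(addr);
}

static inline void example_clear_first_word(struct page *page)
{
	u32 *addr = kmap_local_page(page);	/* no implicit side effects */

	addr[0] = 0;
	kunmap_local(addr);
}
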
/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
 * @addr: Virtual address to be unmapped
 *
 * Counterpart to kmap_atomic().
 *
 * Effectively a wrapper around kunmap_local() which additionally undoes
 * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
 * preemption.
 */

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);

	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing its own
 * implementation.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

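/*
 * Illustrative sketch (hypothetical helper, not part of this header): an
 * anonymous-fault style allocation where the caller knows the page may
 * later be migrated or reclaimed, so the movable variant is used.
 */
static inline struct page *example_alloc_anon_page(struct vm_area_struct *vma,
						   unsigned long vaddr)
{
	/* Zeroed page from GFP_HIGHUSER_MOVABLE; may return NULL. */
	return alloc_zeroed_user_highpage_movable(vma, vaddr);
}
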
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

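/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * zeroing everything outside the byte range [valid_start, valid_end) of a
 * page in a single call, e.g. after a short read filled only part of it.
 */
static inline void example_zero_page_tails(struct page *page,
					   unsigned valid_start,
					   unsigned valid_end)
{
	zero_user_segments(page, 0, valid_start, valid_end, PAGE_SIZE);
}
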
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memmove_page(struct page *dst_page, size_t dst_off,
				struct page *src_page, size_t src_off,
				size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memmove(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	flush_dcache_page(page);
	kunmap_local(to);
}

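/*
 * Illustrative sketch (hypothetical helper and magic constant, not part of
 * this header): checking a signature at the start of a page with
 * memcpy_from_page() instead of open-coding the map, copy and unmap steps.
 */
static inline bool example_page_has_magic(struct page *page)
{
	char buf[4];

	memcpy_from_page(buf, page, 0, sizeof(buf));
	return memcmp(buf, "MAGC", sizeof(buf)) == 0;
}
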
static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_local_page(page);

	memset(addr + offset, 0, len);
	flush_dcache_page(page);
	kunmap_local(addr);
}

#endif /* _LINUX_HIGHMEM_H */