/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);
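
/*
 * Example (illustrative sketch, not part of the original header): the
 * basic kmap()/kunmap() pairing from preemptible task context. The helper
 * name and the memcpy()-based copy are assumptions for this example.
 */
static inline void example_copy_from_page(struct page *page, void *dst,
					  size_t len)
{
	void *vaddr = kmap(page);	/* may sleep on 32bit HIGHMEM */

	memcpy(dst, vaddr, len);	/* pointer is valid until kunmap() */
	kunmap(page);
}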

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *                     remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
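
/*
 * Example (illustrative sketch, not part of the original header): nested
 * kmap_local_page() mappings with the required reverse unmap order. The
 * helper name is an assumption for this example.
 */
static inline void example_copy_page_local(struct page *to, struct page *from)
{
	void *vfrom = kmap_local_page(from);
	void *vto = kmap_local_page(to);

	memcpy(vto, vfrom, PAGE_SIZE);
	kunmap_local(vto);	/* mapped last, unmapped first */
	kunmap_local(vfrom);
}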

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Effectively a wrapper around kmap_local_page() which disables pagefaults
 * and preemption.
 *
 * Do not use in new code. Use kmap_local_page() instead.
 */
static inline void *kmap_atomic(struct page *page);

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
 * @addr: Virtual address to be unmapped
 *
 * Counterpart to kmap_atomic().
 *
 * Effectively a wrapper around kunmap_local() which additionally undoes
 * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
 * preemption.
 */
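
/*
 * Example (illustrative sketch, not part of the original header): the
 * legacy kmap_atomic()/kunmap_atomic() pairing. Pagefaults and preemption
 * are disabled in between, so the section must not sleep; new code should
 * use kmap_local_page(). The helper name is an assumption for this example.
 */
static inline void example_clear_page_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page);

	memset(vaddr, 0, PAGE_SIZE);	/* no sleeping in this section */
	kunmap_atomic(vaddr);
}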

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);

	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
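
/*
 * Example (illustrative sketch, not part of the original header): using
 * alloc_zeroed_user_highpage_movable() in an anonymous-fault style path.
 * The helper name and the NULL-handling convention are assumptions.
 */
static inline struct page *
example_alloc_anon_page(struct vm_area_struct *vma, unsigned long address)
{
	struct page *page = alloc_zeroed_user_highpage_movable(vma, address);

	/* NULL means allocation failure; the caller reports OOM. */
	return page;
}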

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
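
/*
 * Example (illustrative sketch, not part of the original header): zeroing
 * two disjoint ranges of a page with a single call, e.g. the bytes before
 * and after a freshly written region. 'start' and 'end' are assumed
 * example parameters with 0 <= start <= end <= PAGE_SIZE.
 */
static inline void example_zero_around(struct page *page, unsigned start,
				       unsigned end)
{
	/* Zeroes [0, start) and [end, PAGE_SIZE) under one kmap_atomic(). */
	zero_user_segments(page, 0, start, end, PAGE_SIZE);
}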

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#endif /* _LINUX_HIGHMEM_H */