/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			struct page *page, unsigned long vmaddr)
{
}
#endif
#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif
#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_inc(void)
{
	atomic_long_inc(&_totalhigh_pages);
}

static inline void totalhigh_pages_dec(void)
{
	atomic_long_dec(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

static inline void totalhigh_pages_set(long val)
{
	atomic_long_set(&_totalhigh_pages, val);
}

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);
#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline unsigned long totalhigh_pages(void) { return 0UL; }
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
	preempt_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()	do {} while(0)

#endif /* CONFIG_HIGHMEM */
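/*
 * Usage sketch (illustrative, not part of this header): kmap() may sleep
 * and suits longer-lived mappings, while kmap_atomic() disables pagefaults
 * and preemption and must be undone with kunmap_atomic() in the same
 * context. fill_page() below is a made-up helper, shown only as an example:
 *
 *	static void fill_page(struct page *page, int c)
 *	{
 *		void *vaddr = kmap_atomic(page);
 *
 *		memset(vaddr, c, PAGE_SIZE);
 *		kunmap_atomic(vaddr);
 *	}
 */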
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
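/*
 * Sketch of how an architecture consumes this per-CPU index stack,
 * loosely modeled on the x86-32 kmap_atomic implementation (kmap_pte and
 * FIX_KMAP_BEGIN are arch-specific and shown only for illustration):
 *
 *	type = kmap_atomic_idx_push();
 *	idx = type + KM_TYPE_NR * smp_processor_id();
 *	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	set_pte(kmap_pte - idx, mk_pte(page, prot));
 *
 * The matching kunmap_atomic() calls kmap_atomic_idx_pop(), so nested
 * atomic mappings must be released in LIFO order.
 */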
/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap()
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
	BUILD_BUG_ON(__same_type((addr), struct page *));       \
	__kunmap_atomic(addr);                                  \
} while (0)
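/*
 * Correct pairing (illustrative): kunmap_atomic() takes the kernel virtual
 * address returned by kmap_atomic(), never the struct page itself; passing
 * the page is caught at compile time by the BUILD_BUG_ON above.
 *
 *	void *vaddr = kmap_atomic(page);
 *	...
 *	kunmap_atomic(vaddr);	// not kunmap_atomic(page)
 */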
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
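/*
 * Illustrative call (hypothetical caller): passing no movable flags
 * requests a zeroed page that is not expected to move later:
 *
 *	page = __alloc_zeroed_user_highpage(0, vma, vaddr);
 *
 * The movable wrapper below is the common case for anonymous memory.
 */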
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
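/*
 * Typical use (sketch modeled on anonymous-fault handling; everything
 * except the allocator call is illustrative):
 *
 *	struct page *page;
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	// ... map `page` into the process at `address`
 */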
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}
static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}
static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}
static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
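/*
 * Usage sketch (illustrative; mirrors how filesystems zero the unwritten
 * tail of a partially filled page, with `bytes` as a hypothetical count
 * of valid bytes):
 *
 *	if (bytes < PAGE_SIZE)
 *		zero_user_segment(page, bytes, PAGE_SIZE);
 */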
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif
#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif
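/*
 * Note the LIFO unmap order above: the most recent kmap_atomic() (vto) is
 * undone first, matching the per-CPU index stack. A caller-side sketch
 * (hypothetical helper) for duplicating a possibly-highmem page:
 *
 *	struct page *dup = alloc_page(GFP_HIGHUSER);
 *
 *	if (dup)
 *		copy_highpage(dup, page);
 */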
#endif /* _LINUX_HIGHMEM_H */