#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

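/*
 * Illustrative sketch (an assumption, not part of this header): on
 * architectures with aliasing data caches, the vmap flush hooks above
 * bracket I/O done through a vmap()/vm_map_ram() alias of the pages. A
 * hypothetical driver might use them like this:
 *
 *	void *buf = vm_map_ram(pages, nr_pages, -1, PAGE_KERNEL);
 *
 *	memset(buf, 0, len);			// write through the alias
 *	flush_kernel_vmap_range(buf, len);	// push dirty lines before DMA to the device
 *	... start DMA out of the pages ...
 *
 *	... DMA from the device completes ...
 *	invalidate_kernel_vmap_range(buf, len);	// drop stale lines before reading
 *	check = *(char *)buf;
 *	vm_unmap_ram(buf, nr_pages);
 */
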
#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif
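
/*
 * Illustrative usage sketch (not part of this header). kmap() may sleep and
 * the mapping persists until kunmap(); kmap_atomic() disables pagefaults and
 * must be paired with kunmap_atomic() in the same context, unwound in LIFO
 * order when nested:
 *
 *	char *p;
 *
 *	p = kmap(page);			// sleepable context only
 *	p[0] = 0;
 *	kunmap(page);			// takes the page
 *
 *	p = kmap_atomic(page);		// usable from atomic context
 *	p[0] = 0;
 *	kunmap_atomic(p);		// takes the mapped address
 */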

#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
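
/*
 * A sketch of how an architecture's kmap_atomic() is expected to drive the
 * per-CPU index stack above (a simplified sketch loosely modelled on the x86
 * version in arch/x86/mm/highmem_32.c; details vary per architecture):
 *
 *	void *kmap_atomic(struct page *page)
 *	{
 *		unsigned long vaddr;
 *		int idx, type;
 *
 *		pagefault_disable();
 *		if (!PageHighMem(page))
 *			return page_address(page);
 *
 *		type = kmap_atomic_idx_push();		// grab the next free slot
 *		idx = type + KM_TYPE_NR * smp_processor_id();
 *		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *		set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
 *		arch_flush_lazy_mmu_mode();
 *
 *		return (void *)vaddr;
 *	}
 *
 * The matching __kunmap_atomic() clears the PTE and calls
 * kmap_atomic_idx_pop(), which is why nested mappings must unwind in LIFO
 * order.
 */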

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap()
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
	BUILD_BUG_ON(__same_type((addr), struct page *));      \
	__kunmap_atomic(addr);                                  \
} while (0)

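/*
 * For illustration, the type check above turns the historical mistake of
 * passing the page rather than the mapped address into a build failure:
 *
 *	addr = kmap_atomic(page);
 *	...
 *	kunmap_atomic(page);	// breaks the build: argument is a struct page *
 *	kunmap_atomic(addr);	// correct
 */
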
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
			unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

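/*
 * Illustrative caller sketch, condensed from the pattern the anonymous-page
 * fault path in mm/memory.c uses; user-faulted pages can migrate, so the
 * _movable variant is the normal choice:
 *
 *	struct page *page;
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	// page arrives already zeroed; map it into the process from here
 */
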
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

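/*
 * Illustrative usage sketch (not part of this header): filesystems commonly
 * use these helpers to zero the part of a page beyond end-of-file, e.g. when
 * the file size is not page aligned:
 *
 *	loff_t size = i_size_read(inode);
 *	unsigned offset = size & (PAGE_SIZE - 1);
 *
 *	if (offset)
 *		zero_user_segment(page, offset, PAGE_SIZE);
 */
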
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

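/*
 * Note on the two copy helpers: copy_user_highpage() forwards the user
 * virtual address to copy_user_page() so that architectures with virtually
 * indexed caches can choose a congruent kernel alias for the copy, while
 * copy_highpage() below is for pages without a user mapping and uses the
 * plain copy_page().
 */
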
static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif /* _LINUX_HIGHMEM_H */