#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	__kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

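/*
 * Usage sketch (not part of the upstream header): kmap() may sleep, so it is
 * only valid in process context, and the mapping persists until kunmap() is
 * called on the same page.  The helper name below is hypothetical and only
 * illustrates the pairing rule.
 */
static inline int highmem_example_first_byte(struct page *page)
{
	char *vaddr = kmap(page);		/* may sleep */
	int c = *vaddr;

	kunmap(page);				/* takes the page, not the address */
	return c;
}
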
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx > KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

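/*
 * Sketch of how the per-CPU index stack above is consumed (illustrative
 * only): an architecture's atomic kmap implementation pushes an index to
 * reserve the next fixmap slot for this CPU, maps the page there, and pops
 * the index again when the slot is torn down in __kunmap_atomic().  The
 * helper names are hypothetical; real users live in arch code such as
 * arch/x86/mm/highmem_32.c.
 */
static inline int highmem_example_reserve_kmap_slot(void)
{
	int type = kmap_atomic_idx_push();

	/* arch code would now install a pte for the fixmap slot 'type' */
	return type;
}

static inline void highmem_example_release_kmap_slot(void)
{
	/* arch code clears that pte first, then releases the slot */
	kmap_atomic_idx_pop();
}
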
#endif

/*
 * Make both kmap_atomic(page, idx) and kmap_atomic(page) work.
 */
#define kmap_atomic(page, args...) __kmap_atomic(page)

/*
 * Prevent people from calling kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr, args...)				\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)

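/*
 * Usage sketch (illustrative only): kmap_atomic() disables pagefaults, so
 * the code between map and unmap must not sleep, and kunmap_atomic() must be
 * passed the address returned by kmap_atomic(), never the struct page.  The
 * legacy two-argument form kmap_atomic(page, KM_USER0) still compiles; the
 * km_type argument is simply ignored.  The checksum helper is hypothetical.
 */
static inline unsigned int highmem_example_sum_page(struct page *page)
{
	unsigned int sum = 0, i;
	unsigned char *vaddr = kmap_atomic(page);

	for (i = 0; i < PAGE_SIZE; i++)
		sum += vaddr[i];
	kunmap_atomic(vaddr);			/* address, not the page */
	return sum;
}
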
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

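/*
 * Usage sketch (illustrative only): an anonymous page fault handler that
 * knows the new page may be migrated later would allocate it as movable and
 * mark it up to date before mapping it.  The helper name is hypothetical;
 * the real caller is do_anonymous_page() in mm/memory.c.
 */
static inline struct page *
highmem_example_new_anon_page(struct vm_area_struct *vma, unsigned long address)
{
	struct page *page = alloc_zeroed_user_highpage_movable(vma, address);

	if (page)
		__SetPageUptodate(page);	/* contents (all zeroes) are valid */
	return page;
}
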
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

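/*
 * Usage sketch (illustrative only): zero everything in a page from a given
 * offset to the end, e.g. the stale tail of a block that now lies past
 * end-of-file.  'offset' is assumed to be less than PAGE_SIZE.
 */
static inline void highmem_example_zero_tail(struct page *page, unsigned offset)
{
	zero_user(page, offset, PAGE_SIZE - offset);
}
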
static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}

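/*
 * Usage sketch (illustrative only): copying a possibly-highmem page into a
 * freshly allocated destination, as page migration does; copy_highpage()
 * takes care of the temporary kernel mappings for both pages.  The helper
 * name is hypothetical, and the flush is a conservative extra for
 * architectures with aliasing data caches.
 */
static inline void highmem_example_copy_page(struct page *newpage, struct page *oldpage)
{
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);
}
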
#endif /* _LINUX_HIGHMEM_H */