#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
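
/*
 * Illustrative sketch, not part of this header: a filesystem that must not
 * recurse into itself from reclaim can mask __GFP_FS out of its mapping's
 * allocation mode when the inode is set up.  The function name is
 * hypothetical.
 */
static inline void example_mapping_disable_fs_recursion(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;

	/* Page cache allocations for this mapping may no longer enter the fs. */
	mapping_set_gfp_mask(mapping, mapping_gfp_mask(mapping) & ~__GFP_FS);
}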

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
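
/*
 * Worked example (assuming PAGE_CACHE_SIZE == 4096, so PAGE_CACHE_MASK ==
 * ~0xfff): PAGE_CACHE_ALIGN(0x1234) == (0x1234 + 0xfff) & ~0xfff == 0x2000,
 * i.e. the address is rounded up to the next page cache boundary.
 */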

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}
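
/*
 * Note: the _cold variant adds __GFP_COLD to ask for a cache-cold page,
 * which suits pages about to be filled by I/O (e.g. readahead) rather
 * than written immediately by the CPU.
 */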

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				unsigned long index);
extern struct page *find_lock_page(struct address_space *mapping,
				unsigned long index);
extern __deprecated_for_modules struct page *find_trylock_page(
				struct address_space *mapping, unsigned long index);
extern struct page *find_or_create_page(struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
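
/*
 * Illustrative sketch, not part of this header: walking a mapping with the
 * gang lookup above.  find_get_pages() takes a reference on every page it
 * returns, so each one must be released.  The function name is hypothetical.
 */
static inline void example_scan_mapping(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned int i, nr;

	while ((nr = find_get_pages(mapping, index, 16, pages)) != 0) {
		/* Resume the next batch after the last page we saw. */
		index = pages[nr - 1]->index + 1;
		for (i = 0; i < nr; i++) {
			/* ... inspect pages[i] here ... */
			page_cache_release(pages[i]);
		}
	}
}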

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   unsigned long index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
				unsigned long index);
extern struct page *read_cache_page(struct address_space *mapping,
				unsigned long index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
					     unsigned long index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

int add_to_page_cache(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				unsigned long index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
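
/*
 * Worked example (assuming 4K pages, so PAGE_CACHE_SHIFT == 12): a page
 * with page->index == 3 starts at byte offset 3 << 12 == 0x3000 in the
 * file.  With PAGE_CACHE_SHIFT == PAGE_SHIFT, the final shift in
 * linear_page_index() is zero and the result is simply the VMA-relative
 * page number plus vm_pgoff.
 */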

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(__lock_page_nosync(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page(page);
}
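
/*
 * Illustrative sketch, not part of this header: grab_cache_page() above
 * returns the page locked and with a reference held, so the caller must
 * both unlock and release it.  Assumes memset() is visible via the usual
 * header chain; the function name is hypothetical.
 */
static inline int example_zero_cache_page(struct address_space *mapping,
					  unsigned long index)
{
	struct page *page = grab_cache_page(mapping, index);
	void *kaddr;

	if (!page)
		return -ENOMEM;
	kaddr = kmap_atomic(page, KM_USER0);	/* cannot sleep until kunmap */
	memset(kaddr, 0, PAGE_CACHE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);
	unlock_page(page);
	page_cache_release(page);
	return 0;
}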

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (TestSetPageLocked(page))
		__lock_page_nosync(page);
}
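
/*
 * Illustrative sketch, not part of this header: the classic pattern for
 * locking a page found via lookup.  Because the page could have been
 * truncated while we slept on the lock, its mapping must be rechecked
 * afterwards.  The function name is hypothetical.
 */
static inline int example_lock_page_checked(struct page *page,
					    struct address_space *mapping)
{
	lock_page(page);
	if (page->mapping != mapping) {
		/* Truncated from under us; caller must retry the lookup. */
		unlock_page(page);
		return -EAGAIN;
	}
	return 0;	/* page is locked and still belongs to 'mapping' */
}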

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
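
/*
 * Illustrative sketch, not part of this header: how filesystems typically
 * consume read_mapping_page() from above.  The returned page may still be
 * under read I/O, so wait for the lock and then check PG_uptodate.
 * Assumes IS_ERR()/ERR_PTR() from <linux/err.h> are visible; the function
 * name is hypothetical.
 */
static inline struct page *example_read_and_check(struct address_space *mapping,
						  unsigned long index)
{
	struct page *page = read_mapping_page(mapping, index, NULL);

	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			/* The read failed; drop our reference and report it. */
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}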

extern void end_page_writeback(struct page *page);

/*
 * Fault a userspace page into pagetables. Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient. That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			__get_user(c, end);
	}
}
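
/*
 * Illustrative sketch, not part of this header: the buffered write path
 * pre-faults the user source buffer with fault_in_pages_readable() *before*
 * taking the page lock, so that the later copy (often done atomically,
 * where faulting would be forbidden) is unlikely to fault.  The copy step
 * is elided; the function name is hypothetical.
 */
static inline int example_prefault_and_grab(struct address_space *mapping,
					    unsigned long index,
					    const char __user *buf, int bytes)
{
	struct page *page;

	fault_in_pages_readable(buf, bytes);	/* may sleep; no locks held */
	page = grab_cache_page(mapping, index);	/* returns the page locked */
	if (!page)
		return -ENOMEM;
	/* ... copy 'bytes' from 'buf' into the page here ... */
	unlock_page(page);
	page_cache_release(page);
	return 0;
}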

#endif /* _LINUX_PAGEMAP_H */