/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/init.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <linux/swapops.h>
#include <linux/elf.h>

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;
unsigned long vmalloc_earlyreserve;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
EXPORT_SYMBOL(vmalloc_earlyreserve);

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = pmd_page(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, page);
	dec_page_state(nr_page_table_pages);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
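	/*
	 * A worked example of the rules above (values illustrative,
	 * assuming a PMD_SIZE of 2MB): freeing the tables behind a hole
	 * with addr = 3MB, end = 5MB, floor = 2.5MB, ceiling = 6MB.
	 * addr &= PMD_MASK rounds addr down to 2MB; that is below floor
	 * (a neighbouring vma still lives in that table), so addr steps
	 * up to 4MB.  ceiling is already PMD-aligned and end stays 5MB,
	 * so only the page tables covering 4MB..5MB are freed; the
	 * table shared with the vma below floor is preserved.
	 */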

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset((*tlb)->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);

	if (!(*tlb)->fullmm)
		flush_tlb_pgtables((*tlb)->mm, start, end);
}

void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			  && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
							HPAGE_SIZE)) {
				vma = next;
				next = vma->vm_next;
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}

pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	if (!pmd_present(*pmd)) {
		struct page *new;

		spin_unlock(&mm->page_table_lock);
		new = pte_alloc_one(mm, address);
		spin_lock(&mm->page_table_lock);
		if (!new)
			return NULL;
		/*
		 * Because we dropped the lock, we should re-check the
		 * entry, as somebody else could have populated it..
		 */
		if (pmd_present(*pmd)) {
			pte_free(new);
			goto out;
		}
		mm->nr_ptes++;
		inc_page_state(nr_page_table_pages);
		pmd_populate(mm, pmd, new);
	}
out:
	return pte_offset_map(pmd, address);
}

pte_t fastcall *pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	if (!pmd_present(*pmd)) {
		pte_t *new;

		spin_unlock(&mm->page_table_lock);
		new = pte_alloc_one_kernel(mm, address);
		spin_lock(&mm->page_table_lock);
		if (!new)
			return NULL;

		/*
		 * Because we dropped the lock, we should re-check the
		 * entry, as somebody else could have populated it..
		 */
		if (pmd_present(*pmd)) {
			pte_free_kernel(new);
			goto out;
		}
		pmd_populate_kernel(mm, pmd, new);
	}
out:
	return pte_offset_kernel(pmd, address);
}

static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
{
	if (file_rss)
		add_mm_counter(mm, file_rss, file_rss);
	if (anon_rss)
		add_mm_counter(mm, anon_rss, anon_rss);
}

/*
 * This function is called to print an error when a pte in a
 * !VM_RESERVED region is found pointing to an invalid pfn (which
 * is an error).
 *
 * The calling function must still handle the error.
 */
void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
{
	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
			"vm_flags = %lx, vaddr = %lx\n",
		(long long)pte_val(pte),
		(vma->vm_mm == current->mm ? current->comm : "???"),
		vma->vm_flags, vaddr);
	dump_stack();
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task are cleared in the whole range
 * covered by this vma.
 *
 * dst->page_table_lock is held on entry and exit,
 * but may be dropped within p[mg]d_alloc() and pte_alloc_map().
 */

static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;
	unsigned long pfn;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swap_duplicate(pte_to_swp_entry(pte));
			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				list_add(&dst_mm->mmlist, &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
		}
		goto out_set_pte;
	}

	/* If the region is VM_RESERVED, the mapping is not
	 * mapped via rmap - duplicate the pte as is.
	 */
	if (vm_flags & VM_RESERVED)
		goto out_set_pte;

	pfn = pte_pfn(pte);
	/* If the pte points outside of valid memory but
	 * the region is not VM_RESERVED, we have a problem.
	 */
	if (unlikely(!pfn_valid(pfn))) {
		print_bad_pte(vma, pte, addr);
		goto out_set_pte; /* try to do something sane */
	}

	page = pfn_to_page(pfn);

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
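	/* (A COW mapping here means private but writable in principle:
	 * VM_SHARED clear, VM_MAYWRITE set in vm_flags.) */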
	if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = *src_pte;
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);
	get_page(page);
	page_dup_rmap(page);
	rss[!!PageAnon(page)]++;

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *src_pte, *dst_pte;
	int progress = 0;
	int rss[2];

again:
	rss[1] = rss[0] = 0;
	dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map_nested(src_pmd, addr);

	spin_lock(&src_mm->page_table_lock);
	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
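		/* A copied pte counts 8 units of progress, an empty slot 1:
		 * the check below fires after four copies or 32 skips. */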
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    need_lockbreak(&src_mm->page_table_lock) ||
			    need_lockbreak(&dst_mm->page_table_lock))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
	spin_unlock(&src_mm->page_table_lock);

	pte_unmap_nested(src_pte - 1);
	pte_unmap(dst_pte - 1);
	add_mm_rss(dst_mm, rss[0], rss[1]);
	cond_resched_lock(&dst_mm->page_table_lock);
	if (addr != end)
		goto again;
	return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
					vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
					vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
					vma, addr, next))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
	return 0;
}

static void zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	int file_rss = 0;
	int anon_rss = 0;

	pte = pte_offset_map(pmd, addr);
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent))
			continue;
		if (pte_present(ptent)) {
			struct page *page = NULL;
			if (!(vma->vm_flags & VM_RESERVED)) {
				unsigned long pfn = pte_pfn(ptent);
				if (unlikely(!pfn_valid(pfn)))
					print_bad_pte(vma, ptent, addr);
				else
					page = pfn_to_page(pfn);
			}
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				anon_rss++;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent))
					mark_page_accessed(page);
				file_rss++;
			}
			page_remove_rmap(page);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (!pte_file(ptent))
			free_swap_and_cache(pte_to_swp_entry(ptent));
		pte_clear_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, addr != end);

	add_mm_rss(mm, -file_rss, -anon_rss);
	pte_unmap(pte - 1);
}

static inline void zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		zap_pte_range(tlb, vma, pmd, addr, next, details);
	} while (pmd++, addr = next, addr != end);
}

static inline void zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		zap_pmd_range(tlb, vma, pud, addr, next, details);
	} while (pud++, addr = next, addr != end);
}

static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		zap_pud_range(tlb, vma, pgd, addr, next, details);
	} while (pgd++, addr = next, addr != end);
	tlb_end_vma(tlb, vma);
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif
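/* Illustrative batch sizes, assuming 4KB pages: 32KB per batch with
 * CONFIG_PREEMPT (drop locks often, for latency), 4MB per batch
 * otherwise (fewer lock round-trips, for throughput). */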

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @mm: the controlling mm_struct
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.  Called under page_table_lock.
 *
 * We aim to not hold page_table_lock for too long (for scheduling latency
 * reasons).  So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns.  So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;

	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		while (start != end) {
			unsigned long block;

			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (is_vm_hugetlb_page(vma)) {
				block = end - start;
				unmap_hugepage_range(vma, start, end);
			} else {
				block = min(zap_bytes, end - start);
				unmap_page_range(*tlbp, vma, start,
						start + block, details);
			}

			start += block;
			zap_bytes -= block;
			if ((long)zap_bytes > 0)
				continue;

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				need_lockbreak(&mm->page_table_lock) ||
				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					/* must reset count of rss freed */
					*tlbp = tlb_gather_mmu(mm, fullmm);
					goto out;
				}
				spin_unlock(&mm->page_table_lock);
				cond_resched();
				spin_lock(&mm->page_table_lock);
			}

			*tlbp = tlb_gather_mmu(mm, fullmm);
			tlb_start_valid = 0;
			zap_bytes = ZAP_BLOCK_SIZE;
		}
	}
out:
	return start;	/* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	if (is_vm_hugetlb_page(vma)) {
		zap_hugepage_range(vma, address, size);
		return end;
	}

	lru_add_drain();
	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm, 0);
	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
	tlb_finish_mmu(tlb, address, end);
	spin_unlock(&mm->page_table_lock);
	return end;
}

/*
 * Do a quick page-table lookup for a single page.
 * mm->page_table_lock must be held.
 */
static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
			int read, int write, int accessed)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	unsigned long pfn;
	struct page *page;

	page = follow_huge_addr(mm, address, write);
	if (! IS_ERR(page))
		return page;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto out;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto out;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto out;
	if (pmd_huge(*pmd))
		return follow_huge_pmd(mm, address, pmd, write);

	ptep = pte_offset_map(pmd, address);
	if (!ptep)
		goto out;

	pte = *ptep;
	pte_unmap(ptep);
	if (pte_present(pte)) {
		if (write && !pte_write(pte))
			goto out;
		if (read && !pte_read(pte))
			goto out;
		pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (accessed) {
				if (write && !pte_dirty(pte) && !PageDirty(page))
					set_page_dirty(page);
				mark_page_accessed(page);
			}
			return page;
		}
	}

out:
	return NULL;
}

inline struct page *
follow_page(struct mm_struct *mm, unsigned long address, int write)
{
	return __follow_page(mm, address, 0, write, 1);
}

/*
 * check_user_page_readable() can be called from interrupt context by
 * oprofile, so we need to avoid taking any non-irq-safe locks.
 */
int check_user_page_readable(struct mm_struct *mm, unsigned long address)
{
	return __follow_page(mm, address, 1, 0, 0) != NULL;
}
EXPORT_SYMBOL(check_user_page_readable);

static inline int
untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
			 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/* Check if the vma is for an anonymous mapping. */
	if (vma->vm_ops && vma->vm_ops->nopage)
		return 0;

	/* Check if page directory entry exists. */
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return 1;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return 1;

	/* Check if page middle directory entry exists. */
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 1;

	/* There is a pte slot for 'address' in 'mm'. */
	return 0;
}

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int len, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned int flags;

	/*
	 * Require read or write permissions.
	 * If 'force' is set, we only require the "MAY" flags.
	 */
	flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
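	/* e.g. write=1, force=1 leaves just VM_MAYWRITE: a ptrace-style
	 * writer only needs the mapping to be writable in principle. */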
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
			unsigned long pg = start & PAGE_MASK;
			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;
			if (write) /* user gate pages are read-only */
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			if (pages) {
				pages[i] = pte_page(*pte);
				get_page(pages[i]);
			}
			pte_unmap(pte);
			if (vmas)
				vmas[i] = gate_vma;
			i++;
			start += PAGE_SIZE;
			len--;
			continue;
		}

		if (!vma || (vma->vm_flags & (VM_IO | VM_RESERVED))
				|| !(flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &len, i);
			continue;
		}
		spin_lock(&mm->page_table_lock);
		do {
			int write_access = write;
			struct page *page;

			cond_resched_lock(&mm->page_table_lock);
			while (!(page = follow_page(mm, start, write_access))) {
				int ret;

				/*
				 * Shortcut for anonymous pages. We don't want
				 * to force the creation of page tables for
				 * insanely big anonymously mapped areas that
				 * nobody touched so far. This is important
				 * for doing a core dump for these mappings.
				 */
				if (!write && untouched_anonymous_page(mm, vma, start)) {
					page = ZERO_PAGE(start);
					break;
				}
				spin_unlock(&mm->page_table_lock);
				ret = __handle_mm_fault(mm, vma, start, write_access);

				/*
				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
				 * broken COW when necessary, even if maybe_mkwrite
				 * decided not to set pte_write. We can thus safely do
				 * subsequent page lookups as if they were reads.
				 */
				if (ret & VM_FAULT_WRITE)
					write_access = 0;

				switch (ret & ~VM_FAULT_WRITE) {
				case VM_FAULT_MINOR:
					tsk->min_flt++;
					break;
				case VM_FAULT_MAJOR:
					tsk->maj_flt++;
					break;
				case VM_FAULT_SIGBUS:
					return i ? i : -EFAULT;
				case VM_FAULT_OOM:
					return i ? i : -ENOMEM;
				default:
					BUG();
				}
				spin_lock(&mm->page_table_lock);
			}
			if (pages) {
				pages[i] = page;
				flush_dcache_page(page);
				page_cache_get(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			len--;
		} while (len && start < vma->vm_end);
		spin_unlock(&mm->page_table_lock);
	} while (len);
	return i;
}
EXPORT_SYMBOL(get_user_pages);
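
/*
 * Typical call (a sketch, not from this file): pin one page of the
 * current process for I/O, with mmap_sem already held by the caller:
 *
 *	struct page *page;
 *	ret = get_user_pages(current, current->mm, addr, 1,
 *			     1, 0, &page, NULL);
 */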

static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pte_t *pte;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = ZERO_PAGE(addr);
		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
		page_cache_get(page);
		page_add_file_rmap(page);
		inc_mm_counter(mm, file_rss);
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, zero_pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
	return 0;
}

static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (zeromap_pte_range(mm, pmd, addr, next, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (zeromap_pmd_range(mm, pud, addr, next, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int zeromap_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + size;
	struct mm_struct *mm = vma->vm_mm;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	spin_lock(&mm->page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = zeromap_pud_range(mm, pgd, addr, next, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&mm->page_table_lock);
	return err;
}

/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
	return 0;
}

static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/* Note: this is only safe if the mm semaphore is held when called. */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED tells the core MM not to "manage" these pages
	 *	(e.g. refcount, mapcount, try to swap them out).
	 */
	vma->vm_flags |= VM_IO | VM_RESERVED;

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	spin_lock(&mm->page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&mm->page_table_lock);
	return err;
}
EXPORT_SYMBOL(remap_pfn_range);
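
/*
 * Typical use (a sketch, not from this file): a driver's ->mmap()
 * handler exposing a physical buffer at 'pfn' to userspace:
 *
 *	if (remap_pfn_range(vma, vma->vm_start, pfn,
 *			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
 *		return -EAGAIN;
 */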

/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We hold the mm semaphore and the page_table_lock on entry and exit
 * with the page_table_lock released.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		pte_t orig_pte)
{
	struct page *old_page, *new_page;
	unsigned long pfn = pte_pfn(orig_pte);
	pte_t entry;
	int ret = VM_FAULT_MINOR;

	BUG_ON(vma->vm_flags & VM_RESERVED);

	if (unlikely(!pfn_valid(pfn))) {
		/*
		 * Page table corrupted: show pte and kill process.
		 */
		print_bad_pte(vma, orig_pte, address);
		ret = VM_FAULT_OOM;
		goto unlock;
	}
	old_page = pfn_to_page(pfn);

	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
		int reuse = can_share_swap_page(old_page);
		unlock_page(old_page);
		if (reuse) {
			flush_cache_page(vma, address, pfn);
			entry = pte_mkyoung(orig_pte);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			ptep_set_access_flags(vma, address, page_table, entry, 1);
			update_mmu_cache(vma, address, entry);
			lazy_mmu_prot_update(entry);
			ret |= VM_FAULT_WRITE;
			goto unlock;
		}
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	page_cache_get(old_page);
	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	if (old_page == ZERO_PAGE(address)) {
		new_page = alloc_zeroed_user_highpage(vma, address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
		if (!new_page)
			goto oom;
		copy_user_highpage(new_page, old_page, address);
	}

	/*
	 * Re-check the pte - we dropped the lock
	 */
	spin_lock(&mm->page_table_lock);
	page_table = pte_offset_map(pmd, address);
	if (likely(pte_same(*page_table, orig_pte))) {
		page_remove_rmap(old_page);
		if (!PageAnon(old_page)) {
			inc_mm_counter(mm, anon_rss);
			dec_mm_counter(mm, file_rss);
		}
		flush_cache_page(vma, address, pfn);
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		ptep_establish(vma, address, page_table, entry);
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);

		lru_cache_add_active(new_page);
		page_add_anon_rmap(new_page, vma, address);

		/* Free the old page.. */
		new_page = old_page;
		ret |= VM_FAULT_WRITE;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
unlock:
	pte_unmap(page_table);
	spin_unlock(&mm->page_table_lock);
	return ret;
oom:
	page_cache_release(old_page);
	return VM_FAULT_OOM;
}

/*
 * Helper functions for unmap_mapping_range().
 *
 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
 *
 * We have to restart searching the prio_tree whenever we drop the lock,
 * since the iterator is only valid while the lock is held, and anyway
 * a later vma might be split and reinserted earlier while lock dropped.
 *
 * The list of nonlinear vmas could be handled more efficiently, using
 * a placeholder, but handle it in the same way until a need is shown.
 * It is important to search the prio_tree before nonlinear list: a vma
 * may become nonlinear and be shifted from prio_tree to nonlinear list
 * while the lock is dropped; but never shifted from list to prio_tree.
 *
 * In order to make forward progress despite restarting the search,
 * vm_truncate_count is used to mark a vma as now dealt with, so we can
 * quickly skip it next time around.  Since the prio_tree search only
 * shows us those vmas affected by unmapping the range in question, we
 * can't efficiently keep all vmas in step with mapping->truncate_count:
 * so instead reset them all whenever it wraps back to 0 (then go to 1).
 * mapping->truncate_count and vma->vm_truncate_count are protected by
 * i_mmap_lock.
 *
 * In order to make forward progress despite repeatedly restarting some
 * large vma, note the restart_addr from unmap_vmas when it breaks out:
 * and restart from that address when we reach that vma again. It might
 * have been split or merged, shrunk or extended, but never shifted: so
 * restart_addr remains valid so long as it remains in the vma's range.
 * unmap_mapping_range forces truncate_count to leap over page-aligned
 * values so we can save vma's restart_addr in its truncate_count field.
 */
#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
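/* e.g. with 4KB pages: 0x1000 has no offset bits set, so it could be a
 * saved restart address; 0x1001 can only be a real truncate_count. */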

static void reset_vma_truncate_counts(struct address_space *mapping)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
		vma->vm_truncate_count = 0;
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_truncate_count = 0;
}

static int unmap_mapping_range_vma(struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr,
		struct zap_details *details)
{
	unsigned long restart_addr;
	int need_break;

again:
	restart_addr = vma->vm_truncate_count;
	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
		start_addr = restart_addr;
		if (start_addr >= end_addr) {
			/* Top of vma has been split off since last time */
			vma->vm_truncate_count = details->truncate_count;
			return 0;
		}
	}

	restart_addr = zap_page_range(vma, start_addr,
					end_addr - start_addr, details);

	/*
	 * We cannot rely on the break test in unmap_vmas:
	 * on the one hand, we don't want to restart our loop
	 * just because that broke out for the page_table_lock;
	 * on the other hand, it does no test when vma is small.
	 */
	need_break = need_resched() ||
			need_lockbreak(details->i_mmap_lock);

	if (restart_addr >= end_addr) {
		/* We have now completed this vma: mark it so */
		vma->vm_truncate_count = details->truncate_count;
		if (!need_break)
			return 0;
	} else {
		/* Note restart_addr in vma's truncate_count field */
		vma->vm_truncate_count = restart_addr;
		if (!need_break)
			goto again;
	}

	spin_unlock(details->i_mmap_lock);
	cond_resched();
	spin_lock(details->i_mmap_lock);
	return -EINTR;
}

static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	pgoff_t vba, vea, zba, zea;

restart:
	vma_prio_tree_foreach(vma, &iter, root,
			details->first_index, details->last_index) {
		/* Skip quickly over those we have already dealt with */
		if (vma->vm_truncate_count == details->truncate_count)
			continue;

		vba = vma->vm_pgoff;
		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
		zba = details->first_index;
		if (zba < vba)
			zba = vba;
		zea = details->last_index;
		if (zea > vea)
			zea = vea;

		if (unmap_mapping_range_vma(vma,
			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
				details) < 0)
			goto restart;
	}
}

static inline void unmap_mapping_range_list(struct list_head *head,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;

	/*
	 * In nonlinear VMAs there is no correspondence between virtual address
	 * offset and file offset.  So we must perform an exhaustive search
	 * across *all* the pages in each nonlinear VMA, not just the pages
	 * whose virtual address lies outside the file truncation point.
	 */
restart:
	list_for_each_entry(vma, head, shared.vm_set.list) {
		/* Skip quickly over those we have already dealt with */
		if (vma->vm_truncate_count == details->truncate_count)
			continue;
		details->nonlinear_vma = vma;
		if (unmap_mapping_range_vma(vma, vma->vm_start,
					vma->vm_end, details) < 0)
			goto restart;
	}
}

1512 | /** | |
1513 | * unmap_mapping_range - unmap the portion of all mmaps | |
1514 | * in the specified address_space corresponding to the specified | |
1515 | * page range in the underlying file. | |
3d41088f | 1516 | * @mapping: the address space containing mmaps to be unmapped. |
1da177e4 LT |
1517 | * @holebegin: byte in first page to unmap, relative to the start of |
1518 | * the underlying file. This will be rounded down to a PAGE_SIZE | |
1519 | * boundary. Note that this is different from vmtruncate(), which | |
1520 | * must keep the partial page. In contrast, we must get rid of | |
1521 | * partial pages. | |
1522 | * @holelen: size of prospective hole in bytes. This will be rounded | |
1523 | * up to a PAGE_SIZE boundary. A holelen of zero truncates to the | |
1524 | * end of the file. | |
1525 | * @even_cows: 1 when truncating a file, to unmap even private COWed pages; | |
1526 | * 0 when invalidating pagecache, so as not to throw away private data. | |
1527 | */ | |
1528 | void unmap_mapping_range(struct address_space *mapping, | |
1529 | loff_t const holebegin, loff_t const holelen, int even_cows) | |
1530 | { | |
1531 | struct zap_details details; | |
1532 | pgoff_t hba = holebegin >> PAGE_SHIFT; | |
1533 | pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
1534 | ||
1535 | /* Check for overflow. */ | |
1536 | if (sizeof(holelen) > sizeof(hlen)) { | |
1537 | long long holeend = | |
1538 | (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
1539 | if (holeend & ~(long long)ULONG_MAX) | |
1540 | hlen = ULONG_MAX - hba + 1; | |
1541 | } | |
1542 | ||
1543 | details.check_mapping = even_cows ? NULL : mapping; | |
1544 | details.nonlinear_vma = NULL; | |
1545 | details.first_index = hba; | |
1546 | details.last_index = hba + hlen - 1; | |
1547 | if (details.last_index < details.first_index) | |
1548 | details.last_index = ULONG_MAX; | |
1549 | details.i_mmap_lock = &mapping->i_mmap_lock; | |
1550 | ||
1551 | spin_lock(&mapping->i_mmap_lock); | |
1552 | ||
1553 | /* serialize i_size write against truncate_count write */ | |
1554 | smp_wmb(); | |
1555 | /* Protect against page faults, and endless unmapping loops */ | |
1556 | mapping->truncate_count++; | |
1557 | /* | |
1558 | * For archs where spin_lock has inclusive semantics like ia64 | |
1559 | * this smp_mb() prevents pagetable contents from being read | |
1560 | * before the truncate_count increment is visible to | |
1561 | * other cpus. | |
1562 | */ | |
1563 | smp_mb(); | |
1564 | if (unlikely(is_restart_addr(mapping->truncate_count))) { | |
1565 | if (mapping->truncate_count == 0) | |
1566 | reset_vma_truncate_counts(mapping); | |
1567 | mapping->truncate_count++; | |
1568 | } | |
1569 | details.truncate_count = mapping->truncate_count; | |
1570 | ||
1571 | if (unlikely(!prio_tree_empty(&mapping->i_mmap))) | |
1572 | unmap_mapping_range_tree(&mapping->i_mmap, &details); | |
1573 | if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) | |
1574 | unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); | |
1575 | spin_unlock(&mapping->i_mmap_lock); | |
1576 | } | |
1577 | EXPORT_SYMBOL(unmap_mapping_range); | |
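/*
 * Editor's usage example (a sketch, not taken from this file): to drop
 * every pagecache mapping of an inode while preserving private COWed
 * copies, pass a zero-length hole at offset 0 ("to end of file") with
 * even_cows == 0:
 *
 *	unmap_mapping_range(inode->i_mapping, 0, 0, 0);
 *
 * vmtruncate() below shows the truncation variant, with even_cows == 1.
 */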
1578 | ||
1579 | /* | |
1580 | * Handle all mappings that got truncated by a "truncate()" | |
1581 | * system call. | |
1582 | * | |
1583 | * NOTE! We have to be ready to update the memory sharing | |
1584 | * between the file and the memory map for a potential last | |
1585 | * incomplete page. Ugly, but necessary. | |
1586 | */ | |
1587 | int vmtruncate(struct inode * inode, loff_t offset) | |
1588 | { | |
1589 | struct address_space *mapping = inode->i_mapping; | |
1590 | unsigned long limit; | |
1591 | ||
1592 | if (inode->i_size < offset) | |
1593 | goto do_expand; | |
1594 | /* | |
1595 | * truncation of in-use swapfiles is disallowed - it would cause | |
1596 | * subsequent swapout to scribble on the now-freed blocks. | |
1597 | */ | |
1598 | if (IS_SWAPFILE(inode)) | |
1599 | goto out_busy; | |
1600 | i_size_write(inode, offset); | |
1601 | unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); | |
1602 | truncate_inode_pages(mapping, offset); | |
1603 | goto out_truncate; | |
1604 | ||
1605 | do_expand: | |
1606 | limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; | |
1607 | if (limit != RLIM_INFINITY && offset > limit) | |
1608 | goto out_sig; | |
1609 | if (offset > inode->i_sb->s_maxbytes) | |
1610 | goto out_big; | |
1611 | i_size_write(inode, offset); | |
1612 | ||
1613 | out_truncate: | |
1614 | if (inode->i_op && inode->i_op->truncate) | |
1615 | inode->i_op->truncate(inode); | |
1616 | return 0; | |
1617 | out_sig: | |
1618 | send_sig(SIGXFSZ, current, 0); | |
1619 | out_big: | |
1620 | return -EFBIG; | |
1621 | out_busy: | |
1622 | return -ETXTBSY; | |
1623 | } | |
1624 | ||
1625 | EXPORT_SYMBOL(vmtruncate); | |
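/*
 * Editor's worked example of the rounding in vmtruncate(), assuming
 * 4K pages: truncating to offset 5000 passes
 * holebegin = 5000 + 4095 = 9095, which unmap_mapping_range() rounds
 * down to 8192.  The page covering bytes 4096..8191 - the one holding
 * the new EOF - therefore stays mapped, and truncate_inode_pages()
 * zeroes its tail beyond offset 5000 instead of unmapping it.
 */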
1626 | ||
1627 | /* | |
1628 | * Primitive swap readahead code. We simply read an aligned block of | |
1629 | * (1 << page_cluster) entries in the swap area. This method is chosen | |
1630 | * because it doesn't cost us any seek time. We also make sure to queue | |
1631 | * the 'original' request together with the readahead ones... | |
1632 | * | |
1633 | * This has been extended to use the NUMA policies from the mm triggering | |
1634 | * the readahead. | |
1635 | * | |
1636 | * Caller must hold down_read on vma->vm_mm->mmap_sem if vma is not NULL. | |
1637 | */ | |
1638 | void swapin_readahead(swp_entry_t entry, unsigned long addr, struct vm_area_struct *vma) | |
1639 | { | |
1640 | #ifdef CONFIG_NUMA | |
1641 | struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL; | |
1642 | #endif | |
1643 | int i, num; | |
1644 | struct page *new_page; | |
1645 | unsigned long offset; | |
1646 | ||
1647 | /* | |
1648 | * Get the number of swap handles we should read ahead. | |
1649 | */ | |
1650 | num = valid_swaphandles(entry, &offset); | |
1651 | for (i = 0; i < num; offset++, i++) { | |
1652 | /* Ok, do the async read-ahead now */ | |
1653 | new_page = read_swap_cache_async(swp_entry(swp_type(entry), | |
1654 | offset), vma, addr); | |
1655 | if (!new_page) | |
1656 | break; | |
1657 | page_cache_release(new_page); | |
1658 | #ifdef CONFIG_NUMA | |
1659 | /* | |
1660 | * Find the next applicable VMA for the NUMA policy. | |
1661 | */ | |
1662 | addr += PAGE_SIZE; | |
1663 | if (addr == 0) | |
1664 | vma = NULL; | |
1665 | if (vma) { | |
1666 | if (addr >= vma->vm_end) { | |
1667 | vma = next_vma; | |
1668 | next_vma = vma ? vma->vm_next : NULL; | |
1669 | } | |
1670 | if (vma && addr < vma->vm_start) | |
1671 | vma = NULL; | |
1672 | } else { | |
1673 | if (next_vma && addr >= next_vma->vm_start) { | |
1674 | vma = next_vma; | |
1675 | next_vma = vma->vm_next; | |
1676 | } | |
1677 | } | |
1678 | #endif | |
1679 | } | |
1680 | lru_add_drain(); /* Push any new pages onto the LRU now */ | |
1681 | } | |
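/*
 * Editor's note: valid_swaphandles() (mm/swapfile.c) picks the cluster.
 * A sketch of the net effect, assuming the default page_cluster of 3:
 * the faulting swap offset is rounded down to an 8-entry boundary and
 * up to 8 consecutive entries are read, e.g.
 *
 *	faulting offset 21  ->  read entries 16..23
 *
 * so the readahead is purely sequential I/O with no extra seeks.
 */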
1682 | ||
1683 | /* | |
1684 | * We hold the mm semaphore and the page_table_lock on entry and | |
1685 | * should release the page_table_lock on exit. | |
1686 | */ | |
65500d23 HD |
1687 | static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, |
1688 | unsigned long address, pte_t *page_table, pmd_t *pmd, | |
1689 | int write_access, pte_t orig_pte) | |
1da177e4 LT |
1690 | { |
1691 | struct page *page; | |
65500d23 | 1692 | swp_entry_t entry; |
1da177e4 LT |
1693 | pte_t pte; |
1694 | int ret = VM_FAULT_MINOR; | |
1695 | ||
1696 | pte_unmap(page_table); | |
1697 | spin_unlock(&mm->page_table_lock); | |
65500d23 HD |
1698 | |
1699 | entry = pte_to_swp_entry(orig_pte); | |
1da177e4 LT |
1700 | page = lookup_swap_cache(entry); |
1701 | if (!page) { | |
1702 | swapin_readahead(entry, address, vma); | |
1703 | page = read_swap_cache_async(entry, vma, address); | |
1704 | if (!page) { | |
1705 | /* | |
1706 | * Back out if somebody else faulted in this pte while | |
1707 | * we released the page table lock. | |
1708 | */ | |
1709 | spin_lock(&mm->page_table_lock); | |
1710 | page_table = pte_offset_map(pmd, address); | |
1711 | if (likely(pte_same(*page_table, orig_pte))) | |
1712 | ret = VM_FAULT_OOM; | |
65500d23 | 1713 | goto unlock; |
1da177e4 LT |
1714 | } |
1715 | ||
1716 | /* Had to read the page from swap area: Major fault */ | |
1717 | ret = VM_FAULT_MAJOR; | |
1718 | inc_page_state(pgmajfault); | |
1719 | grab_swap_token(); | |
1720 | } | |
1721 | ||
1722 | mark_page_accessed(page); | |
1723 | lock_page(page); | |
1724 | ||
1725 | /* | |
1726 | * Back out if somebody else faulted in this pte while we | |
1727 | * released the page table lock. | |
1728 | */ | |
1729 | spin_lock(&mm->page_table_lock); | |
1730 | page_table = pte_offset_map(pmd, address); | |
1731 | if (unlikely(!pte_same(*page_table, orig_pte))) { | |
1da177e4 | 1732 | ret = VM_FAULT_MINOR; |
b8107480 KK |
1733 | goto out_nomap; |
1734 | } | |
1735 | ||
1736 | if (unlikely(!PageUptodate(page))) { | |
1737 | ret = VM_FAULT_SIGBUS; | |
1738 | goto out_nomap; | |
1da177e4 LT |
1739 | } |
1740 | ||
1741 | /* The page isn't present yet, go ahead with the fault. */ | |
1da177e4 | 1742 | |
4294621f | 1743 | inc_mm_counter(mm, anon_rss); |
1da177e4 LT |
1744 | pte = mk_pte(page, vma->vm_page_prot); |
1745 | if (write_access && can_share_swap_page(page)) { | |
1746 | pte = maybe_mkwrite(pte_mkdirty(pte), vma); | |
1747 | write_access = 0; | |
1748 | } | |
1da177e4 LT |
1749 | |
1750 | flush_icache_page(vma, page); | |
1751 | set_pte_at(mm, address, page_table, pte); | |
1752 | page_add_anon_rmap(page, vma, address); | |
1753 | ||
c475a8ab HD |
1754 | swap_free(entry); |
1755 | if (vm_swap_full()) | |
1756 | remove_exclusive_swap_page(page); | |
1757 | unlock_page(page); | |
1758 | ||
1da177e4 LT |
1759 | if (write_access) { |
1760 | if (do_wp_page(mm, vma, address, | |
1761 | page_table, pmd, pte) == VM_FAULT_OOM) | |
1762 | ret = VM_FAULT_OOM; | |
1763 | goto out; | |
1764 | } | |
1765 | ||
1766 | /* No need to invalidate - it was non-present before */ | |
1767 | update_mmu_cache(vma, address, pte); | |
1768 | lazy_mmu_prot_update(pte); | |
65500d23 | 1769 | unlock: |
1da177e4 LT |
1770 | pte_unmap(page_table); |
1771 | spin_unlock(&mm->page_table_lock); | |
1772 | out: | |
1773 | return ret; | |
b8107480 KK |
1774 | out_nomap: |
1775 | pte_unmap(page_table); | |
1776 | spin_unlock(&mm->page_table_lock); | |
1777 | unlock_page(page); | |
1778 | page_cache_release(page); | |
65500d23 | 1779 | return ret; |
1da177e4 LT |
1780 | } |
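/*
 * Editor's note: the back-out sequence above is the standard pattern
 * for every handler in this file that drops page_table_lock in order
 * to sleep (here, for swap I/O and lock_page()).  A minimal sketch:
 *
 *	spin_lock(&mm->page_table_lock);
 *	page_table = pte_offset_map(pmd, address);
 *	if (!pte_same(*page_table, orig_pte))
 *		goto back_out;	// someone else serviced the fault
 *
 * Only when the pte is still exactly the one we faulted on may the
 * new mapping be installed.
 */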
1781 | ||
1782 | /* | |
1783 | * We are called with the MM semaphore and page_table_lock | |
1784 | * spinlock held to protect against concurrent faults in | |
1785 | * multithreaded programs. | |
1786 | */ | |
65500d23 HD |
1787 | static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, |
1788 | unsigned long address, pte_t *page_table, pmd_t *pmd, | |
1789 | int write_access) | |
1da177e4 | 1790 | { |
b5810039 | 1791 | struct page *page = ZERO_PAGE(address); |
1da177e4 | 1792 | pte_t entry; |
1da177e4 | 1793 | |
72866f6f | 1794 | /* Mapping of ZERO_PAGE - vm_page_prot is readonly */ |
b5810039 | 1795 | entry = mk_pte(page, vma->vm_page_prot); |
1da177e4 | 1796 | |
1da177e4 LT |
1797 | if (write_access) { |
1798 | /* Allocate our own private page. */ | |
1799 | pte_unmap(page_table); | |
1800 | spin_unlock(&mm->page_table_lock); | |
1801 | ||
1802 | if (unlikely(anon_vma_prepare(vma))) | |
65500d23 HD |
1803 | goto oom; |
1804 | page = alloc_zeroed_user_highpage(vma, address); | |
1da177e4 | 1805 | if (!page) |
65500d23 | 1806 | goto oom; |
1da177e4 LT |
1807 | |
1808 | spin_lock(&mm->page_table_lock); | |
65500d23 | 1809 | page_table = pte_offset_map(pmd, address); |
1da177e4 LT |
1810 | |
1811 | if (!pte_none(*page_table)) { | |
1da177e4 | 1812 | page_cache_release(page); |
65500d23 | 1813 | goto unlock; |
1da177e4 | 1814 | } |
4294621f | 1815 | inc_mm_counter(mm, anon_rss); |
65500d23 HD |
1816 | entry = mk_pte(page, vma->vm_page_prot); |
1817 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | |
1da177e4 LT |
1818 | lru_cache_add_active(page); |
1819 | SetPageReferenced(page); | |
65500d23 | 1820 | page_add_anon_rmap(page, vma, address); |
b5810039 NP |
1821 | } else { |
1822 | inc_mm_counter(mm, file_rss); | |
1823 | page_add_file_rmap(page); | |
1824 | page_cache_get(page); | |
1da177e4 LT |
1825 | } |
1826 | ||
65500d23 | 1827 | set_pte_at(mm, address, page_table, entry); |
1da177e4 LT |
1828 | |
1829 | /* No need to invalidate - it was non-present before */ | |
65500d23 | 1830 | update_mmu_cache(vma, address, entry); |
1da177e4 | 1831 | lazy_mmu_prot_update(entry); |
65500d23 HD |
1832 | unlock: |
1833 | pte_unmap(page_table); | |
1da177e4 | 1834 | spin_unlock(&mm->page_table_lock); |
1da177e4 | 1835 | return VM_FAULT_MINOR; |
65500d23 | 1836 | oom: |
1da177e4 LT |
1837 | return VM_FAULT_OOM; |
1838 | } | |
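/*
 * Editor's note on the read-fault path above: mapping ZERO_PAGE with
 * the vma's (write-protected) permissions defers allocation.  The
 * expected lifecycle of an untouched anonymous page is:
 *
 *	read fault  -> do_anonymous_page() maps ZERO_PAGE read-only
 *	later write -> handle_pte_fault() sees !pte_write(entry)
 *	            -> do_wp_page() copies into a freshly allocated page
 *
 * so only the first write consumes real memory.
 */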
1839 | ||
1840 | /* | |
1841 | * do_no_page() tries to create a new page mapping. It aggressively | |
1842 | * tries to share with existing pages, but makes a separate copy if | |
1843 | * the "write_access" parameter is true in order to avoid the next | |
1844 | * page fault. | |
1845 | * | |
1846 | * As this is called only for pages that do not currently exist, we | |
1847 | * do not need to flush old virtual caches or the TLB. | |
1848 | * | |
1849 | * This is called with the MM semaphore held and the page table | |
1850 | * spinlock held. Exit with the spinlock released. | |
1851 | */ | |
65500d23 HD |
1852 | static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma, |
1853 | unsigned long address, pte_t *page_table, pmd_t *pmd, | |
1854 | int write_access) | |
1da177e4 | 1855 | { |
65500d23 | 1856 | struct page *new_page; |
1da177e4 LT |
1857 | struct address_space *mapping = NULL; |
1858 | pte_t entry; | |
1859 | unsigned int sequence = 0; | |
1860 | int ret = VM_FAULT_MINOR; | |
1861 | int anon = 0; | |
1862 | ||
1da177e4 LT |
1863 | pte_unmap(page_table); |
1864 | spin_unlock(&mm->page_table_lock); | |
1865 | ||
1866 | if (vma->vm_file) { | |
1867 | mapping = vma->vm_file->f_mapping; | |
1868 | sequence = mapping->truncate_count; | |
1869 | smp_rmb(); /* serializes i_size against truncate_count */ | |
1870 | } | |
1871 | retry: | |
1da177e4 LT |
1872 | new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret); |
1873 | /* | |
1874 | * No smp_rmb is needed here as long as there's a full | |
1875 | * spin_lock/unlock sequence inside the ->nopage callback | |
1876 | * (for the pagecache lookup) that acts as an implicit | |
1877 | * smp_mb() and prevents the i_size read from happening | |
1878 | * after the next truncate_count read. | |
1879 | */ | |
1880 | ||
1881 | /* no page was available -- either SIGBUS or OOM */ | |
1882 | if (new_page == NOPAGE_SIGBUS) | |
1883 | return VM_FAULT_SIGBUS; | |
1884 | if (new_page == NOPAGE_OOM) | |
1885 | return VM_FAULT_OOM; | |
1886 | ||
1887 | /* | |
1888 | * Should we do an early C-O-W break? | |
1889 | */ | |
1890 | if (write_access && !(vma->vm_flags & VM_SHARED)) { | |
1891 | struct page *page; | |
1892 | ||
1893 | if (unlikely(anon_vma_prepare(vma))) | |
1894 | goto oom; | |
1895 | page = alloc_page_vma(GFP_HIGHUSER, vma, address); | |
1896 | if (!page) | |
1897 | goto oom; | |
1898 | copy_user_highpage(page, new_page, address); | |
1899 | page_cache_release(new_page); | |
1900 | new_page = page; | |
1901 | anon = 1; | |
1902 | } | |
1903 | ||
1904 | spin_lock(&mm->page_table_lock); | |
1905 | /* | |
1906 | * For a file-backed vma, someone could have truncated or otherwise | |
1907 | * invalidated this page. If unmap_mapping_range got called, | |
1908 | * retry getting the page. | |
1909 | */ | |
1910 | if (mapping && unlikely(sequence != mapping->truncate_count)) { | |
1da177e4 LT |
1911 | spin_unlock(&mm->page_table_lock); |
1912 | page_cache_release(new_page); | |
65500d23 HD |
1913 | cond_resched(); |
1914 | sequence = mapping->truncate_count; | |
1915 | smp_rmb(); | |
1da177e4 LT |
1916 | goto retry; |
1917 | } | |
1918 | page_table = pte_offset_map(pmd, address); | |
1919 | ||
1920 | /* | |
1921 | * This silly early PAGE_DIRTY setting removes a race | |
1922 | * due to the bad i386 page protection. But it's valid | |
1923 | * for other architectures too. | |
1924 | * | |
1925 | * Note that if write_access is true, we either now have | |
1926 | * an exclusive copy of the page, or this is a shared mapping, | |
1927 | * so we can make it writable and dirty to avoid having to | |
1928 | * handle that later. | |
1929 | */ | |
1930 | /* Only go through if we didn't race with anybody else... */ | |
1931 | if (pte_none(*page_table)) { | |
1da177e4 LT |
1932 | flush_icache_page(vma, new_page); |
1933 | entry = mk_pte(new_page, vma->vm_page_prot); | |
1934 | if (write_access) | |
1935 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | |
1936 | set_pte_at(mm, address, page_table, entry); | |
1937 | if (anon) { | |
4294621f | 1938 | inc_mm_counter(mm, anon_rss); |
1da177e4 LT |
1939 | lru_cache_add_active(new_page); |
1940 | page_add_anon_rmap(new_page, vma, address); | |
b5810039 | 1941 | } else if (!(vma->vm_flags & VM_RESERVED)) { |
4294621f | 1942 | inc_mm_counter(mm, file_rss); |
1da177e4 | 1943 | page_add_file_rmap(new_page); |
4294621f | 1944 | } |
1da177e4 LT |
1945 | } else { |
1946 | /* One of our sibling threads was faster, back out. */ | |
1da177e4 | 1947 | page_cache_release(new_page); |
65500d23 | 1948 | goto unlock; |
1da177e4 LT |
1949 | } |
1950 | ||
1951 | /* no need to invalidate: a not-present page shouldn't be cached */ | |
1952 | update_mmu_cache(vma, address, entry); | |
1953 | lazy_mmu_prot_update(entry); | |
65500d23 HD |
1954 | unlock: |
1955 | pte_unmap(page_table); | |
1da177e4 | 1956 | spin_unlock(&mm->page_table_lock); |
1da177e4 LT |
1957 | return ret; |
1958 | oom: | |
1959 | page_cache_release(new_page); | |
65500d23 | 1960 | return VM_FAULT_OOM; |
1da177e4 LT |
1961 | } |
1962 | ||
1963 | /* | |
1964 | * Fault of a previously existing named mapping. Repopulate the pte | |
1965 | * from the encoded file_pte if possible. This enables swappable | |
1966 | * nonlinear vmas. | |
1967 | */ | |
65500d23 HD |
1968 | static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma, |
1969 | unsigned long address, pte_t *page_table, pmd_t *pmd, | |
1970 | int write_access, pte_t orig_pte) | |
1da177e4 | 1971 | { |
65500d23 | 1972 | pgoff_t pgoff; |
1da177e4 LT |
1973 | int err; |
1974 | ||
65500d23 | 1975 | pte_unmap(page_table); |
1da177e4 LT |
1976 | spin_unlock(&mm->page_table_lock); |
1977 | ||
65500d23 HD |
1978 | if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) { |
1979 | /* | |
1980 | * Page table corrupted: show pte and kill process. | |
1981 | */ | |
b5810039 | 1982 | print_bad_pte(vma, orig_pte, address); |
65500d23 HD |
1983 | return VM_FAULT_OOM; |
1984 | } | |
1985 | /* We can then assume vma->vm_ops && vma->vm_ops->populate */ | |
1986 | ||
1987 | pgoff = pte_to_pgoff(orig_pte); | |
1988 | err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, | |
1989 | vma->vm_page_prot, pgoff, 0); | |
1da177e4 LT |
1990 | if (err == -ENOMEM) |
1991 | return VM_FAULT_OOM; | |
1992 | if (err) | |
1993 | return VM_FAULT_SIGBUS; | |
1994 | return VM_FAULT_MAJOR; | |
1995 | } | |
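/*
 * Editor's sketch of how a file pte arises (see mm/fremap.c): the
 * remap_file_pages() syscall marks the vma VM_NONLINEAR and, for pages
 * not resident, installs a pte encoding only the file offset, roughly
 *
 *	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
 *
 * On fault we decode that offset with pte_to_pgoff() and ask
 * ->populate() to pull the single page back in - hence VM_FAULT_MAJOR.
 */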
1996 | ||
1997 | /* | |
1998 | * These routines also need to handle stuff like marking pages dirty | |
1999 | * and/or accessed for architectures that don't do it in hardware (most | |
2000 | * RISC architectures). The early dirtying is also good on the i386. | |
2001 | * | |
2002 | * There is also a hook called "update_mmu_cache()" that architectures | |
2003 | * with external mmu caches can use to update those (ie the Sparc or | |
2004 | * PowerPC hashed page tables that act as extended TLBs). | |
2005 | * | |
2006 | * Note the "page_table_lock". It is to protect against kswapd removing | |
2007 | * pages from under us. Note that kswapd only ever _removes_ pages, never | |
2008 | * adds them. As such, once we have noticed that the page is not present, | |
2009 | * we can drop the lock early. | |
2010 | * | |
2011 | * The adding of pages is protected by the MM semaphore (which we hold), | |
2012 | * so we don't need to worry about a page suddenly being added into | |
2013 | * our VM. | |
2014 | * | |
2015 | * We enter with the pagetable spinlock held; we are supposed to | |
2016 | * release it when done. | |
2017 | */ | |
2018 | static inline int handle_pte_fault(struct mm_struct *mm, | |
65500d23 HD |
2019 | struct vm_area_struct *vma, unsigned long address, |
2020 | pte_t *pte, pmd_t *pmd, int write_access) | |
1da177e4 LT |
2021 | { |
2022 | pte_t entry; | |
2023 | ||
2024 | entry = *pte; | |
2025 | if (!pte_present(entry)) { | |
65500d23 HD |
2026 | if (pte_none(entry)) { |
2027 | if (!vma->vm_ops || !vma->vm_ops->nopage) | |
2028 | return do_anonymous_page(mm, vma, address, | |
2029 | pte, pmd, write_access); | |
2030 | return do_no_page(mm, vma, address, | |
2031 | pte, pmd, write_access); | |
2032 | } | |
1da177e4 | 2033 | if (pte_file(entry)) |
65500d23 HD |
2034 | return do_file_page(mm, vma, address, |
2035 | pte, pmd, write_access, entry); | |
2036 | return do_swap_page(mm, vma, address, | |
2037 | pte, pmd, write_access, entry); | |
1da177e4 LT |
2038 | } |
2039 | ||
2040 | if (write_access) { | |
2041 | if (!pte_write(entry)) | |
2042 | return do_wp_page(mm, vma, address, pte, pmd, entry); | |
1da177e4 LT |
2043 | entry = pte_mkdirty(entry); |
2044 | } | |
2045 | entry = pte_mkyoung(entry); | |
2046 | ptep_set_access_flags(vma, address, pte, entry, write_access); | |
2047 | update_mmu_cache(vma, address, entry); | |
2048 | lazy_mmu_prot_update(entry); | |
2049 | pte_unmap(pte); | |
2050 | spin_unlock(&mm->page_table_lock); | |
2051 | return VM_FAULT_MINOR; | |
2052 | } | |
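/*
 * Editor's summary of the dispatch in handle_pte_fault():
 *
 *	pte state                           handler
 *	---------------------------------   -----------------
 *	none, vma has no ->nopage           do_anonymous_page
 *	none, vma has ->nopage              do_no_page
 *	not present, pte_file               do_file_page
 *	not present, otherwise (swap)       do_swap_page
 *	present, write fault, !pte_write    do_wp_page
 *	present, otherwise                  mkdirty/mkyoung here
 */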
2053 | ||
2054 | /* | |
2055 | * By the time we get here, we already hold the mm semaphore | |
2056 | */ | |
65500d23 | 2057 | int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
1da177e4 LT |
2058 | unsigned long address, int write_access) |
2059 | { | |
2060 | pgd_t *pgd; | |
2061 | pud_t *pud; | |
2062 | pmd_t *pmd; | |
2063 | pte_t *pte; | |
2064 | ||
2065 | __set_current_state(TASK_RUNNING); | |
2066 | ||
2067 | inc_page_state(pgfault); | |
2068 | ||
ac9b9c66 HD |
2069 | if (unlikely(is_vm_hugetlb_page(vma))) |
2070 | return hugetlb_fault(mm, vma, address, write_access); | |
1da177e4 LT |
2071 | |
2072 | /* | |
2073 | * We need the page table lock to synchronize with kswapd | |
2074 | * and the SMP-safe atomic PTE updates. | |
2075 | */ | |
2076 | pgd = pgd_offset(mm, address); | |
2077 | spin_lock(&mm->page_table_lock); | |
2078 | ||
2079 | pud = pud_alloc(mm, pgd, address); | |
2080 | if (!pud) | |
2081 | goto oom; | |
2082 | ||
2083 | pmd = pmd_alloc(mm, pud, address); | |
2084 | if (!pmd) | |
2085 | goto oom; | |
2086 | ||
2087 | pte = pte_alloc_map(mm, pmd, address); | |
2088 | if (!pte) | |
2089 | goto oom; | |
2090 | ||
65500d23 | 2091 | return handle_pte_fault(mm, vma, address, pte, pmd, write_access); |
1da177e4 LT |
2092 | |
2093 | oom: | |
2094 | spin_unlock(&mm->page_table_lock); | |
2095 | return VM_FAULT_OOM; | |
2096 | } | |
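/*
 * Editor's usage example (the wrapper name varies by architecture):
 * an arch fault handler such as arch/i386/mm/fault.c funnels into this
 * function and acts on the tri-state result, roughly:
 *
 *	switch (handle_mm_fault(mm, vma, address, write)) {
 *	case VM_FAULT_MINOR:  tsk->min_flt++; break;
 *	case VM_FAULT_MAJOR:  tsk->maj_flt++; break;
 *	case VM_FAULT_SIGBUS: goto do_sigbus;
 *	case VM_FAULT_OOM:    goto out_of_memory;
 *	}
 */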
2097 | ||
2098 | #ifndef __PAGETABLE_PUD_FOLDED | |
2099 | /* | |
2100 | * Allocate page upper directory. | |
2101 | * | |
2102 | * We've already handled the fast-path in-line, and we own the | |
2103 | * page table lock. | |
2104 | */ | |
2105 | pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) | |
2106 | { | |
2107 | pud_t *new; | |
2108 | ||
2109 | spin_unlock(&mm->page_table_lock); | |
2110 | new = pud_alloc_one(mm, address); | |
2111 | spin_lock(&mm->page_table_lock); | |
2112 | if (!new) | |
2113 | return NULL; | |
2114 | ||
2115 | /* | |
2116 | * Because we dropped the lock, we should re-check the | |
2117 | * entry, as somebody else could have populated it.. | |
2118 | */ | |
2119 | if (pgd_present(*pgd)) { | |
2120 | pud_free(new); | |
2121 | goto out; | |
2122 | } | |
2123 | pgd_populate(mm, pgd, new); | |
2124 | out: | |
2125 | return pud_offset(pgd, address); | |
2126 | } | |
2127 | #endif /* __PAGETABLE_PUD_FOLDED */ | |
2128 | ||
2129 | #ifndef __PAGETABLE_PMD_FOLDED | |
2130 | /* | |
2131 | * Allocate page middle directory. | |
2132 | * | |
2133 | * We've already handled the fast-path in-line, and we own the | |
2134 | * page table lock. | |
2135 | */ | |
2136 | pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) | |
2137 | { | |
2138 | pmd_t *new; | |
2139 | ||
2140 | spin_unlock(&mm->page_table_lock); | |
2141 | new = pmd_alloc_one(mm, address); | |
2142 | spin_lock(&mm->page_table_lock); | |
2143 | if (!new) | |
2144 | return NULL; | |
2145 | ||
2146 | /* | |
2147 | * Because we dropped the lock, we should re-check the | |
2148 | * entry, as somebody else could have populated it.. | |
2149 | */ | |
2150 | #ifndef __ARCH_HAS_4LEVEL_HACK | |
2151 | if (pud_present(*pud)) { | |
2152 | pmd_free(new); | |
2153 | goto out; | |
2154 | } | |
2155 | pud_populate(mm, pud, new); | |
2156 | #else | |
2157 | if (pgd_present(*pud)) { | |
2158 | pmd_free(new); | |
2159 | goto out; | |
2160 | } | |
2161 | pgd_populate(mm, pud, new); | |
2162 | #endif /* __ARCH_HAS_4LEVEL_HACK */ | |
2163 | ||
2164 | out: | |
2165 | return pmd_offset(pud, address); | |
2166 | } | |
2167 | #endif /* __PAGETABLE_PMD_FOLDED */ | |
2168 | ||
2169 | int make_pages_present(unsigned long addr, unsigned long end) | |
2170 | { | |
2171 | int ret, len, write; | |
2172 | struct vm_area_struct *vma; | |
2173 | ||
2174 | vma = find_vma(current->mm, addr); | |
2175 | if (!vma) | |
2176 | return -1; | |
2177 | write = (vma->vm_flags & VM_WRITE) != 0; | |
2178 | if (addr >= end) | |
2179 | BUG(); | |
2180 | if (end > vma->vm_end) | |
2181 | BUG(); | |
2182 | len = (end + PAGE_SIZE - 1)/PAGE_SIZE - addr/PAGE_SIZE; | |
2183 | ret = get_user_pages(current, current->mm, addr, | |
2184 | len, write, 0, NULL, NULL); | |
2185 | if (ret < 0) | |
2186 | return ret; | |
2187 | return ret == len ? 0 : -1; | |
2188 | } | |
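/*
 * Editor's example: the VM_LOCKED path is the classic caller, faulting
 * in a whole locked range up front (a sketch of the pattern used in
 * mm/mmap.c and mm/mlock.c):
 *
 *	if (vm_flags & VM_LOCKED) {
 *		mm->locked_vm += len >> PAGE_SHIFT;
 *		make_pages_present(addr, addr + len);
 *	}
 */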
2189 | ||
2190 | /* | |
2191 | * Map a vmalloc()-space virtual address to the physical page. | |
2192 | */ | |
2193 | struct page *vmalloc_to_page(void *vmalloc_addr) | |
2194 | { | |
2195 | unsigned long addr = (unsigned long) vmalloc_addr; | |
2196 | struct page *page = NULL; | |
2197 | pgd_t *pgd = pgd_offset_k(addr); | |
2198 | pud_t *pud; | |
2199 | pmd_t *pmd; | |
2200 | pte_t *ptep, pte; | |
2201 | ||
2202 | if (!pgd_none(*pgd)) { | |
2203 | pud = pud_offset(pgd, addr); | |
2204 | if (!pud_none(*pud)) { | |
2205 | pmd = pmd_offset(pud, addr); | |
2206 | if (!pmd_none(*pmd)) { | |
2207 | ptep = pte_offset_map(pmd, addr); | |
2208 | pte = *ptep; | |
2209 | if (pte_present(pte)) | |
2210 | page = pte_page(pte); | |
2211 | pte_unmap(ptep); | |
2212 | } | |
2213 | } | |
2214 | } | |
2215 | return page; | |
2216 | } | |
2217 | ||
2218 | EXPORT_SYMBOL(vmalloc_to_page); | |
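/*
 * Editor's usage example (a hedged sketch): a driver mmap()ing a
 * vmalloc()ed buffer 'buf' can back its ->nopage handler with this:
 *
 *	unsigned long off = (addr - vma->vm_start) & PAGE_MASK;
 *	struct page *page = vmalloc_to_page(buf + off);
 *	get_page(page);		// ->nopage must return a held page
 *	return page;
 */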
2219 | ||
2220 | /* | |
2221 | * Map a vmalloc()-space virtual address to the physical page frame number. | |
2222 | */ | |
2223 | unsigned long vmalloc_to_pfn(void *vmalloc_addr) | |
2224 | { | |
2225 | return page_to_pfn(vmalloc_to_page(vmalloc_addr)); | |
2226 | } | |
2227 | ||
2228 | EXPORT_SYMBOL(vmalloc_to_pfn); | |
2229 | ||
2230 | /* | |
2231 | * update_mem_hiwater | |
2232 | * - update per-process rss and vm high-water data | |
2233 | */ | |
2234 | void update_mem_hiwater(struct task_struct *tsk) | |
2235 | { | |
2236 | if (tsk->mm) { | |
4294621f | 2237 | unsigned long rss = get_mm_rss(tsk->mm); |
1da177e4 LT |
2238 | |
2239 | if (tsk->mm->hiwater_rss < rss) | |
2240 | tsk->mm->hiwater_rss = rss; | |
2241 | if (tsk->mm->hiwater_vm < tsk->mm->total_vm) | |
2242 | tsk->mm->hiwater_vm = tsk->mm->total_vm; | |
2243 | } | |
2244 | } | |
2245 | ||
2246 | #if !defined(__HAVE_ARCH_GATE_AREA) | |
2247 | ||
2248 | #if defined(AT_SYSINFO_EHDR) | |
5ce7852c | 2249 | static struct vm_area_struct gate_vma; |
1da177e4 LT |
2250 | |
2251 | static int __init gate_vma_init(void) | |
2252 | { | |
2253 | gate_vma.vm_mm = NULL; | |
2254 | gate_vma.vm_start = FIXADDR_USER_START; | |
2255 | gate_vma.vm_end = FIXADDR_USER_END; | |
2256 | gate_vma.vm_page_prot = PAGE_READONLY; | |
b5810039 | 2257 | gate_vma.vm_flags = VM_RESERVED; |
1da177e4 LT |
2258 | return 0; |
2259 | } | |
2260 | __initcall(gate_vma_init); | |
2261 | #endif | |
2262 | ||
2263 | struct vm_area_struct *get_gate_vma(struct task_struct *tsk) | |
2264 | { | |
2265 | #ifdef AT_SYSINFO_EHDR | |
2266 | return &gate_vma; | |
2267 | #else | |
2268 | return NULL; | |
2269 | #endif | |
2270 | } | |
2271 | ||
2272 | int in_gate_area_no_task(unsigned long addr) | |
2273 | { | |
2274 | #ifdef AT_SYSINFO_EHDR | |
2275 | if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) | |
2276 | return 1; | |
2277 | #endif | |
2278 | return 0; | |
2279 | } | |
2280 | ||
2281 | #endif /* __HAVE_ARCH_GATE_AREA */ |