/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif

#define hugepd_none(hpd)	((hpd).pd == 0)
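
/*
 * A hugepd ("huge page directory") entry takes the place of a normal
 * next-level pointer somewhere in the page table tree: instead of pointing
 * at a pgd/pud/pmd page it points at a table of huge PTEs, with the page
 * size encoded in the low bits of the entry (see __hugepte_alloc() below).
 */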

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	/* Only called for hugetlbfs pages, hence can ignore THP */
	return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#else
	cachep = PGT_CACHE(pdshift - pshift);
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			/* We use the old format for PPC_FSL_BOOK3E */
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#else
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else {
#ifdef CONFIG_PPC_BOOK3S_64
		hpdp->pd = __pa(new) | (shift_to_mmu_psize(pshift) << 2);
#else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
	}
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
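
/*
 * For the generic (non-BOOK3S-64) huge_pte_alloc() below this means: page
 * sizes of at least HUGEPD_PGD_SHIFT get their hugepd installed directly in
 * a PGD slot, sizes of at least HUGEPD_PUD_SHIFT in a PUD slot, and anything
 * smaller in a PMD slot.
 */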

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT)
		/*
		 * We need to use hugepd table
		 */
		hpdp = (hugepd_t *)pg;
	else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT)
			hpdp = (hugepd_t *)pu;
		else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else
				hpdp = (hugepd_t *)pm;
		}
	}
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}

#else

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);

	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
#endif

#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(huge_page_shift(hstate));
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val,
				       const char *unused, void *arg)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			if (npages > MAX_NUMBER_GPAGES) {
				pr_warn("MMU: %lu pages requested for page "
					"size %llu KB, limiting to "
					__stringify(MAX_NUMBER_GPAGES) "\n",
					npages, size / 1024);
				npages = MAX_NUMBER_GPAGES;
			}
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}
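
/*
 * For example, booting with "hugepagesz=1G hugepages=4" (assuming the
 * platform supports a 1G page size) leaves gpage_npages[] recording four
 * pages of that size, to be reserved by reserve_hugetlb_gpages() below.
 */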

/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
		   NULL, &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}

#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
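
/*
 * The RCU batching above exists because a lockless walker (for instance the
 * fast GUP path, which runs with interrupts disabled) may still be reading
 * the hugepte table; the immediate kmem_cache_free() is only taken when the
 * mm is known not to be in use on any other CPU.
 */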
#endif

static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* Note: On fsl the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
	unsigned int shift = hugepd_shift(*hpdp);
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

#ifdef CONFIG_PPC_FSL_BOOK3E
	hugepd_free(tlb, hugepte);
#else
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			/*
			 * If it is not a hugepd pointer, we should already
			 * find it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
#ifdef CONFIG_PPC_FSL_BOOK3E
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

/*
 * We are holding mmap_sem, so a parallel huge page collapse cannot run.
 * To prevent hugepage split, disable irq.
 */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	bool is_thp;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long mask, flags;
	struct page *page = ERR_PTR(-EINVAL);

	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &is_thp, &shift);
	if (!ptep)
		goto no_page;
	pte = READ_ONCE(*ptep);
	/*
	 * Verify it is a huge page else bail.
	 * Transparent hugepages are handled by generic code. We can skip them
	 * here.
	 */
	if (!shift || is_thp)
		goto no_page;

	if (!pte_present(pte)) {
		page = NULL;
		goto no_page;
	}
	mask = (1UL << shift) - 1;
	page = pte_page(pte);
	if (page)
		page += (address & mask) / PAGE_SIZE;

no_page:
	local_irq_restore(flags);
	return page;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	BUG();
	return NULL;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}

int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
		unsigned long end, int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}
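
/*
 * gup_huge_pd() is the hook used by the generic fast-GUP walk when it finds
 * a hugepd entry: it steps through the hugepte table in sz-sized chunks and
 * lets gup_hugepte() take speculative references without falling back to the
 * slow path.
 */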

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
#else
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
#endif
}

static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}
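
/* For example, 4M (__ilog2 == 22, even) is a power of 4; 8M (23, odd) is not. */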

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been set up */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
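
/*
 * memparse() accepts the usual size suffixes, so this handles command lines
 * such as "hugepagesz=16M" or "hugepagesz=16G"; sizes rejected by
 * add_huge_page_size() trigger the warning above.
 */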

#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this
	 */
	hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
					  HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
		/*
		 * If pdshift and shift are the same, we don't use the pgt
		 * cache for the hugepd.
		 */
		if (pdshift != shift) {
			pgtable_cache_add(pdshift - shift, NULL);
			if (!PGT_CACHE(pdshift - shift))
				panic("hugetlbpage_init(): could not create "
				      "pgtable cache for %d bit pagesize\n", shift);
		}
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}
#endif
arch_initcall(hugetlbpage_init);

void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}
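
/*
 * Each base page of the compound page is flushed individually above because
 * __flush_dcache_icache() operates on a single page; on cores whose icache
 * does not snoop the dcache this keeps instruction fetches coherent for
 * executable huge pages.
 */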

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it.
 *
 * This function needs to be called with interrupts disabled. We use this
 * variant when we have MSR[EE] = 0 but the paca->soft_enabled = 1.
 */

pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				   bool *is_thp, unsigned *shift)
{
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	if (is_thp)
		*is_thp = false;

	pgdp = pgdir + pgd_index(ea);
	pgd  = READ_ONCE(*pgdp);
	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or a page unmap. The returned pte_t * is still not
	 * stable, so it should be checked there for the above conditions.
	 */
	if (pgd_none(pgd))
		return NULL;
	else if (pgd_huge(pgd)) {
		ret_pte = (pte_t *) pgdp;
		goto out;
	} else if (is_hugepd(__hugepd(pgd_val(pgd))))
		hpdp = (hugepd_t *)&pgd;
	else {
		/*
		 * Even if we end up with an unmap, the pgtable will not
		 * be freed, because we do an rcu free and here we are
		 * irq disabled.
		 */
		pdshift = PUD_SHIFT;
		pudp = pud_offset(&pgd, ea);
		pud  = READ_ONCE(*pudp);

		if (pud_none(pud))
			return NULL;
		else if (pud_huge(pud)) {
			ret_pte = (pte_t *) pudp;
			goto out;
		} else if (is_hugepd(__hugepd(pud_val(pud))))
			hpdp = (hugepd_t *)&pud;
		else {
			pdshift = PMD_SHIFT;
			pmdp = pmd_offset(&pud, ea);
			pmd  = READ_ONCE(*pmdp);
			/*
			 * A hugepage collapse is captured by pmd_none,
			 * because it marks the pmd none and does a hpte
			 * invalidate.
			 */
			if (pmd_none(pmd))
				return NULL;

			if (pmd_trans_huge(pmd)) {
				if (is_thp)
					*is_thp = true;
				ret_pte = (pte_t *) pmdp;
				goto out;
			}

			if (pmd_huge(pmd)) {
				ret_pte = (pte_t *) pmdp;
				goto out;
			} else if (is_hugepd(__hugepd(pmd_val(pmd))))
				hpdp = (hugepd_t *)&pmd;
			else
				return pte_offset_kernel(&pmd, ea);
		}
	}
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (shift)
		*shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
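
/*
 * Callers receive the entry's shift (0 for an ordinary PTE) so they can
 * derive the mapping size; the find_linux_pte_or_hugepte() wrapper (used
 * e.g. in follow_huge_addr() above) is the form normally called once
 * interrupts have been disabled, while this __ variant exists for the
 * MSR[EE] = 0 case described in the comment above.
 */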

int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = READ_ONCE(*ptep);
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
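
/*
 * Note the lockless protocol above: references are taken speculatively with
 * page_cache_add_speculative() and the PTE is then re-read; if it changed
 * under us the references are dropped and 0 is returned so the caller can
 * fall back to the slow GUP path.
 */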