/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif

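/*
 * Translate between a page shift and the corresponding index into
 * mmu_psize_defs[] (and back).  shift_to_mmu_psize() returns -1 for a
 * shift that no supported page size uses.
 */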
static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

#define hugepd_none(hpd)	((hpd).pd == 0)

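/*
 * Walk the page tables for @ea and return a pointer to the (huge) PTE
 * that maps it, or NULL if none is found.  If @shift is non-NULL it is
 * set to the page shift of a huge mapping, or left at 0 for a normal
 * page.
 */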
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);
	if (is_hugepd(pg)) {
		hpdp = (hugepd_t *)pg;
	} else if (!pgd_none(*pg)) {
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);
		if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);
			if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm)) {
				return pte_offset_kernel(pm, ea);
			}
		}
	}

	if (!hpdp)
		return NULL;

	if (shift)
		*shift = hugepd_shift(*hpdp);
	return hugepte_offset(hpdp, ea, pdshift);
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

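/*
 * Allocate a hugepte table from the appropriate kmem cache and link it
 * into the page table entry (or entries, on FSL Book3E) at @hpdp.
 * Returns 0 on success and -ENOMEM if the cache allocation fails;
 * losing the race against a concurrent allocator is not an error.
 */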
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#else
	cachep = PGT_CACHE(pdshift - pshift);
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#else
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif

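/*
 * Find, and allocate if necessary, the hugepage directory entry and
 * hugepte for @addr.  Which page table level holds the hugepd depends on
 * the huge page size and the platform, per the macros above.
 */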
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);

	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_FSL_BOOK3E
/*
 * Build a list of addresses of gigantic pages.  This function is used in
 * early boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = alloc_bootmem(sizeof(struct huge_bootmem_page));
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}

/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}

/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}

#else /* !PPC_FSL_BOOK3E */

/*
 * Build a list of addresses of gigantic pages.  This function is used in
 * early boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

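/*
 * PMD sharing of hugepage page tables is not implemented on powerpc, so
 * there is never anything to unshare.
 */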
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

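/*
 * On FSL Book3E several page table entries can reference one hugepte
 * table, and page tables may be walked without locks, so freed hugepte
 * tables are batched and released only after an RCU grace period; the
 * common single-threaded case frees directly.
 */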
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &__get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
}
#endif

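/*
 * Clear the hugepd entry (or entries, on FSL Book3E) at @hpdp and free
 * the hugepte table it pointed to, provided the range being torn down
 * covers the whole region the entry maps; the floor/ceiling checks
 * mirror the generic free_pgd_range() logic.
 */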
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* Note: On fsl the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
	unsigned int shift = hugepd_shift(*hpdp);
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

	tlb->need_flush = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	hugepd_free(tlb, hugepte);
#else
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
#ifdef CONFIG_PPC_FSL_BOOK3E
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(pud)) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(pgd)) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

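/*
 * Look up the struct page backing a huge mapping at @address, returning
 * the subpage that corresponds to the offset within the huge page.
 */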
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
	if (!ptep || !shift)
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}

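/*
 * Huge pages are tracked through hugepd entries rather than huge
 * PMDs/PUDs here, so the generic pmd/pud hooks report no huge entries
 * and follow_huge_pmd() should never be reached.
 */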
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

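/*
 * Lockless get_user_pages() fast path for one hugepte: take a
 * speculative reference on the head page, then recheck that the PTE did
 * not change underneath us and back out if it did.
 */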
static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount references taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

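/* Like pmd_addr_end() and friends, but for an arbitrary huge page size. */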
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}

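/* Iterate gup_hugepte() over every hugepte covering [addr, end). */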
int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
	       unsigned long addr, unsigned long end,
	       int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}
#endif

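/*
 * Report the page size used to map a VMA: taken from the slice map when
 * CONFIG_PPC_MM_SLICES is enabled, from the hstate otherwise.
 */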
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
#else
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
#endif
}

static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}

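/*
 * Validate a candidate huge page size and register an hstate for it.
 * Returns 0 if the size is usable (or already registered), -EINVAL
 * otherwise.
 */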
static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
	/* Disable support for 64K huge pages when 64K SPU local store
	 * support is enabled as the current implementation conflicts.
	 */
	if (shift == PAGE_SHIFT_64K)
		return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been set up */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n",
		       size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

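/*
 * Boot-time initialisation: register every hardware-supported page size
 * as a huge page size, set up the page table caches, and pick a default
 * huge page size.
 */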
#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this.
	 */
	hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
					  HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;

		pgtable_cache_add(pdshift - shift, NULL);
		if (!PGT_CACHE(pdshift - shift))
			panic("hugetlbpage_init(): could not create "
			      "pgtable cache for %d bit pagesize\n", shift);
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}
#endif
module_init(hugetlbpage_init);
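/*
 * Flush the data and instruction caches for every subpage of a compound
 * huge page, kmapping each subpage first when it may live in highmem.
 */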
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
			__flush_dcache_icache(start);
			kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
		}
	}
}