/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_512K	19
#define PAGE_SHIFT_8M	23
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34
unsigned int HPAGE_SHIFT;
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif
#define hugepd_none(hpd)	((hpd).pd == 0)
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	/* Only called for hugetlbfs pages, hence can ignore THP */
	return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
}
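/*
 * Allocate a hugepte table and link it from the page-table directory.
 * A single hugepte table may be referenced by several consecutive
 * directory entries (num_hugepd of them) when the huge page covers more
 * than one directory slot.
 */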
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

	if (pshift >= pdshift) {
		cachep = hugepte_cache;
		num_hugepd = 1 << (pshift - pdshift);
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}

	new = kmem_cache_zalloc(cachep, GFP_KERNEL);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(&mm->page_table_lock);

	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else {
#ifdef CONFIG_PPC_BOOK3S_64
			hpdp->pd = __pa(new) |
				   (shift_to_mmu_psize(pshift) << 2);
#elif defined(CONFIG_PPC_8xx)
			hpdp->pd = __pa(new) |
				   (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
				    _PMD_PAGE_512K) | _PMD_PRESENT;
#else
			/* We use the old format for PPC_FSL_BOOK3E */
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
		}
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
	spin_unlock(&mm->page_table_lock);
	return 0;
}
/*
 * These macros define how to determine which level of the page table holds
 * the hugepd entry.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
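/*
 * huge_pte_alloc() below returns the huge PTE slot for @addr, allocating
 * the intermediate levels and the hugepd table as needed.  The level that
 * holds the entry is chosen from the page size (pshift) using the macros
 * above.
 */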
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT)
		/*
		 * We need to use hugepd table
		 */
		hpdp = (hugepd_t *)pg;
	else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT)
			hpdp = (hugepd_t *)pu;
		else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else
				hpdp = (hugepd_t *)pm;
		}
	}
#else
	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
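/*
 * Gigantic page handling: add_gpage() records the physical addresses of
 * pages too large for the buddy allocator while the system is still
 * booting, and alloc_bootmem_huge_page() later moves them onto the generic
 * huge_boot_pages list.  On FSL Book3E and 8xx the pages are carved out of
 * memblock by reserve_hugetlb_gpages() below.
 */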
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}
/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(huge_page_shift(hstate));
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];
static int __init do_gpage_early_setup(char *param, char *val,
				       const char *unused, void *arg)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			if (npages > MAX_NUMBER_GPAGES) {
				pr_warn("MMU: %lu pages requested for page "
#ifdef CONFIG_PHYS_ADDR_T_64BIT
					"size %llu KB, limiting to "
#else
					"size %u KB, limiting to "
#endif
					__stringify(MAX_NUMBER_GPAGES) "\n",
					npages, size / 1024);
				npages = MAX_NUMBER_GPAGES;
			}
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}
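/*
 * For example, a command line such as "hugepagesz=1G hugepages=4" records
 * 4 pages in the gpage_npages[] slot for the 1G page size; the memory
 * itself is only reserved later by reserve_hugetlb_gpages().
 */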
/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
		   NULL, &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}
#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		addr += page_size;
		number_of_pages--;
	}
}
/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);
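/*
 * Hugepte tables are freed in batches through RCU: lockless page table
 * walkers (e.g. __find_linux_pte_or_hugepte() with interrupts disabled)
 * may still be traversing them, so the actual kmem_cache_free() is
 * deferred to hugepd_free_rcu_callback() after a grace period.
 */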
static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif
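/*
 * free_hugepd_range() tears down one hugepd: it clears the directory
 * entries that reference the hugepte table (possibly several of them on
 * FSL/8xx) and then frees the table itself, respecting the floor/ceiling
 * limits the same way the generic free_pgd_range() does.
 */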
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else
		pgtable_free_tlb(tlb, hugepte, pdshift - shift);
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			/*
			 * if it is not hugepd pointer, we should already find
			 * it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}
/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}
/*
 * We are holding mmap_sem, so a parallel huge page collapse cannot run.
 * To prevent hugepage split, disable irq.
 */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	bool is_thp;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long mask, flags;
	struct page *page = ERR_PTR(-EINVAL);

	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &is_thp, &shift);
	if (!ptep)
		goto no_page;
	pte = READ_ONCE(*ptep);
	/*
	 * Verify it is a huge page else bail.
	 * Transparent hugepages are handled by generic code. We can skip them
	 * here.
	 */
	if (!shift || is_thp)
		goto no_page;

	if (!pte_present(pte)) {
		page = NULL;
		goto no_page;
	}
	mask = (1UL << shift) - 1;
	page = pte_page(pte);
	if (page)
		page += (address & mask) / PAGE_SIZE;

no_page:
	local_irq_restore(flags);
	return page;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	BUG();
	return NULL;
}
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}
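/*
 * gup_huge_pd() is the get_user_pages_fast() helper for hugepd entries: it
 * walks every hugepte covered by [addr, end) under the given hugepd and
 * lets gup_hugepte() take the page references.
 */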
int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
		unsigned long end, int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
							pgoff, flags);
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
	/* With radix we don't use slice, so derive it from vma */
	if (!radix_enabled())
		return 1UL << mmu_psize_to_shift(psize);
#endif
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
}
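/*
 * is_power_of_4() is used by add_huge_page_size() below: FSL Book3E huge
 * page sizes are restricted to powers of 4 there, while other platforms
 * only require a power of 2.
 */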
static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}
static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
	if (size <= PAGE_SIZE)
		return -EINVAL;
#if defined(CONFIG_PPC_FSL_BOOK3E)
	if (!is_power_of_4(size))
		return -EINVAL;
#elif !defined(CONFIG_PPC_8xx)
	if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0) {
		hugetlb_bad_size();
		pr_err("Invalid huge page size specified(%llu)\n", size);
	}

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
	if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;
#endif
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < HUGEPD_PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < HUGEPD_PGD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
		/*
		 * if we have pdshift and shift value same, we don't
		 * use pgt cache for hugepd.
		 */
		if (pdshift > shift) {
			pgtable_cache_add(pdshift - shift, NULL);
			if (!PGT_CACHE(pdshift - shift))
				panic("hugetlbpage_init(): could not create "
				      "pgtable cache for %d bit pagesize\n", shift);
		}
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
		else if (!hugepte_cache) {
			/*
			 * Create a kmem cache for hugeptes.  The bottom bits in
			 * the pte have size information encoded in them, so
			 * align them to allow this
			 */
			hugepte_cache = kmem_cache_create("hugepte-cache",
							  sizeof(pte_t),
							  HUGEPD_SHIFT_MASK + 1,
							  0, NULL);
			if (hugepte_cache == NULL)
				panic("%s: Unable to create kmem cache "
				      "for hugeptes\n", __func__);
		}
#endif
	}
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
	/* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else if (mmu_psize_defs[MMU_PAGE_512K].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
#else
	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
	else if (mmu_psize_defs[MMU_PAGE_2M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
arch_initcall(hugetlbpage_init);
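/*
 * Flush each sub-page of the compound page individually; highmem pages
 * have to be temporarily mapped with kmap_atomic() first.
 */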
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}
#endif /* CONFIG_HUGETLB_PAGE */
/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against teardown,
 * and we can follow the address down to the page and take a ref on it.
 * This function needs to be called with interrupts disabled. We use this variant
 * when we have MSR[EE] = 0 but the paca->soft_enabled = 1
 */
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				   bool *is_thp, unsigned *shift)
{
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	if (is_thp)
		*is_thp = false;

	pgdp = pgdir + pgd_index(ea);
	pgd  = READ_ONCE(*pgdp);
	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or a page unmap. The return pte_t * is still not
	 * stable. So should be checked there for above conditions.
	 */
	if (pgd_none(pgd))
		return NULL;
	else if (pgd_huge(pgd)) {
		ret_pte = (pte_t *) pgdp;
		goto out;
	} else if (is_hugepd(__hugepd(pgd_val(pgd))))
		hpdp = (hugepd_t *)&pgd;
	else {
		/*
		 * Even if we end up with an unmap, the pgtable will not
		 * be freed, because we do an rcu free and here we are
		 * irq disabled
		 */
		pdshift = PUD_SHIFT;
		pudp = pud_offset(&pgd, ea);
		pud  = READ_ONCE(*pudp);

		if (pud_none(pud))
			return NULL;
		else if (pud_huge(pud)) {
			ret_pte = (pte_t *) pudp;
			goto out;
		} else if (is_hugepd(__hugepd(pud_val(pud))))
			hpdp = (hugepd_t *)&pud;
		else {
			pdshift = PMD_SHIFT;
			pmdp = pmd_offset(&pud, ea);
			pmd  = READ_ONCE(*pmdp);
			/*
			 * A hugepage collapse is captured by pmd_none, because
			 * it marks the pmd none and does a hpte invalidate.
			 */
			if (pmd_none(pmd))
				return NULL;

			if (pmd_trans_huge(pmd)) {
				if (is_thp)
					*is_thp = true;
				ret_pte = (pte_t *) pmdp;
				goto out;
			}

			if (pmd_huge(pmd)) {
				ret_pte = (pte_t *) pmdp;
				goto out;
			} else if (is_hugepd(__hugepd(pmd_val(pmd))))
				hpdp = (hugepd_t *)&pmd;
			else
				return pte_offset_kernel(&pmd, ea);
		}
	}
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (shift)
		*shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
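/*
 * gup_hugepte() takes speculative references on every sub-page of one huge
 * page for get_user_pages_fast(), then re-checks that the PTE did not
 * change underneath before committing them.
 */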
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = READ_ONCE(*ptep);
	mask = _PAGE_PRESENT | _PAGE_READ;

	/*
	 * On some CPUs like the 8xx, _PAGE_RW hence _PAGE_WRITE is defined
	 * as 0 and _PAGE_RO has to be set when a page is not writable
	 */
	if (write)
		mask |= _PAGE_WRITE;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */