/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					 unsigned long table_size)
{
	unsigned long patb1 = base | table_size | PATB_GR;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}
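
/*
 * Illustrative sketch, not part of the original file: the patb1 doubleword
 * built above packs three fields. Because the process table base is aligned
 * to its size, the low bits of "base" are zero and are reused for the
 * encoded table size and the PATB_GR (radix guest/host) flag. The helper
 * below is hypothetical and only restates that packing.
 */
static inline __be64 example_pack_patb1(unsigned long base,
					unsigned long table_size)
{
	/* same composition as native_register_process_table() above */
	return cpu_to_be64(base | table_size | PATB_GR);
}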
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}
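
/*
 * Usage sketch (illustrative, not from the original file): before the slab
 * allocator is up, page-table levels are carved directly out of memblock,
 * e.g.
 *
 *	pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
 *	pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
 *
 * Passing "size" as both the size and the alignment keeps each table
 * naturally aligned, which the radix table walk requires, and the memset()
 * hands back a zeroed (empty) table.
 */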
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max address
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}
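
/*
 * Illustrative caller, not in the original file: mapping a single 2M chunk
 * of the kernel linear map at effective address "ea" backed by physical
 * address "pa" would look like
 *
 *	rc = radix__map_kernel_page(ea, pa, PAGE_KERNEL, PMD_SIZE);
 *
 * which is essentially what create_physical_mapping() below does for each
 * piece of the linear mapping.
 */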
#ifdef CONFIG_STRICT_KERNEL_RWX
void radix__mark_rodata_ro(void)
{
	unsigned long start = (unsigned long)_stext;
	unsigned long end = (unsigned long)__init_begin;
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_devel("marking ro start %lx, end %lx\n", start, end);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, _PAGE_WRITE, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
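
/*
 * Note (added for clarity, not in the original file): in
 * radix__mark_rodata_ro() above, radix__pte_update(&init_mm, idx, ptep,
 * _PAGE_WRITE, 0, 0) atomically clears only the _PAGE_WRITE bit (the "clr"
 * mask) and sets nothing (the "set" mask is 0), leaving the rest of the PTE
 * intact; the final radix__flush_tlb_kernel_range() makes the change
 * visible to the TLB.
 */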
static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	if (end <= start)
		return;

	pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
}
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext))
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}
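
/*
 * A condensed sketch, not part of the original file, of the page size
 * selection performed in the loop above: prefer 1G, then 2M, then the base
 * page size, subject to the alignment of "addr", the remaining "gap", and
 * the page sizes the MMU actually advertised in mmu_psize_defs[]. The
 * helper below is hypothetical and ignores the text-splitting logic.
 */
static inline unsigned long example_pick_mapping_size(unsigned long addr,
						      unsigned long gap)
{
	if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
	    mmu_psize_defs[MMU_PAGE_1G].shift)
		return PUD_SIZE;
	if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
	    mmu_psize_defs[MMU_PAGE_2M].shift)
		return PMD_SIZE;
	return PAGE_SIZE;
}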
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));
	/*
	 * Allocate the partition table and process table for the
	 * host.
	 */
	BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * allows us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
}
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}
void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}
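
/*
 * Worked example (hypothetical cell value, not from the original file): a
 * device-tree cell of 0x20000015 decodes as
 *
 *	shift = 0x20000015 & ~(0xe << 28) = 0x15 (a 21-bit shift, i.e. 2M)
 *	ap    = 0x20000015 >> 29          = 0x1
 *
 * the top three bits carry the AP (actual page size) encoding, the
 * remaining bits the page shift.
 */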
void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;
	/*
	 * Let's assume we have page 4k and 64k support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}
static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1, is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
	trace_tlbie(0, 0, rb, 0, 2, 1, 1);
	/*
	 * Now switch the HID
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}
static void radix_init_amor(void)
{
	/*
	 * In HV mode, we initialize AMOR (Authority Mask Override Register)
	 * so that the hypervisor and guest can set up the IAMR (Instruction
	 * Authority Mask Register): we enable key 0 by setting its mask
	 * bits to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}
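
/*
 * Worked bit layout (added for clarity, not in the original file):
 * 3ul << 62 places 0b11 in the two most significant bits, the AMOR mask
 * slot for key 0:
 *
 *	AMOR = 0b11 followed by 62 zero bits = 0xc000000000000000
 *
 * With both mask bits set, lower privilege levels are permitted to manage
 * the key 0 bits of IAMR/AMR.
 */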
static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, iamr);
}
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
}
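
/*
 * Note (added for clarity, not in the original file): on bare metal the
 * boot CPU enables radix host translation by setting LPCR_UPRT (use process
 * table) and LPCR_HR (host radix) before installing the partition table;
 * secondaries repeat the LPCR/PTCR setup in
 * radix__early_init_mmu_secondary() below, and radix__mmu_cleanup_all()
 * undoes it on the way into a new kernel (e.g. kexec).
 */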
void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();
}
void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors.
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit the allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On radix config we really don't have a limitation
	 * on real mode access, but keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}
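
/*
 * Worked example (illustrative, not in the original file): with a 512M
 * first memblock, ppc64_rma_size = min(512M, 1G) = 512M; with a 4G first
 * memblock it is clamped to 0x40000000 (1G).
 */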
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}
static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}
static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}
static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}
int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
{
	return create_physical_mapping(start, end);
}

int radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}
pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);
	/* FIXME!! Verify whether we need this kick below */
	kick_all_cpus_sync();
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}
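
/*
 * Illustrative pairing (hypothetical, not in the original file) of the
 * deposit/withdraw API above, under the pmd lock:
 *
 *	spin_lock(pmd_lockptr(mm, pmdp));
 *	radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	...
 *	pgtable = radix__pgtable_trans_huge_withdraw(mm, pmdp);
 *	spin_unlock(pmd_lockptr(mm, pmdp));
 *
 * Each deposited page table doubles as a list_head linking it to the other
 * deposits for this pmd; withdraw (below) pops the entry at
 * pmd_huge_pte(mm, pmdp) and zeroes the list_head words it borrowed from
 * the page table before handing it back.
 */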
pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}
pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_linux_pte_or_hugepte which does a lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t we want to prevent transit from a pmd pointing to a page table
	 * to a pmd pointing to a huge page (and back) while interrupts are
	 * disabled. We clear the pmd to possibly replace it with a page table
	 * pointer in different code paths. So make sure we wait for the
	 * parallel find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();

	return old_pmd;
}
int radix__has_transparent_hugepage(void)
{
	/* For radix, 2M at the PMD level means THP */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */