/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/string_helpers.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

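/*
 * Note (added for clarity): mmu_pid_bits is the number of PID bits the MMU
 * supports (read from the device tree), and mmu_base_pid is the first PID
 * value the host allocates from; see radix_init_pgtable(), which may reserve
 * the bottom half of the PID space for guests.
 */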
unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;

static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					  unsigned long table_size)
{
	unsigned long patb0, patb1;

	patb0 = be64_to_cpu(partition_tb[0].patb0);
	patb1 = base | table_size | PATB_GR;

	mmu_partition_table_set_entry(0, patb0, patb1);

	return 0;
}

static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}

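/*
 * radix__map_kernel_page() installs a single kernel mapping of the given
 * size. Once the slab allocator is up it uses the normal pud/pmd/pte
 * allocators; during early boot it bootstraps any missing levels with
 * early_alloc_pgtable() instead.
 */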
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			   pgprot_t flags,
			   unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	return 0;
}

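/*
 * Kernel text/rodata protection helpers: radix__change_memory_range() below
 * walks the linear mapping page by page and clears the requested PTE bits
 * (e.g. _PAGE_WRITE or _PAGE_EXEC) on every mapping it finds.
 */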
#ifdef CONFIG_STRICT_KERNEL_RWX
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	/*
	 * mark_rodata_ro() will mark itself as !writable at some point.
	 * Due to the DD1 workaround in radix__pte_update(), we'll end up with
	 * an invalid pte and the system will crash quite severely.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
		return;
	}

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf);
}

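/*
 * create_physical_mapping() below maps a physical range into the kernel
 * linear mapping, choosing the largest page size that the alignment, the
 * remaining gap and the supported MMU page sizes allow (1G, then 2M, then
 * the base page size), and prints a summary whenever the chosen size changes.
 */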
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext))
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}

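/*
 * Note (added for clarity): with CONFIG_STRICT_KERNEL_RWX the range covering
 * kernel text is deliberately mapped with smaller pages rather than 1G/2M
 * pages, so that radix__change_memory_range() can later flip permissions on
 * text and rodata without having to break up huge mappings.
 */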
static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));

	/* Find out how many PID bits are supported */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest.
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate the partition table and process table for the host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * enables us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
}

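/*
 * Note: the ptesync/tlbie/eieio;tlbsync;ptesync sequence at the end of
 * radix_init_pgtable() is there to invalidate any process-scoped
 * translations (and page walk cache entries) the hardware may have cached
 * for LPID 0 before the partition and process tables were filled in.
 */
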
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

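/*
 * Worked decode example (per the mask and shift above): an encoding word of
 * 0x20000015 gives ap = 0x20000015 >> 29 = 0x1 and
 * shift = 0x20000015 & ~(0xe << 28) = 0x15 = 21, i.e. a 2M page size.
 * (The value 0x20000015 is only an illustration, not a guaranteed
 * device-tree encoding.)
 */
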
void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Let's assume we have 4K and 64K page support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
	trace_tlbie(0, 0, rb, 0, 2, 1, 1);
	/*
	 * Now switch the HID
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

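/* For reference: 3ul << 62 == 0xC000000000000000, i.e. both bits of the key 0 mask set. */
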
static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetches from user memory.
	 */
	mtspr(SPRN_IAMR, iamr);
}

void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit the allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On a radix config we really don't have a limitation
	 * on real mode access. But keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_present(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

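/*
 * The remove_*_table() helpers below tear down kernel mappings for memory
 * hot-remove: each level clears present entries (warning on unaligned
 * ranges), recurses into the next level, and frees a lower-level table via
 * free_pte_table()/free_pmd_table() once it is completely empty.
 */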
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
{
	return create_physical_mapping(start, end);
}

int radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for a normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/*FIXME!! Verify whether we need this kick below */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

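/*
 * Note (added for clarity): list_add() above links the deposited page table
 * at the head of the list hanging off pmd_huge_pte(), using the page table
 * page itself as the list_head, so its first two pte slots temporarily hold
 * list pointers rather than PTEs.
 */
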
pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

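/*
 * The two stores through ptep above wipe the list_head that lived in the
 * withdrawn page table's first slots, so the caller gets back a page table
 * whose used list_head area is zeroed, as promised in the comment above the
 * deposit routine.
 */
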
pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_current_mm_pte, which does a lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * that of pmd_t, we want to prevent a transit from a pmd pointing to a
	 * page table to a pmd pointing to a huge page (and back) while
	 * interrupts are disabled. We clear the pmd so it can possibly be
	 * replaced with a page table pointer in different code paths. So make
	 * sure we wait for the parallel find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);

	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix, 2M at the PMD level means THP */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
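
/*
 * Note: on radix the PMD level covers 2M with both 4K and 64K base pages
 * (PMD_SHIFT is 21 in either configuration), so THP is available whenever
 * the device tree reports a 2M page size.
 */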