/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "radix-mmu: " fmt
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/string_helpers.h>
#include <linux/stop_machine.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>
unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
                                          unsigned long table_size)
{
        unsigned long patb0, patb1;

        patb0 = be64_to_cpu(partition_tb[0].patb0);
        patb1 = base | table_size | PATB_GR;

        mmu_partition_table_set_entry(0, patb0, patb1);

        return 0;
}
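
/*
 * Note on the entry written above (a reading of the code, not new behaviour):
 * patb1 packs the process table base, its size encoding and PATB_GR (process
 * radix translation) into the second doubleword of partition table entry 0,
 * while patb0 (the radix root and PATB_HR) is re-read and preserved as set up
 * by radix_init_partition_table().
 */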
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
                        unsigned long region_start, unsigned long region_end)
{
        unsigned long pa = 0;
        void *pt;

        if (region_start || region_end) /* has region hint */
                pa = memblock_alloc_range(size, size, region_start, region_end,
                                          MEMBLOCK_NONE);
        else if (nid != -1) /* has node hint */
                pa = memblock_alloc_base_nid(size, size,
                                             MEMBLOCK_ALLOC_ANYWHERE,
                                             nid, MEMBLOCK_NONE);

        if (!pa)
                pa = memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE);

        BUG_ON(!pa);

        pt = __va(pa);
        memset(pt, 0, size);

        return pt;
}
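
/*
 * Allocation preference above, as the hints suggest: an explicit physical
 * region wins, then a NUMA node, then any memblock memory; the new table is
 * zeroed and returned by virtual address.
 */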
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
                          pgprot_t flags,
                          unsigned int map_page_size,
                          int nid,
                          unsigned long region_start, unsigned long region_end)
{
        unsigned long pfn = pa >> PAGE_SHIFT;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        pgdp = pgd_offset_k(ea);
        if (pgd_none(*pgdp)) {
                pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
                                           region_start, region_end);
                pgd_populate(&init_mm, pgdp, pudp);
        }
        pudp = pud_offset(pgdp, ea);
        if (map_page_size == PUD_SIZE) {
                ptep = (pte_t *)pudp;
                goto set_the_pte;
        }
        if (pud_none(*pudp)) {
                pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
                                           region_start, region_end);
                pud_populate(&init_mm, pudp, pmdp);
        }
        pmdp = pmd_offset(pudp, ea);
        if (map_page_size == PMD_SIZE) {
                ptep = pmdp_ptep(pmdp);
                goto set_the_pte;
        }
        if (!pmd_present(*pmdp)) {
                ptep = early_alloc_pgtable(PAGE_SIZE, nid,
                                           region_start, region_end);
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }
        ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
        set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
        smp_wmb();
        return 0;
}
/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
                          pgprot_t flags,
                          unsigned int map_page_size,
                          int nid,
                          unsigned long region_start, unsigned long region_end)
{
        unsigned long pfn = pa >> PAGE_SHIFT;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        /*
         * Make sure task size is correct as per the max addr
         */
        BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

        if (unlikely(!slab_is_available()))
                return early_map_kernel_page(ea, pa, flags, map_page_size,
                                             nid, region_start, region_end);

        /*
         * Should make page table allocation functions be able to take a
         * node, so we can place kernel page tables on the right nodes after
         * boot.
         */
        pgdp = pgd_offset_k(ea);
        pudp = pud_alloc(&init_mm, pgdp, ea);
        if (!pudp)
                return -ENOMEM;
        if (map_page_size == PUD_SIZE) {
                ptep = (pte_t *)pudp;
                goto set_the_pte;
        }
        pmdp = pmd_alloc(&init_mm, pudp, ea);
        if (!pmdp)
                return -ENOMEM;
        if (map_page_size == PMD_SIZE) {
                ptep = pmdp_ptep(pmdp);
                goto set_the_pte;
        }
        ptep = pte_alloc_kernel(pmdp, ea);
        if (!ptep)
                return -ENOMEM;

set_the_pte:
        set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
        smp_wmb();
        return 0;
}
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                          pgprot_t flags,
                          unsigned int map_page_size)
{
        return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void radix__change_memory_range(unsigned long start, unsigned long end,
                                unsigned long clear)
{
        unsigned long idx;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        start = ALIGN_DOWN(start, PAGE_SIZE);
        end = PAGE_ALIGN(end); // aligns up

        pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
                 start, end, clear);

        for (idx = start; idx < end; idx += PAGE_SIZE) {
                pgdp = pgd_offset_k(idx);
                pudp = pud_alloc(&init_mm, pgdp, idx);
                if (!pudp)
                        continue;
                if (pud_huge(*pudp)) {
                        ptep = (pte_t *)pudp;
                        goto update_the_pte;
                }
                pmdp = pmd_alloc(&init_mm, pudp, idx);
                if (!pmdp)
                        continue;
                if (pmd_huge(*pmdp)) {
                        ptep = pmdp_ptep(pmdp);
                        goto update_the_pte;
                }
                ptep = pte_alloc_kernel(pmdp, idx);
                if (!ptep)
                        continue;
update_the_pte:
                radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
        }

        radix__flush_tlb_kernel_range(start, end);
}
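
/*
 * In other words (descriptive only): the loop above walks every base page in
 * the range, finds the PTE at whichever level backs it (a huge PUD or PMD, or
 * a normal PTE), clears the requested bits with radix__pte_update(), and then
 * flushes the whole kernel range from the TLB once at the end.
 */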
void radix__mark_rodata_ro(void)
{
        unsigned long start, end;

        start = (unsigned long)_stext;
        end = (unsigned long)__init_begin;

        radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
        unsigned long start = (unsigned long)__init_begin;
        unsigned long end = (unsigned long)__init_end;

        radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
        char buf[10];

        if (end <= start)
                return;

        string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

        pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
                exec ? " (exec)" : "");
}
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
        if (addr < __pa_symbol(__init_begin))
                return __pa_symbol(__init_begin);
#endif
        return end;
}
static int __meminit create_physical_mapping(unsigned long start,
                                             unsigned long end,
                                             int nid)
{
        unsigned long vaddr, addr, mapping_size = 0;
        bool prev_exec, exec = false;
        pgprot_t prot;
        int psize;

        start = _ALIGN_UP(start, PAGE_SIZE);
        for (addr = start; addr < end; addr += mapping_size) {
                unsigned long gap, previous_size;
                int rc;

                gap = next_boundary(addr, end) - addr;
                previous_size = mapping_size;
                prev_exec = exec;

                if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
                    mmu_psize_defs[MMU_PAGE_1G].shift) {
                        mapping_size = PUD_SIZE;
                        psize = MMU_PAGE_1G;
                } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
                           mmu_psize_defs[MMU_PAGE_2M].shift) {
                        mapping_size = PMD_SIZE;
                        psize = MMU_PAGE_2M;
                } else {
                        mapping_size = PAGE_SIZE;
                        psize = mmu_virtual_psize;
                }

                vaddr = (unsigned long)__va(addr);

                if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
                    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
                        prot = PAGE_KERNEL_X;
                        exec = true;
                } else {
                        prot = PAGE_KERNEL;
                        exec = false;
                }

                if (mapping_size != previous_size || exec != prev_exec) {
                        print_mapping(start, addr, previous_size, prev_exec);
                        start = addr;
                }

                rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
                if (rc)
                        return rc;

                update_page_count(psize, 1);
        }

        print_mapping(start, addr, mapping_size, exec);
        return 0;
}
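
/*
 * Page size selection above, summarised: use a 1G mapping when the address is
 * 1G-aligned, the gap to the next protection boundary is at least 1G and the
 * CPU reports a 1G page size; otherwise try 2M by the same rule; otherwise
 * fall back to the base page size. Executable protection is only given to
 * mappings that overlap kernel or interrupt-vector text.
 */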
void __init radix_init_pgtable(void)
{
        unsigned long rts_field;
        struct memblock_region *reg;

        /* We don't support slb for radix */
        mmu_slb_size = 0;
        /*
         * Create the linear mapping, using standard page size for now
         */
        for_each_memblock(memory, reg) {
                /*
                 * The memblock allocator is up at this point, so the
                 * page tables will be allocated within the range. No
                 * need for a node (which we don't have yet).
                 */
                WARN_ON(create_physical_mapping(reg->base,
                                                reg->base + reg->size,
                                                -1));
        }
        /* Find out how many PID bits are supported */
        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (!mmu_pid_bits)
                        mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
                /*
                 * When KVM is possible, we only use the top half of the
                 * PID space to avoid collisions between host and guest PIDs
                 * which can cause problems due to prefetch when exiting the
                 * guest with AIL=3.
                 */
                mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
                mmu_base_pid = 1;
#endif
        } else {
                /* The guest uses the bottom half of the PID space */
                if (!mmu_pid_bits)
                        mmu_pid_bits = 19;
                mmu_base_pid = 1;
        }
        /*
         * Allocate Partition table and process table for the
         * host.
         */
        BUG_ON(PRTB_SIZE_SHIFT > 36);
        process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
        /*
         * Fill in the process table.
         */
        rts_field = radix__get_tree_size();
        process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
        /*
         * Fill in the partition table. We are supposed to use the effective
         * address of the process table here, but our linear mapping also
         * allows us to use the physical address.
         */
        register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
        pr_info("Process table %p and radix root for kernel: %p\n",
                process_tb, init_mm.pgd);
        asm volatile("ptesync" : : : "memory");
        asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
                     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
        trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
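
        /*
         * A reading of the operands in the tlbie sequence above: RIC=2,
         * PRS=1, R=1 with RS=0 invalidates the process-scoped translations
         * and cached table entries for LPID 0, so the hardware walker is
         * guaranteed to pick up the tables registered just before.
         */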
        /*
         * The init_mm context is given the first available (non-zero) PID,
         * which is the "guard PID" and contains no page table. PIDR should
         * never be set to zero because that duplicates the kernel address
         * space at the 0x0... offset (quadrant 0)!
         *
         * An arbitrary PID that may later be allocated by the PID allocator
         * for userspace processes must not be used either, because that
         * would cause stale user mappings for that PID on CPUs outside of
         * the TLB invalidation scheme (because it won't be in mm_cpumask).
         *
         * So permanently carve out one PID for the purpose of a guard PID.
         */
        init_mm.context.id = mmu_base_pid;
        mmu_base_pid++;
}
static void __init radix_init_partition_table(void)
{
        unsigned long rts_field, dw0;

        mmu_partition_table_init();
        rts_field = radix__get_tree_size();
        dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
        mmu_partition_table_set_entry(0, dw0, 0);

        pr_info("Initializing Radix MMU\n");
        pr_info("Partition table %p\n", partition_tb);
}
void __init radix_init_native(void)
{
        register_process_table = native_register_process_table;
}
static int __init get_idx_from_shift(unsigned int shift)
{
        int idx = -1;

        switch (shift) {
        case 0xc:
                idx = MMU_PAGE_4K;
                break;
        case 0x10:
                idx = MMU_PAGE_64K;
                break;
        case 0x15:
                idx = MMU_PAGE_2M;
                break;
        case 0x1e:
                idx = MMU_PAGE_1G;
                break;
        }
        return idx;
}
static int __init radix_dt_scan_page_sizes(unsigned long node,
                                           const char *uname, int depth,
                                           void *data)
{
        int size = 0;
        int shift, idx;
        unsigned int ap;
        const __be32 *prop;
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        /* Find MMU PID size */
        prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
        if (prop && size == 4)
                mmu_pid_bits = be32_to_cpup(prop);

        /* Grab page size encodings */
        prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
        if (!prop)
                return 0;

        pr_info("Page sizes from device-tree:\n");
        for (; size >= 4; size -= 4, ++prop) {
                struct mmu_psize_def *def;

                /* top 3 bits are the AP encoding */
                shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
                ap = be32_to_cpu(prop[0]) >> 29;
                pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

                idx = get_idx_from_shift(shift);
                if (idx < 0)
                        continue;

                def = &mmu_psize_defs[idx];
                def->shift = shift;
                def->ap = ap;
        }

        cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
        return 1;
}
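
/*
 * Worked example of the decode above: a device-tree cell of 0xa0000010 gives
 * ap = 0xa0000010 >> 29 = 0x5 and shift = 0xa0000010 & ~(0xe << 28) = 16,
 * i.e. the 64K page size, matching the fallback table set up in
 * radix__early_init_devtree() below.
 */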
void __init radix__early_init_devtree(void)
{
        int rc;

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
        if (rc != 0) /* Found */
                goto found;
        /*
         * Let's assume we have 4K and 64K page support
         */
        mmu_psize_defs[MMU_PAGE_4K].shift = 12;
        mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

        mmu_psize_defs[MMU_PAGE_64K].shift = 16;
        mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        if (mmu_psize_defs[MMU_PAGE_2M].shift) {
                /*
                 * map vmemmap using 2M if available
                 */
                mmu_vmemmap_psize = MMU_PAGE_2M;
        }
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
        return;
}
static void radix_init_amor(void)
{
        /*
         * In HV mode, we init AMOR (Authority Mask Override Register) so that
         * the hypervisor and guest can setup IAMR (Instruction Authority Mask
         * Register), enable key 0 and set it to 1.
         *
         * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
         */
        mtspr(SPRN_AMOR, (3ul << 62));
}
static void radix_init_iamr(void)
{
        /*
         * Radix always uses key0 of the IAMR to determine if an access is
         * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
         * fetch through that key.
         */
        mtspr(SPRN_IAMR, (1ul << 62));
}
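
/*
 * On the values used above: 3ul << 62 is 0xc000000000000000, the two
 * most-significant bits (IBM bits 0-1), which form key 0's mask in the AMOR;
 * 1ul << 62 is 0x4000000000000000, IBM bit 1 only, the key 0 bit the IAMR
 * comment refers to.
 */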
void __init radix__early_init_mmu(void)
{
        unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
        /* PAGE_SIZE mappings */
        mmu_virtual_psize = MMU_PAGE_64K;
#else
        mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* vmemmap mapping */
        mmu_vmemmap_psize = mmu_virtual_psize;
#endif
        /*
         * initialize page table size
         */
        __pte_index_size = RADIX_PTE_INDEX_SIZE;
        __pmd_index_size = RADIX_PMD_INDEX_SIZE;
        __pud_index_size = RADIX_PUD_INDEX_SIZE;
        __pgd_index_size = RADIX_PGD_INDEX_SIZE;
        __pud_cache_index = RADIX_PUD_INDEX_SIZE;
        __pte_table_size = RADIX_PTE_TABLE_SIZE;
        __pmd_table_size = RADIX_PMD_TABLE_SIZE;
        __pud_table_size = RADIX_PUD_TABLE_SIZE;
        __pgd_table_size = RADIX_PGD_TABLE_SIZE;

        __pmd_val_bits = RADIX_PMD_VAL_BITS;
        __pud_val_bits = RADIX_PUD_VAL_BITS;
        __pgd_val_bits = RADIX_PGD_VAL_BITS;

        __kernel_virt_start = RADIX_KERN_VIRT_START;
        __kernel_virt_size = RADIX_KERN_VIRT_SIZE;
        __vmalloc_start = RADIX_VMALLOC_START;
        __vmalloc_end = RADIX_VMALLOC_END;
        __kernel_io_start = RADIX_KERN_IO_START;
        vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
        ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
        pci_io_base = ISA_IO_BASE;
#endif
        __pte_frag_nr = RADIX_PTE_FRAG_NR;
        __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
        __pmd_frag_nr = RADIX_PMD_FRAG_NR;
        __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                radix_init_native();
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
                radix_init_partition_table();
                radix_init_amor();
        } else {
                radix_init_pseries();
        }

        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

        radix_init_iamr();
        radix_init_pgtable();
        /* Switch to the guard PID before turning on MMU */
        radix__switch_mmu_context(NULL, &init_mm);
        if (cpu_has_feature(CPU_FTR_HVMODE))
                tlbiel_all();
}
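
/*
 * Boot-time split, for orientation (a description of the code above): on
 * bare metal (no FW_FEATURE_LPAR) the kernel owns the partition table, so it
 * hooks up the native process-table writer, sets LPCR[UPRT,HR] and writes
 * partition table entry 0 itself; under a hypervisor the registration is
 * delegated to radix_init_pseries().
 */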
void radix__early_init_mmu_secondary(void)
{
        unsigned long lpcr;
        /*
         * update partition table control register and UPRT
         */
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

                mtspr(SPRN_PTCR,
                      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
                radix_init_amor();
        }
        radix_init_iamr();

        radix__switch_mmu_context(NULL, &init_mm);
        if (cpu_has_feature(CPU_FTR_HVMODE))
                tlbiel_all();
}
void radix__mmu_cleanup_all(void)
{
        unsigned long lpcr;

        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
                mtspr(SPRN_PTCR, 0);
                powernv_set_nmmu_ptcr(0);
                radix__flush_tlb_all();
        }
}
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                       phys_addr_t first_memblock_size)
{
        /*
         * We don't currently support the first MEMBLOCK not mapping 0
         * physical on those processors.
         */
        BUG_ON(first_memblock_base != 0);

        /*
         * Radix mode is not limited by RMA / VRMA addressing.
         */
        ppc64_rma_size = ULONG_MAX;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
        pte_t *pte;
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
                if (!pte_none(*pte))
                        return;
        }

        pte_free_kernel(&init_mm, pte_start);
        pmd_clear(pmd);
}
static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
        pmd_t *pmd;
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd = pmd_start + i;
                if (!pmd_none(*pmd))
                        return;
        }

        pmd_free(&init_mm, pmd_start);
        pud_clear(pud);
}
struct change_mapping_params {
        pte_t *pte;
        unsigned long start;
        unsigned long end;
        unsigned long aligned_start;
        unsigned long aligned_end;
};
static int __meminit stop_machine_change_mapping(void *data)
{
        struct change_mapping_params *params =
                        (struct change_mapping_params *)data;

        if (!data)
                return -1;

        spin_unlock(&init_mm.page_table_lock);
        pte_clear(&init_mm, params->aligned_start, params->pte);
        create_physical_mapping(params->aligned_start, params->start, -1);
        create_physical_mapping(params->end, params->aligned_end, -1);
        spin_lock(&init_mm.page_table_lock);
        return 0;
}
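
/*
 * What the callback above achieves (descriptive): with every other CPU held
 * in stop_machine(), the huge PTE covering the region being removed is
 * cleared and the head and tail of that huge mapping (the parts outside the
 * removed range) are recreated with smaller pages. init_mm.page_table_lock,
 * taken by the caller in remove_pagetable(), is dropped around
 * create_physical_mapping() because the allocation paths it uses take that
 * lock themselves.
 */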
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
                             unsigned long end)
{
        unsigned long next;
        pte_t *pte;

        pte = pte_start + pte_index(addr);
        for (; addr < end; addr = next, pte++) {
                next = (addr + PAGE_SIZE) & PAGE_MASK;
                if (next > end)
                        next = end;

                if (!pte_present(*pte))
                        continue;

                if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
                        /*
                         * The vmemmap_free() and remove_section_mapping()
                         * codepaths call us with aligned addresses.
                         */
                        WARN_ONCE(1, "%s: unaligned range\n", __func__);
                        continue;
                }

                pte_clear(&init_mm, addr, pte);
        }
}
/*
 * Clear the pte and potentially split the mapping helper
 */
static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end,
                                           unsigned long size, pte_t *pte)
{
        unsigned long mask = ~(size - 1);
        unsigned long aligned_start = addr & mask;
        unsigned long aligned_end = addr + size;
        struct change_mapping_params params;
        bool split_region = false;

        if ((end - addr) < size) {
                /*
                 * We are going to clear the PTE, but have not yet flushed
                 * the mapping, so we need to remap and flush. If the effects
                 * are visible outside the processor, or if we are running
                 * in code close to the mapping we cleared, we are in trouble.
                 */
                if (overlaps_kernel_text(aligned_start, addr) ||
                    overlaps_kernel_text(end, aligned_end)) {
                        /*
                         * Hack, just return, don't pte_clear
                         */
                        WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
                                  "text, not splitting\n", addr, end);
                        return;
                }
                split_region = true;
        }

        if (split_region) {
                params.pte = pte;
                params.start = addr;
                params.end = end;
                params.aligned_start = addr & ~(size - 1);
                params.aligned_end = min_t(unsigned long, aligned_end,
                                (unsigned long)__va(memblock_end_of_DRAM()));
                stop_machine(stop_machine_change_mapping, &params, NULL);
                return;
        }

        pte_clear(&init_mm, addr, pte);
}
static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
                             unsigned long end)
{
        unsigned long next;
        pte_t *pte_base;
        pmd_t *pmd;

        pmd = pmd_start + pmd_index(addr);
        for (; addr < end; addr = next, pmd++) {
                next = pmd_addr_end(addr, end);

                if (!pmd_present(*pmd))
                        continue;

                if (pmd_huge(*pmd)) {
                        split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
                        continue;
                }

                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
                remove_pte_table(pte_base, addr, next);
                free_pte_table(pte_base, pmd);
        }
}
static void remove_pud_table(pud_t *pud_start, unsigned long addr,
                             unsigned long end)
{
        unsigned long next;
        pmd_t *pmd_base;
        pud_t *pud;

        pud = pud_start + pud_index(addr);
        for (; addr < end; addr = next, pud++) {
                next = pud_addr_end(addr, end);

                if (!pud_present(*pud))
                        continue;

                if (pud_huge(*pud)) {
                        split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
                        continue;
                }

                pmd_base = (pmd_t *)pud_page_vaddr(*pud);
                remove_pmd_table(pmd_base, addr, next);
                free_pmd_table(pmd_base, pud);
        }
}
static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
        unsigned long addr, next;
        pud_t *pud_base;
        pgd_t *pgd;

        spin_lock(&init_mm.page_table_lock);

        for (addr = start; addr < end; addr = next) {
                next = pgd_addr_end(addr, end);

                pgd = pgd_offset_k(addr);
                if (!pgd_present(*pgd))
                        continue;

                if (pgd_huge(*pgd)) {
                        split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
                        continue;
                }

                pud_base = (pud_t *)pgd_page_vaddr(*pgd);
                remove_pud_table(pud_base, addr, next);
        }

        spin_unlock(&init_mm.page_table_lock);
        radix__flush_tlb_kernel_range(start, end);
}
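
/*
 * Teardown order in the helpers above, for reference: walk down from the
 * PGD, split any huge mapping that only partially covers the range via
 * split_kernel_mapping(), clear the PTEs that are fully covered, free page
 * table pages that end up empty, and finally flush the kernel TLB for the
 * whole range once.
 */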
int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
{
        return create_physical_mapping(start, end, nid);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
        remove_pagetable(start, end);
        return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
                                 pgprot_t flags, unsigned int map_page_size,
                                 int nid)
{
        return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}
radix__vmemmap_create_mapping(unsigned long start
,
892 unsigned long page_size
,
895 /* Create a PTE encoding */
896 unsigned long flags
= _PAGE_PRESENT
| _PAGE_ACCESSED
| _PAGE_KERNEL_RW
;
897 int nid
= early_pfn_to_nid(phys
>> PAGE_SHIFT
);
900 ret
= __map_kernel_page_nid(start
, phys
, __pgprot(flags
), page_size
, nid
);
#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
        remove_pagetable(start, start + page_size);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                         pmd_t *pmdp, unsigned long clr,
                                         unsigned long set)
{
        unsigned long old;

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

        old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
        trace_hugepage_update(addr, old, clr, set);

        return old;
}
pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                                 pmd_t *pmdp)
{
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
        VM_BUG_ON(pmd_devmap(*pmdp));
        /*
         * khugepaged calls this for normal pmd
         */
        pmd = *pmdp;
        pmd_clear(pmdp);

        /* FIXME!! Verify whether we need this kick below */
        serialize_against_pte_lookup(vma->vm_mm);

        radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

        return pmd;
}
/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                       pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}
pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pte_t *ptep;
        pgtable_t pgtable;
        struct list_head *lh;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        ptep = (pte_t *) pgtable;
        *ptep = __pte(0);
        ptep++;
        *ptep = __pte(0);
        return pgtable;
}
pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
                                     unsigned long addr, pmd_t *pmdp)
{
        pmd_t old_pmd;
        unsigned long old;

        old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
        old_pmd = __pmd(old);
        /*
         * Serialize against find_current_mm_pte which does lock-less
         * lookup in page tables with local interrupts disabled. For huge pages
         * it casts pmd_t to pte_t. Since the format of pte_t is different from
         * pmd_t we want to prevent transit from pmd pointing to page table
         * to pmd pointing to huge page (and back) while interrupts are disabled.
         * We clear pmd to possibly replace it with page table pointer in
         * different code paths. So make sure we wait for the parallel
         * find_current_mm_pte to finish.
         */
        serialize_against_pte_lookup(mm);

        return old_pmd;
}
int radix__has_transparent_hugepage(void)
{
        /* For radix 2M at PMD level means thp */
        if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
                return 1;
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
                                  pte_t entry, unsigned long address, int psize)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
                                              _PAGE_RW | _PAGE_EXEC);

        unsigned long change = pte_val(entry) ^ pte_val(*ptep);
        /*
         * To avoid NMMU hang while relaxing access, we need to mark
         * the pte invalid in between.
         */
        if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
                unsigned long old_pte, new_pte;

                old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
                new_pte = old_pte | set;
                radix__flush_tlb_page_psize(mm, address, psize);
                __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
        } else {
                __radix_pte_update(ptep, 0, set);

                /*
                 * Book3S does not require a TLB flush when relaxing access
                 * restrictions when the address space is not attached to a
                 * NMMU, because the core MMU will reload the pte after taking
                 * an access fault, which is defined by the architecture.
                 */
        }
        /* See ptesync comment in radix__set_pte_at */
}
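
/*
 * The nest-MMU path above, spelled out (a description of the code, not new
 * behaviour): when a coprocessor context is attached, the PTE is first
 * switched from _PAGE_PRESENT to _PAGE_INVALID, the page is flushed from the
 * TLB, and only then is the PTE rewritten with the relaxed permission bits,
 * so the NMMU never observes a valid PTE changing underneath it.
 */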