arch/powerpc/mm/mem.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c":
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include <mm/mmu_decl.h>
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif /* CONFIG_HIGHMEM */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
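/*
 * Illustrative sketch, not part of this file: a driver mmap handler can
 * apply the protection computed by the hook above before mapping raw
 * physical pages, much as the generic /dev/mem code does. foo_mmap() is
 * a hypothetical example.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		size_t size = vma->vm_end - vma->vm_start;
 *
 *		vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *							 size, vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *				       size, vma->vm_page_prot);
 *	}
 */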
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif
int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
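/*
 * For orientation, a sketch of the call path (the callers live in generic
 * mm code, not in this file): a hotplug request reaching add_memory() in
 * mm/memory_hotplug.c ends up here roughly as
 *
 *	add_memory(nid, start, size)
 *	  -> arch_add_memory(nid, start, size, &restrictions)
 *	       -> create_section_mapping()   wire the range into the linear map
 *	       -> __add_pages()              generic sparsemem/memmap setup
 */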
#ifdef CONFIG_MEMORY_HOTREMOVE
void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif
/*
 * Zones usage:
 *
 * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
 * inform the generic DMA mapping code.  32-bit only devices (if not handled
 * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
 * otherwise served by ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
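/*
 * Worked example of the zone sizing above, assuming 4K pages: the 31-bit
 * ZONE_DMA clamp 0x7fffffffUL >> PAGE_SHIFT yields PFN 0x7ffff, so ZONE_DMA
 * covers roughly the first 2GB of RAM (or all of it on smaller machines,
 * via the min() with max_low_pfn). Pages between there and max_low_pfn land
 * in ZONE_NORMAL, and with CONFIG_HIGHMEM anything above lowmem_end_addr
 * lands in ZONE_HIGHMEM.
 */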
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
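/*
 * The deferred i-cache flush mentioned above happens when the page is next
 * mapped into a user process: the powerpc PTE-setting path (at this era,
 * set_pte_filter() in arch/powerpc/mm/pgtable.c) sees PG_arch_1 clear,
 * calls flush_dcache_icache_page(), and sets the bit again to mark the
 * page i-cache clean.
 */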
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
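/*
 * Typical consumer, for orientation: copy_to_user_page() (used when the
 * kernel writes instructions into another process, e.g. ptrace breakpoint
 * insertion) is expected to end with this flush so the new text is visible
 * to the i-cache, roughly:
 *
 *	memcpy(dst, src, len);
 *	flush_icache_user_range(vma, page, vaddr, len);
 */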
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long trap;
	bool is_exec;

	if (radix_enabled()) {
		prefetch((void *)address);
		return;
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */

	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:
		is_exec = false;
		break;
	case 0x400:
		is_exec = true;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, is_exec, trap);
#endif /* CONFIG_PPC_BOOK3S */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}
/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);
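/*
 * The visible effect is the usual "System RAM" entries in /proc/iomem,
 * e.g. (illustrative addresses only):
 *
 *	00000000-7fffffff : System RAM
 *	80000000-ffffffff : System RAM
 */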
#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);
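/*
 * Usage sketch for the exported symbol; the callback below is hypothetical.
 * walk_system_ram_range() invokes the callback once per contiguous range of
 * System RAM pages and stops early if it returns non-zero:
 *
 *	static int count_ram_pages(unsigned long start_pfn,
 *				   unsigned long nr_pages, void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(0, max_pfn, &total, count_ram_pages);
 */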