/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

int init_bootmem_done;
int mem_init_done;
phys_addr_t memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

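/*
 * Walk the kernel page tables (pgd -> pud -> pmd -> pte) to find the
 * PTE that maps @vaddr. Only used for the kmap setup below, so the
 * mapping is expected to already exist down to the PMD level.
 */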
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
#endif

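/*
 * Return 1 if the given pfn is backed by RAM: on 32-bit we simply
 * compare against max_pfn, on 64-bit we check the pfn against the
 * memblock memory regions.
 */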
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64    /* XXX for now */
        return pfn < max_pfn;
#else
        unsigned long paddr = (pfn << PAGE_SHIFT);
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                if (paddr >= reg->base && paddr < (reg->base + reg->size))
                        return 1;
        return 0;
#endif
}

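/*
 * Choose the page protection used when user space maps physical
 * memory (e.g. via /dev/mem): defer to the platform hook if one is
 * registered, otherwise map anything that is not RAM non-cached.
 */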
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = pgprot_noncached(vma_prot);

        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
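/* Map a hot-added physical address to the NUMA node that should own it. */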
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

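/*
 * Arch hook for memory hot-add: create the linear mapping for the new
 * range, then hand the pages to the generic code. As the inline
 * comment notes, the zone chosen below is only correct for
 * non-highmem configurations.
 */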
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        create_section_mapping(start, start + size);

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range. PPC64 does not maintain the memory layout in
 * /proc/iomem. Instead it maintains it in memblock.memory structures.
 * Walk through the memory regions, find holes and invoke the callback
 * for contiguous regions.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
{
        struct memblock_region *reg;
        unsigned long end_pfn = start_pfn + nr_pages;
        unsigned long tstart, tend;
        int ret = -1;

        for_each_memblock(memory, reg) {
                tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
                tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
                if (tstart >= tend)
                        continue;
                ret = (*func)(tstart, tend - tstart, arg);
                if (ret)
                        break;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

/*
 * Initialize the bootmem system and give it all the memory we
 * have available. If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long start, bootmap_pages;
        unsigned long total_pages;
        struct memblock_region *reg;
        int boot_mapsize;

        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /*
         * Find an area to use for the bootmem bitmap. Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
        boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

        /* Add active regions with valid PFNs */
        for_each_memblock(memory, reg) {
                unsigned long start_pfn, end_pfn;
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);
                add_active_range(0, start_pfn, end_pfn);
        }

        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
#ifdef CONFIG_HIGHMEM
        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

        /* reserve the sections we're already using */
        for_each_memblock(reserved, reg) {
                unsigned long top = reg->base + reg->size - 1;
                if (top < lowmem_end_addr)
                        reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
                else if (reg->base < lowmem_end_addr) {
                        unsigned long trunc_size = lowmem_end_addr - reg->base;
                        reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
                }
        }
#else
        free_bootmem_with_active_regions(0, max_pfn);

        /* reserve the sections we're already using */
        for_each_memblock(reserved, reg)
                reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
#endif
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);

        init_bootmem_done = 1;
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
        struct memblock_region *reg, *prev = NULL;

        for_each_memblock(memory, reg) {
                if (prev &&
                    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
                        register_nosave_region(memblock_region_memory_end_pfn(prev),
                                               memblock_region_memory_base_pfn(reg));
                prev = reg;
        }
        return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long long total_ram = memblock_phys_mem_size();
        phys_addr_t top_of_ram = memblock_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
        unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
        unsigned long end = __fix_to_virt(FIX_HOLE);

        for (; v < end; v += PAGE_SIZE)
                map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
               (unsigned long long)top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (long int)((top_of_ram - total_ram) >> 20));
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

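/*
 * Late memory-management init: release the bootmem pages to the buddy
 * allocator, count the remaining reserved pages, free highmem pages
 * where configured, and print the memory and virtual-layout summaries.
 */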
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

#ifdef CONFIG_SWIOTLB
        if (ppc_swiotlb_enable)
                swiotlb_init(1);
#endif

        num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %d\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = max_pfn;
        totalram_pages += free_all_bootmem();
#endif
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
                        struct page *page = pfn_to_page(pfn);
                        if (memblock_is_reserved(paddr))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                        reservedpages--;
                }
                totalram_pages += totalhigh_pages;
                printk(KERN_DEBUG "High memory: %luk\n",
                       totalhigh_pages << (PAGE_SHIFT-10));
        }
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
        /*
         * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
         * functions.... do it here for the non-smp case.
         */
        per_cpu(next_tlbcam_idx, smp_processor_id()) =
                (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
               nr_free_pages() << (PAGE_SHIFT-10),
               num_physpages << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               bsssize >> 10,
               initsize >> 10);

#ifdef CONFIG_PPC32
        pr_info("Kernel virtual memory layout:\n");
        pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
        pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
                PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
        pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
                IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
        pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
                ioremap_bot, IOREMAP_TOP);
        pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
                VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

        mem_init_done = 1;
}

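/*
 * Free the kernel's __init text/data: poison each page, give it back
 * to the page allocator and account for it in totalram_pages.
 */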
void free_initmem(void)
{
        unsigned long addr;

        ppc_md.progress = ppc_printk_progress;

        addr = (unsigned long)__init_begin;
        for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        pr_info("Freeing unused kernel memory: %luk freed\n",
                ((unsigned long)__init_end -
                (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
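/* Release the pages holding the initrd image once it is no longer needed. */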
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start >= end)
                return;

        start = _ALIGN_DOWN(start, PAGE_SIZE);
        end = _ALIGN_UP(end, PAGE_SIZE);
        pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean. We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

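/*
 * Flush the data cache and invalidate the instruction cache for a
 * page, picking a mapping strategy appropriate to the platform:
 * kmap on Book E, the direct mapping on 8xx/64-bit, and the physical
 * address elsewhere.
 */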
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (PageCompound(page)) {
                flush_dcache_icache_hugepage(page);
                return;
        }
#endif
#ifdef CONFIG_BOOKE
        {
                void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
                __flush_dcache_icache(start);
                kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
        }
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* On 8xx and 64-bit there is no need to kmap since highmem is
         * not supported */
        __flush_dcache_icache(page_address(page));
#else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

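/*
 * Copy a user page and mark the destination as not i-cache clean; the
 * short-circuit for non-executable mappings below is disabled because
 * of the old binutils/glibc problems described in the comment.
 */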
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address. Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

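/*
 * Make a range of a user page coherent between the d-cache and
 * i-cache, for example after the kernel has written instructions into
 * it (as ptrace does when inserting a breakpoint).
 */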
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;

        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text. We have to test
         * for regs NULL since init will get here first thing at boot
         *
         * We also avoid filling the hash if not coming from a fault
         */
        if (current->thread.regs == NULL)
                return;
        trap = TRAP(current->thread.regs);
        if (trap == 0x400)
                access |= _PAGE_EXEC;
        else if (trap != 0x300)
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}