arch/powerpc/mm/init_64.c
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *	Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

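/* Descriptive note: memstart_addr appears to hold the base physical address
 * of system memory and kernstart_addr the physical address the kernel was
 * loaded at; both seem to be filled in by the early boot/relocation code,
 * with ~0 used as a "not yet known" sentinel. */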
phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;

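/* Release the kernel's __init text/data: poison each page, hand it back to
 * the page allocator and account for it in totalram_pages. */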
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk ("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
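/* Return the pages that held the initrd image to the page allocator once
 * the initrd is no longer needed. */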
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

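/* kmem_cache constructors for the pagetable caches set up below: a newly
 * allocated table must start out zeroed, i.e. with all entries invalid. */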
static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables. This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else. Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits. Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits. All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so, so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	PGT_CACHE(shift) = new;

	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
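
/*
 * Illustrative sketch (not code from this file): the caches registered
 * above are meant to be consumed via PGT_CACHE(), roughly along the lines
 * of the pgd/pmd allocators elsewhere in the tree, e.g.:
 *
 *	pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
 *	...
 *	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 *
 * The exact call sites and GFP flags shown here are assumptions.
 */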


void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
		panic("Couldn't allocate pgtable caches");

	/* In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index. Verify that the
	 * initialization above has also created a PUD cache. This
	 * will need re-examination if we add new possibilities for
	 * the pagetable layout. */
	BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within. Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
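
/*
 * Worked example with hypothetical numbers: if the address passed in lies
 * inside the struct page for pfn 0x12345 and a section spans 0x10000 pages,
 * then offset / sizeof(struct page) recovers 0x12345 and the mask rounds it
 * down to 0x10000, the first pfn of the section containing that page.
 */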

/*
 * Check if this vmemmap page is already initialised. If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things. Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       PAGE_KERNEL, mmu_vmemmap_psize,
				       mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

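/* Arch hook for SPARSEMEM_VMEMMAP: back the struct page array for
 * [start_page, start_page + nr_pages) with pages of the vmemmap page size,
 * skipping any ranges that an already-initialised section covers. */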
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
		 start_page, nr_pages, node);
	pr_debug(" -> map %lx..%lx\n", start, end);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug(" * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */