/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *	Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;

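/*
 * Free the memory occupied by the kernel's __init sections once boot
 * is complete.  Each page is first filled with the POISON_FREE_INITMEM
 * pattern, so a stale reference into freed init code or data shows up
 * as recognisable poison instead of silently appearing to work.
 */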
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n",
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %luk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it. */
	unsigned long minalign = MAX_PGTABLE_INDEX_SIZE + 1;
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so, so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	PGT_CACHE(shift) = new;

	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
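
/*
 * Illustrative sketch, not from the original file: callers obtain
 * tables from these caches through the PGT_CACHE(shift) accessor.
 * Assuming the pgalloc helpers of this kernel generation, allocating
 * and freeing a pgd looks roughly like:
 *
 *	pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
 *				      GFP_KERNEL);
 *	...
 *	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 *
 * The ctor passed to pgtable_cache_add() zeroes each table when its
 * slab object is constructed, so tables handed out here start empty.
 */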

void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
		panic("Couldn't allocate pgtable caches");

	/* In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index.  Verify that the
	 * initialization above has also created a PUD cache.  This
	 * will need re-examination if we add new possibilities for
	 * the pagetable layout. */
	BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
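
/*
 * Worked example with illustrative values only: if sizeof(struct page)
 * were 64 and PAGES_PER_SECTION were 4096, each section's struct pages
 * would occupy 64 * 4096 = 256KB of vmemmap.  An address 100 struct
 * pages past vmemmap gives offset / sizeof(struct page) = 100, which
 * PAGE_SECTION_MASK rounds down to pfn 0, the first pfn of section 0.
 */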

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);
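	/* Since enc is at most 0xf, the shift below keeps the whole
	 * size encoding within PTE bits 8..11. */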

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things.  Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear.
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       PAGE_KERNEL, mmu_vmemmap_psize,
				       mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate page %p, %lu pages, node %d\n",
		 start_page, nr_pages, node);
	pr_debug(" -> map %lx..%lx\n", start, end);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug(" * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */