// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>

extern int  data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
int npmem_ranges __initdata;

#ifdef CONFIG_64BIT
#define MAX_MEM         (1UL << MAX_PHYSMEM_BITS)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
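
/*
 * Example: booting with "mem=512M" makes memparse() return 512 << 20,
 * so mem_limit is clamped to 512 MB and setup_bootmem() below truncates
 * pmem_ranges[] to stay below that limit.
 */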

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long mem_max;
#ifndef CONFIG_SPARSEMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			physmem_range_t tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1];
			pmem_ranges[j-1] = pmem_ranges[j];
			pmem_ranges[j] = tmp;
		}
	}

#ifndef CONFIG_SPARSEMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_SPARSEMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
			        pmem_ranges[i-1].pages));
			break;
		}
	}
#endif
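
	/*
	 * Example: MAX_GAP is 1 GB expressed in page frames (0x40000000
	 * bytes >> PAGE_SHIFT).  A hole of more than 1 GB between two
	 * consecutive ranges drops every range above the hole, since
	 * without sparsemem a single mem_map would have to span the gap.
	 */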

	/* Print the memory ranges */
	pr_info("Memory Ranges:\n");

	for (i = 0; i < npmem_ranges; i++) {
		struct resource *res = &sysram_resources[i];
		unsigned long start;
		unsigned long size;

		size = (pmem_ranges[i].pages << PAGE_SHIFT);
		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
			i, start, start + (size - 1), size >> 20);

		/* request memory resource */
		res->name = "System RAM";
		res->start = start;
		res->end = start + size - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	sysram_resource_count = npmem_ranges;

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_SPARSEMEM
	/* Merge the ranges, keeping track of the holes */
	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif
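
	/*
	 * Example: ranges [0, 1 GB) and [1.5 GB, 2 GB) are collapsed into
	 * the single range [0, 2 GB), with the 0.5 GB gap recorded in
	 * pmem_holes[].  The holes are reserved from memblock further
	 * below, so they are never handed out as free memory.
	 */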

	/*
	 * Initialize and free the full range of memory in each range.
	 */

	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;
		unsigned long start;
		unsigned long size;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		start = start_pfn << PAGE_SHIFT;
		size = npages << PAGE_SHIFT;

		/* add system RAM memblock */
		memblock_add(start, size);

		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/*
	 * We can't use memblock top-down allocations because we only
	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
	 * the assembly bootup code.
	 */
	memblock_set_bottom_up(true);

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE));
	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_SPARSEMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			memblock_reserve(__pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start)-1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);

	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
	pdc_pdt_init();

	memblock_allow_resize();
	memblock_dump_all();
}

static bool kernel_set_to_readonly;
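
/*
 * map_pages() fills in the kernel page tables for the virtual range
 * [start_vaddr, start_vaddr + size), backed by the physical range
 * starting at start_paddr.  With force=1 the supplied pgprot is used
 * as-is; with force=0 the protection is chosen per address: PAGE_KERNEL
 * outside the kernel image, PAGE_KERNEL_RWX while the kernel is still
 * initializing, and PAGE_KERNEL_EXEC (text) or PAGE_KERNEL (data) once
 * kernel_set_to_readonly has been set by free_initmem().
 */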
static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long kernel_start, kernel_end;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	kernel_start = __pa((unsigned long)&__init_begin);
	kernel_end  = __pa((unsigned long)&_end);

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
					     PAGE_SIZE << PMD_ORDER);
			if (!pmd)
				panic("pmd allocation failed.\n");
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = memblock_alloc(PAGE_SIZE,
							  PAGE_SIZE);
				if (!pg_table)
					panic("page table allocation failed\n");
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;
				pgprot_t prot;
				bool huge = false;

				if (force) {
					prot = pgprot;
				} else if (address < kernel_start || address >= kernel_end) {
					/* outside kernel memory */
					prot = PAGE_KERNEL;
				} else if (!kernel_set_to_readonly) {
					/* still initializing, allow writing to RO memory */
					prot = PAGE_KERNEL_RWX;
					huge = true;
				} else if (address >= ro_start) {
					/* Code (ro) and Data areas */
					prot = (address < ro_end) ?
						PAGE_KERNEL_EXEC : PAGE_KERNEL;
					huge = true;
				} else {
					prot = PAGE_KERNEL;
				}

				pte = __mk_pte(address, prot);
				if (huge)
					pte = pte_mkhuge(pte);

				if (address >= end_paddr)
					break;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

void __init set_kernel_text_rw(int enable_read_write)
{
	unsigned long start = (unsigned long) __init_begin;
	unsigned long end   = (unsigned long) &data_start;

	map_pages(start, __pa(start), end-start,
		  PAGE_KERNEL_RWX, enable_read_write ? 1:0);

	/* force the kernel to see the new page table entries */
	flush_cache_all();
	flush_tlb_all();
}

void __ref free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;
	unsigned long kernel_end  = (unsigned long)&_end;

	/* Remap kernel text and data, but do not touch init section yet. */
	kernel_set_to_readonly = true;
	map_pages(init_end, __pa(init_end), kernel_end - init_end,
		  PAGE_KERNEL, 0);

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_kernel */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, kernel_end);

	/* finally dump all the instructions which were cached, since the
	 * pages are no longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(POISON_FREE_INITMEM);

	/* set up a new led state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;

	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
}
#endif

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     &~(VM_MAP_OFFSET-1)))
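
/*
 * Example: SET_MAP_OFFSET(0x12345) = (0x12345 + 0x8000) & ~0x7fff
 * = 0x18000 -- the next 32K boundary strictly above the input, which
 * leaves a guard gap of up to 32K between mapping areas.
 */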

void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __ro_after_init;
#endif

void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
	memblock_free_all();

#ifdef CONFIG_PA11
	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else
#endif
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);

	mem_init_print_info(NULL);

#if 0
	/*
	 * Do not expose the virtual kernel memory layout to userspace.
	 * But keep code for debugging purposes.
	 */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
	       "    fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
	       "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
	       "      .init : 0x%px - 0x%px   (%4ld kB)\n"
	       "      .data : 0x%px - 0x%px   (%4ld kB)\n"
	       "      .text : 0x%px - 0x%px   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
	       (unsigned long)(FIXMAP_SIZE / 1024),

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __ro_after_init;
EXPORT_SYMBOL(empty_zero_page);

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		size = pmem_ranges[range].pages << PAGE_SHIFT;
		end_paddr = start_paddr + size;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("zero page allocation failed.\n");

}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}
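
/*
 * Feed all memory into a single zone: zones_size[0] spans from the
 * lowest to the highest populated pfn, and any pfns in between that
 * are not backed by a pmem_range are accounted as holes.
 */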
static void __init parisc_bootmem_free(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long holes_size[MAX_NR_ZONES] = { 0, };
	unsigned long mem_start_pfn = ~0UL, mem_end_pfn = 0, mem_size_pfn = 0;
	int i;

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start = pmem_ranges[i].start_pfn;
		unsigned long size = pmem_ranges[i].pages;
		unsigned long end = start + size;

		if (mem_start_pfn > start)
			mem_start_pfn = start;
		if (mem_end_pfn < end)
			mem_end_pfn = end;
		mem_size_pfn += size;
	}

	zones_size[0] = mem_end_pfn - mem_start_pfn;
	holes_size[0] = zones_size[0] - mem_size_pfn;

	free_area_init_node(0, zones_size, mem_start_pfn, holes_size);
}

void __init paging_init(void)
{
	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	/*
	 * Mark all memblocks as present for sparsemem using
	 * memory_present() and then initialize sparsemem.
	 */
	memblocks_present();
	sparse_init();
	parisc_bootmem_free();
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE    (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);
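
/*
 * Example: with NR_SPACE_IDS = 262144 and 64-bit longs, the bitmap needs
 * SID_ARRAY_SIZE = 262144 / (8 * 8) = 4096 longs; with NR_SPACE_IDS =
 * 32768 and 32-bit longs it needs 32768 / (8 * 4) = 1024 longs.
 * Recycling kicks in once half of all space IDs sit in the dirty bitmap.
 */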

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}
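
/*
 * A freed space ID is not immediately reusable: stale TLB entries may
 * still be tagged with it.  free_sid() therefore only marks the ID
 * dirty; the ID returns to the free pool in recycle_sids() after
 * flush_tlb_all() has purged the entire TLB.
 */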

#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	__inc_irq_stat(irq_tlb_count);
	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif