/*
 * linux/arch/parisc/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 1999 SuSE GmbH
 *   changed by Philipp Rumpf
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2004 Randolph Chung (tausq@debian.org)
 *
 */

#include <linux/config.h>

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern char _text;	/* start of kernel code, defined by linker */
extern int  data_start;
extern char _end;	/* end of BSS, defined by linker */
extern char __init_begin, __init_end;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES];
bootmem_data_t bmem_data[MAX_NUMNODES];
unsigned char pfnnid_map[PFNNID_MAP_MAX];
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES];

/* The following array is initialized from the firmware-specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES];
int npmem_ranges;

#ifdef __LP64__
#define MAX_MEM         (~0UL)
#else /* !__LP64__ */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !__LP64__ */

static unsigned long mem_limit = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;
	extern char saved_command_line[];

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = saved_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
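
/*
 * Illustrative sketch, not part of the original file and not built: a
 * user-space model of the "mem=" scan above.  strtoul() stands in for
 * memparse(), which additionally accepts K/M/G suffixes; the name
 * parse_mem_limit is hypothetical.
 */
#if 0
#include <stdlib.h>
#include <string.h>

static unsigned long parse_mem_limit(const char *cmdline, unsigned long def)
{
	const char *cp = cmdline;
	unsigned long limit = def;
	char *end;

	while (*cp) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = strtoul(cp, &end, 0);
			if (end != cp)		/* parsed a number: done */
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)	/* skip this token */
				++cp;
			while (*cp == ' ')		/* and the spaces after it */
				++cp;
		}
	}
	return limit;
}

/* parse_mem_limit("root=/dev/sda1 mem=0x10000000 quiet", ~0UL)
 *	== 0x10000000 */
#endif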

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
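/*
 * With 4kB pages (PAGE_SHIFT == 12, the usual parisc configuration)
 * MAX_GAP is 0x40000 pfns: ranges separated by more than 1GB are
 * dropped below unless CONFIG_DISCONTIGMEM is on.
 */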

static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {

				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
			        pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	num_physpages = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			num_physpages += pmem_ranges[i].pages;
			break;
		}
		num_physpages += pmem_ranges[i].pages;
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bmem_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++)
		node_set_online(i);
#endif

	/*
	 * Initialize and free the full span of each memory range.
	 * Note that the only writing these routines do is to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				  (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
		printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
		BUG();
	}

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)&_text),
			(unsigned long)(&_end - &_text));
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
				__pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(&_end)-1;
	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&data_start)-1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}
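
/*
 * Sketch (illustrative only, not part of the original file and not
 * built): what bootmem_bootmap_pages() computes for the sizing check
 * above.  The boot-time allocator keeps one bit per page, so a range
 * of "pages" pages needs (pages + 7) / 8 bytes of bitmap, rounded up
 * to whole pages.
 */
#if 0
static unsigned long bitmap_pages_for(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;		/* one bit per page */

	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* round up */
}
#endif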

void free_initmem(void)
{
	/* FIXME: */
#if 0
	printk(KERN_INFO "NOT FREEING INITMEM (%dk)\n",
			(&__init_end - &__init_begin) >> 10);
	return;
#else
	unsigned long addr;

	printk(KERN_INFO "Freeing unused kernel memory: ");

#if 1
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 *
	 * If we disable interrupts for all CPUs, then IPI stops working.
	 * Kinda breaks the global cache flushing.
	 */
	local_irq_disable();

	memset(&__init_begin, 0x00,
		(unsigned long)&__init_end - (unsigned long)&__init_begin);

	flush_data_cache();
	asm volatile("sync" : : );
	flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
	asm volatile("sync" : : );

	local_irq_enable();
#endif

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new LED state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
#endif
}

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
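
/*
 * Worked example: VM_MAP_OFFSET is 32kB (0x8000), so
 * SET_MAP_OFFSET(0x10000) and SET_MAP_OFFSET(0x10001) both yield
 * (void *)0x18000.  The result is always 32kB-aligned and strictly
 * above the input, leaving a hole of between 1 byte and 32kB.
 */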

void *vmalloc_start;
EXPORT_SYMBOL(vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start;
#endif

void __init mem_init(void)
{
	high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
	totalram_pages += free_all_bootmem();
#else
	{
		int i;

		for (i = 0; i < npmem_ranges; i++)
			totalram_pages += free_all_bootmem_node(NODE_DATA(i));
	}
#endif

	printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

}
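
/*
 * Resulting layout (a sketch; assumes MAP_START is 32kB-aligned): with
 * the pcxl DMA ops selected, MAP_START is followed by a 32kB hole,
 * then the pcxl DMA map of PCXL_DMA_MAP_SIZE bytes, another hole, then
 * vmalloc_start; otherwise vmalloc_start sits a single 32kB hole above
 * MAP_START.
 */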

int do_check_pgt_cache(int low, int high)
{
	return 0;
}

unsigned long *empty_zero_page;

void show_mem(void)
{
	int i, free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n",
	       nr_swap_pages<<(PAGE_SHIFT-10));
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;

			p = node_mem_map(i) + j - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);


#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j, k;

		for (i = 0; i < npmem_ranges; i++) {
			for (j = 0; j < MAX_NR_ZONES; j++) {
				zl = NODE_DATA(i)->node_zonelists + j;

				printk("Zone list for zone %d on node %d: ", j, i);
				for (k = 0; zl->zones[k] != NULL; k++)
					printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id, zl->zones[k]->name);
				printk("\n");
			}
		}
	}
#endif
}


static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr,
			     unsigned long size, pgprot_t pgprot)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)&_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (address >= ro_start && address < ro_end
							&& address != fv_addr
							&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}
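
/*
 * Sketch (illustrative only, not built): how map_pages() derives its
 * starting table indices from a virtual address on a three-level
 * configuration.  start_pmd/start_pte above are exactly these values
 * for start_vaddr, and both fall back to 0 once the first partially
 * filled table has been walked.
 */
#if 0
static void split_vaddr(unsigned long vaddr,
			unsigned long *pmd_idx, unsigned long *pte_idx)
{
	/* middle-level index: the vaddr bits above PMD_SHIFT */
	*pmd_idx = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	/* lowest-level index: the page-number bits below PMD_SHIFT */
	*pte_idx = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
#endif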

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced, this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm, hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

extern void flush_tlb_all_local(void);

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local();

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };

		/* We have an IOMMU, so all memory can go into a single
		   ZONE_DMA zone. */
		zones_size[ZONE_DMA] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, NODE_DATA(i), zones_size,
				    pmem_ranges[i].start_pfn, NULL);
	}
}
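
/*
 * A note on the pfnnid_map filled in above: it resolves a pfn to its
 * node in O(1) -- entry (pfn >> PFNNID_SHIFT) holds the node id, so
 * each entry covers a 2^PFNNID_SHIFT-page block.  setup_bootmem()
 * presets the whole map to 0xff, which presumably marks blocks that
 * belong to no node.
 */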

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection id's, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space id's and
 * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection id's, so that is the limiting factor.
 * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
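
/*
 * For example, on a 64-bit kernel (8-byte longs) SID_ARRAY_SIZE works
 * out to 32768 / 64 = 512 longs without CONFIG_PA20, or 262144 / 64 =
 * 4096 longs with it: one bit of bitmap per space id.
 */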

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		if (free_space_ids == 0)
			BUG();
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}
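
/*
 * Sketch (illustrative only, not built): the allocation step above in
 * isolation.  The search starts at the rotating space_id_index; bits
 * are only cleared again in recycle_sids(), which also resets the
 * index to 0, so a clear bit always exists at or above the index
 * whenever free_space_ids is non-zero.
 */
#if 0
static unsigned long model_alloc_sid(void)
{
	unsigned long index = space_id_index;

	while (space_id[index >> SHIFT_PER_LONG] &
	       (1L << (index & (BITS_PER_LONG - 1))))
		index++;	/* what find_next_zero_bit() does here */
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;
	return index << SPACEID_SHIFT;	/* the sid proper sits above the shift */
}
#endif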

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	if (*dirty_space_offset & (1L << index))
		BUG(); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif
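
/*
 * Why the XOR in recycle_sids() works: a bit set in the dirty array is
 * guaranteed to also be set in space_id (free_sid() only marks ids
 * that alloc_sid() handed out), so x ^= dirty clears exactly the dirty
 * bits.  E.g. a space_id word of 0b1101 with a dirty word of 0b0100
 * recycles to 0b1001.
 */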

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse = 0;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		if (recycle_inuse) {
			BUG();  /* FIXME: Use a semaphore/wait queue here */
		}
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local();
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
#if 0
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		num_physpages++;
		totalram_pages++;
	}
#endif
}
#endif