/*
 * PowerPC version
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/imalloc.h>

#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif

int mem_init_done;
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;

extern pgd_t swapper_pg_dir[];
extern struct task_struct *current_set[NR_CPUS];

unsigned long klimit = (unsigned long)_end;

unsigned long _SDR1 = 0;
unsigned long _ASR = 0;

/* max amount of RAM to use */
unsigned long __max_memory;

/* info on what we think the IO hole is */
unsigned long io_hole_start;
unsigned long io_hole_size;

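/*
 * Print a summary of memory state to the console: free areas, free
 * swap, and counts of total/reserved/swap-cached/shared pages across
 * all nodes (reachable e.g. via the SysRq 'm' key).
 */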
void show_mem(void)
{
        unsigned long total = 0, reserved = 0;
        unsigned long shared = 0, cached = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk("%ld pages of RAM\n", total);
        printk("%ld reserved pages\n", reserved);
        printk("%ld pages shared\n", shared);
        printk("%ld pages swap cached\n", cached);
}

#ifdef CONFIG_PPC_ISERIES

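/*
 * iSeries has no need to set up MMIO translations here; the
 * ioremap()/iounmap() family degenerates to simple casts that hand
 * the address straight back.
 */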
void __iomem *ioremap(unsigned long addr, unsigned long size)
{
        return (void __iomem *)addr;
}

extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
                               unsigned long flags)
{
        return (void __iomem *)addr;
}

void iounmap(volatile void __iomem *addr)
{
        return;
}

#else

/*
 * map_io_page currently only called by __ioremap
 * map_io_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
static int map_io_page(unsigned long ea, unsigned long pa, int flags)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        unsigned long vsid;

        if (mem_init_done) {
                spin_lock(&init_mm.page_table_lock);
                pgdp = pgd_offset_k(ea);
                pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp) {
                        spin_unlock(&init_mm.page_table_lock);
                        return -ENOMEM;
                }
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp) {
                        spin_unlock(&init_mm.page_table_lock);
                        return -ENOMEM;
                }
                ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
                if (!ptep) {
                        spin_unlock(&init_mm.page_table_lock);
                        return -ENOMEM;
                }
                set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
                                                       __pgprot(flags)));
                spin_unlock(&init_mm.page_table_lock);
        } else {
                unsigned long va, vpn, hash, hpteg;

                /*
                 * If the mm subsystem is not fully up, we cannot create a
                 * linux page table entry for this mapping.  Simply bolt an
                 * entry in the hardware page table.
                 */
                vsid = get_kernel_vsid(ea);
                va = (vsid << 28) | (ea & 0xFFFFFFF);
                vpn = va >> PAGE_SHIFT;

                hash = hpt_hash(vpn, 0);

                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

                /* Panic if a pte group is full */
                if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
                                       HPTE_V_BOLTED,
                                       _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
                    == -1) {
                        panic("map_io_page: could not insert mapping");
                }
        }
        return 0;
}

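/*
 * Shared back end for ioremap()/__ioremap()/__ioremap_explicit(): map
 * 'size' bytes at physical address 'pa' to virtual address 'ea' one
 * page at a time, defaulting to kernel protections when the caller
 * passes no present flags.  Returns the mapped address adjusted for
 * the sub-page offset of 'addr', or NULL if any page fails to map.
 */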
static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
                                    unsigned long ea, unsigned long size,
                                    unsigned long flags)
{
        unsigned long i;

        if ((flags & _PAGE_PRESENT) == 0)
                flags |= pgprot_val(PAGE_KERNEL);

        for (i = 0; i < size; i += PAGE_SIZE)
                if (map_io_page(ea+i, pa+i, flags))
                        return NULL;

        return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}

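/*
 * Standard MMIO mapping: cache-inhibited and guarded, as required for
 * memory-mapped IO on PowerPC.
 */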
void __iomem *
ioremap(unsigned long addr, unsigned long size)
{
        return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
}

void __iomem * __ioremap(unsigned long addr, unsigned long size,
                         unsigned long flags)
{
        unsigned long pa, ea;
        void __iomem *ret;

        /*
         * Choose an address to map it to.
         * Once the imalloc system is running, we use it.
         * Before that, we map using addresses going
         * up from ioremap_bot.  imalloc will use
         * the addresses from ioremap_bot through
         * IMALLOC_END
         */
        pa = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - pa;

        if (size == 0)
                return NULL;

        if (mem_init_done) {
                struct vm_struct *area;
                area = im_get_free_area(size);
                if (area == NULL)
                        return NULL;
                ea = (unsigned long)(area->addr);
                ret = __ioremap_com(addr, pa, ea, size, flags);
                if (!ret)
                        im_free(area->addr);
        } else {
                ea = ioremap_bot;
                ret = __ioremap_com(addr, pa, ea, size, flags);
                if (ret)
                        ioremap_bot += size;
        }
        return ret;
}

#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))

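/*
 * Like __ioremap(), but the caller supplies the virtual address 'ea'
 * the region must land at.  Returns 0 on success and 1 on any failure
 * (note: not a -errno).  Used by paths that must reestablish a mapping
 * at a known address, e.g. PHB dynamic reconfiguration.
 */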
int __ioremap_explicit(unsigned long pa, unsigned long ea,
                       unsigned long size, unsigned long flags)
{
        struct vm_struct *area;
        void __iomem *ret;

        /* For now, require page-aligned values for pa, ea, and size */
        if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
            !IS_PAGE_ALIGNED(size)) {
                printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
                return 1;
        }

        if (!mem_init_done) {
                /* Two things to consider in this case:
                 * 1) No records will be kept (imalloc, etc) that the region
                 *    has been remapped
                 * 2) It won't be easy to iounmap() the region later (because
                 *    of 1)
                 */
                ;
        } else {
                area = im_get_area(ea, size,
                        IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
                if (area == NULL) {
                        /* Expected when PHB-dlpar is in play */
                        return 1;
                }
                if (ea != (unsigned long) area->addr) {
                        printk(KERN_ERR "unexpected addr return from "
                               "im_get_area\n");
                        return 1;
                }
        }

        ret = __ioremap_com(pa, pa, ea, size, flags);
        if (ret == NULL) {
                printk(KERN_ERR "ioremap_explicit() allocation failure!\n");
                return 1;
        }
        if (ret != (void *) ea) {
                printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
                return 1;
        }

        return 0;
}

/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 * This code is modeled after vmalloc code - unmap_vm_area()
 *
 * XXX what about calls before mem_init_done (ie python_countermeasures())
 */
void iounmap(volatile void __iomem *token)
{
        void *addr;

        if (!mem_init_done)
                return;

        addr = (void *) ((unsigned long __force) token & PAGE_MASK);

        im_free(addr);
}

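/*
 * Unmap every imalloc region that lies wholly inside [addr, addr+size).
 * Returns 0 if at least one such region was found and unmapped, 1 if
 * none existed.
 */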
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
        struct vm_struct *area;

        /* Check whether subsets of this region exist */
        area = im_get_area(addr, size, IM_REGION_SUPERSET);
        if (area == NULL)
                return 1;

        while (area) {
                iounmap((void __iomem *) area->addr);
                area = im_get_area(addr, size,
                                   IM_REGION_SUPERSET);
        }

        return 0;
}

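/*
 * Unmap a region previously mapped with __ioremap_explicit().  The
 * region must either exist exactly or be contained in a larger imalloc
 * region (which im_get_area() splits to fit); failing that, any subset
 * regions inside the range are unmapped individually.
 */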
int iounmap_explicit(volatile void __iomem *start, unsigned long size)
{
        struct vm_struct *area;
        unsigned long addr;
        int rc;

        addr = (unsigned long __force) start & PAGE_MASK;

        /* Verify that the region either exists or is a subset of an existing
         * region.  In the latter case, split the parent region to create
         * the exact region
         */
        area = im_get_area(addr, size,
                           IM_REGION_EXISTS | IM_REGION_SUBSET);
        if (area == NULL) {
                /* Determine whether subset regions exist.  If so, unmap */
                rc = iounmap_subset_regions(addr, size);
                if (rc) {
                        printk(KERN_ERR
                               "%s() cannot unmap nonexistent range 0x%lx\n",
                               __FUNCTION__, addr);
                        return 1;
                }
        } else {
                iounmap((void __iomem *) area->addr);
        }
        /*
         * FIXME! This can't be right:
        iounmap(area->addr);
         * Maybe it should be "iounmap(area);"
         */
        return 0;
}

#endif

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);

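/*
 * Release the pages holding the kernel's __init text and data back to
 * the page allocator once boot-time initialization is finished.
 */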
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)__init_begin;
        for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                free_page(addr);
                totalram_pages++;
        }
        printk("Freeing unused kernel memory: %luk freed\n",
               ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif

static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);

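/*
 * Allocate an MMU context ID for a new address space.  IDs come from
 * an IDR so destroy_context() can recycle them; allocation starts at 1
 * (0 is the kernel's) and fails once MAX_CONTEXT is exceeded.  The ID
 * is later fed to get_vsid() to form user VSIDs.
 */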
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int index;
        int err;

again:
        if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
                return -ENOMEM;

        spin_lock(&mmu_context_lock);
        err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
        spin_unlock(&mmu_context_lock);

        if (err == -EAGAIN)
                goto again;
        else if (err)
                return err;

        if (index > MAX_CONTEXT) {
                idr_remove(&mmu_context_idr, index);
                return -ENOMEM;
        }

        mm->context.id = index;

        return 0;
}

void destroy_context(struct mm_struct *mm)
{
        spin_lock(&mmu_context_lock);
        idr_remove(&mmu_context_idr, mm->context.id);
        spin_unlock(&mmu_context_lock);

        mm->context.id = NO_CONTEXT;
}

/*
 * Do very early mm setup.
 */
void __init mm_init_ppc64(void)
{
#ifndef CONFIG_PPC_ISERIES
        unsigned long i;
#endif

        ppc64_boot_msg(0x100, "MM Init");

        /* This is the story of the IO hole... please, keep seated,
         * unfortunately, we are out of oxygen masks at the moment.
         * So we need some rough way to tell where your big IO hole
         * is. On pmac, it's between 2G and 4G, on POWER3, it's around
         * that area as well, on POWER4 we don't have one, etc...
         * We need that as a "hint" when sizing the TCE table on POWER3.
         * So far, the simplest way that seems to work well enough for
         * us is to just assume that the first discontinuity in our
         * physical RAM layout is the IO hole.  That may not be correct
         * in the future (and isn't on iSeries but then we don't care ;)
         */

#ifndef CONFIG_PPC_ISERIES
        for (i = 1; i < lmb.memory.cnt; i++) {
                unsigned long base, prevbase, prevsize;

                prevbase = lmb.memory.region[i-1].base;
                prevsize = lmb.memory.region[i-1].size;
                base = lmb.memory.region[i].base;
                if (base > (prevbase + prevsize)) {
                        io_hole_start = prevbase + prevsize;
                        io_hole_size = base - (prevbase + prevsize);
                        break;
                }
        }
#endif /* CONFIG_PPC_ISERIES */
        if (io_hole_start)
                printk("IO Hole assumed to be %lx -> %lx\n",
                       io_hole_start, io_hole_start + io_hole_size - 1);

        ppc64_boot_msg(0x100, "MM Init Done");
}

/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not
 */
int page_is_ram(unsigned long pfn)
{
        int i;
        unsigned long paddr = (pfn << PAGE_SHIFT);

        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base;

                base = lmb.memory.region[i].base;

                if ((paddr >= base) &&
                    (paddr < (base + lmb.memory.region[i].size))) {
                        return 1;
                }
        }

        return 0;
}
EXPORT_SYMBOL(page_is_ram);

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long i;
        unsigned long start, bootmap_pages;
        unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
        int boot_mapsize;

        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
        BUG_ON(!start);

        boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

        max_pfn = max_low_pfn;

        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base, size;
                unsigned long start_pfn, end_pfn;

                base = lmb.memory.region[i].base;
                size = lmb.memory.region[i].size;

                start_pfn = base >> PAGE_SHIFT;
                end_pfn = start_pfn + (size >> PAGE_SHIFT);
                memory_present(0, start_pfn, end_pfn);

                free_bootmem(base, size);
        }

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long base = lmb.reserved.region[i].base;
                unsigned long size = lmb.reserved.region[i].size;

                reserve_bootmem(base, size);
        }
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES];
        unsigned long zholes_size[MAX_NR_ZONES];
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned long top_of_ram = lmb_end_of_DRAM();

        printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_INFO "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);
        /*
         * All pages are DMA-able so we put them all in the DMA zone.
         */
        memset(zones_size, 0, sizeof(zones_size));
        memset(zholes_size, 0, sizeof(zholes_size));

        zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
        zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;

        free_area_init_node(0, NODE_DATA(0), zones_size,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
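/*
 * Register all physical memory banks and the vmalloc area with
 * /proc/kcore so the running kernel's memory can be examined as an
 * ELF core image (e.g. with gdb).
 */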
static struct kcore_list kcore_vmem;

static int __init setup_kcore(void)
{
        int i;

        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base, size;
                struct kcore_list *kcore_mem;

                base = lmb.memory.region[i].base;
                size = lmb.memory.region[i].size;

                /* GFP_ATOMIC to avoid might_sleep warnings during boot */
                kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
                if (!kcore_mem)
                        panic("mem_init: kmalloc failed\n");

                kclist_add(kcore_mem, __va(base), size);
        }

        kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);

        return 0;
}
module_init(setup_kcore);

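/*
 * Final memory initialization: hand all bootmem pages over to the
 * buddy allocator, count the pages that remain reserved, print the
 * boot-time memory banner, and set mem_init_done so that ioremap()
 * switches over to the imalloc allocator.
 */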
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

        num_physpages = max_low_pfn;    /* RAM is assumed contiguous */
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %x\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = num_physpages;
        totalram_pages += free_all_bootmem();
#endif

        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_etext - (unsigned long)&_stext;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
               (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
               num_physpages << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               bsssize >> 10,
               initsize >> 10);

        mem_init_done = 1;

        /* Initialize the vDSO */
        vdso_init();
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero filled pages are icache clean)
         * - Anton
         */

        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &pg->flags))
                clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address.  Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;

        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &pg->flags))
                clear_bit(PG_arch_1, &pg->flags);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
                      pte_t pte)
{
        unsigned long vsid;
        void *pgdir;
        pte_t *ptep;
        int local = 0;
        cpumask_t tmp;
        unsigned long flags;

        /* handle i-cache coherency */
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
            !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
                unsigned long pfn = pte_pfn(pte);
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);
                        if (!PageReserved(page)
                            && !test_bit(PG_arch_1, &page->flags)) {
                                __flush_dcache_icache(page_address(page));
                                set_bit(PG_arch_1, &page->flags);
                        }
                }
        }

        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte))
                return;

        pgdir = vma->vm_mm->pgd;
        if (pgdir == NULL)
                return;

        ptep = find_linux_pte(pgdir, ea);
        if (!ptep)
                return;

        vsid = get_vsid(vma->vm_mm->context.id, ea);

        local_irq_save(flags);
        tmp = cpumask_of_cpu(smp_processor_id());
        if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
                local = 1;

        __hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
                    0x300, local);
        local_irq_restore(flags);
}

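/*
 * Hand out a chunk of the fixed virtual window reserved for PCI host
 * bridge IO space.  Allocation is a simple bump pointer running from
 * PHBS_IO_BASE up to IMALLOC_BASE; there is no way to give space back.
 */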
void __iomem * reserve_phb_iospace(unsigned long size)
{
        void __iomem *virt_addr;

        if (phbs_io_bot >= IMALLOC_BASE)
                panic("reserve_phb_iospace(): phb io space overflow\n");

        virt_addr = (void __iomem *) phbs_io_bot;
        phbs_io_bot += size;

        return virt_addr;
}

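/*
 * Slab constructor for the page-table caches below: every object is
 * zeroed on allocation, so a freshly allocated table starts with no
 * valid entries.
 */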
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
        memset(addr, 0, kmem_cache_size(cache));
}

static const int pgtable_cache_size[2] = {
        PTE_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
        "pgd_pte_cache", "pud_pmd_cache",
};

kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];

void pgtable_cache_init(void)
{
        int i;

        BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
        BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
        BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
        BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);

        for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
                int size = pgtable_cache_size[i];
                const char *name = pgtable_cache_name[i];

                pgtable_cache[i] = kmem_cache_create(name,
                                                     size, size,
                                                     SLAB_HWCACHE_ALIGN |
                                                     SLAB_MUST_HWCACHE_ALIGN,
                                                     zero_ctor,
                                                     NULL);
                if (!pgtable_cache[i])
                        panic("pgtable_cache_init(): could not create %s!\n",
                              name);
        }
}

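/*
 * Choose the page protection for a /dev/mem style physical mapping:
 * let the platform override via ppc_md, otherwise force guarded,
 * non-cacheable access for anything that is not system RAM.
 */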
pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);

        if (!page_is_ram(addr >> PAGE_SHIFT))
                vma_prot = __pgprot(pgprot_val(vma_prot)
                                    | _PAGE_GUARDED | _PAGE_NO_CACHE);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);