/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/io.h>
#include <asm/setup.h>

#include "mmu_decl.h"

unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */

extern char etext[], _stext[], _sinittext[], _einittext[];

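/*
 * Allocate a zeroed page to back a kernel page table.  Early in boot,
 * before the slab/page allocators are available, the page comes from
 * memblock instead; the __ref annotation is there because of that
 * early (init-time) path.
 */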
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	if (slab_is_available()) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	} else {
		pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
		if (pte)
			clear_page(pte);
	}
	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *ptepage;

	gfp_t flags = GFP_KERNEL | __GFP_ZERO;

	ptepage = alloc_pages(flags, 0);
	if (!ptepage)
		return NULL;
	if (!pgtable_page_ctor(ptepage)) {
		__free_page(ptepage);
		return NULL;
	}
	return ptepage;
}

void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

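/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * mapping a device register block would typically do
 *
 *	void __iomem *regs = ioremap(dev_phys, dev_size);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(val, regs + SOME_REG);
 *	...
 *	iounmap(regs);
 *
 * where dev_phys, dev_size and SOME_REG are hypothetical names.
 */
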
void __iomem *
ioremap_wc(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	/* writeable implies dirty for kernel addresses */
	if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
		 void *caller)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from IOREMAP_TOP
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
		printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
		       (unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * mapping.
	 */
	v = p_block_mapped(p);
	if (v)
		goto out;

	if (slab_is_available()) {
		struct vm_struct *area;
		area = get_vm_area_caller(size, VM_IOREMAP, caller);
		if (area == 0)
			return NULL;
		area->phys_addr = p;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (slab_is_available())
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
EXPORT_SYMBOL(__ioremap);

void iounmap(volatile void __iomem *addr)
{
	/*
	 * If mapped by BATs then there is nothing to do.
	 * Calling vfree() generates a benign warning.
	 */
	if (v_block_mapped((unsigned long)addr))
		return;

	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vunmap((void *) (PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);

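/*
 * With the (default) 4 KiB page size, a 32-bit virtual address splits
 * 10/10/12 across the two page-table levels.  A worked example
 * (illustrative only), for va = 0xc0123456:
 *
 *	top 10 bits:    va >> 22           = 0x300  (first-level index)
 *	middle 10 bits: (va >> 12) & 0x3ff = 0x123  (second-level index)
 *	low 12 bits:    va & 0xfff         = 0x456  (offset within page)
 *
 * map_page() below installs one such kernel mapping for a single page.
 */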
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
		       flags);
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
						     __pgprot(flags)));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory starting at offset.
 */
void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s, f;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
		map_page(v, p, f);
#ifdef CONFIG_PPC_STD_MMU_32
		if (ktext)
			hash_preload(&init_mm, v, 0, 0x300);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

void __init mapin_ram(void)
{
	unsigned long s, top;

#ifndef CONFIG_WII
	top = total_lowmem;
	s = mmu_mapin_ram(top);
	__mapin_ram_chunk(s, top);
#else
	if (!wii_hole_size) {
		s = mmu_mapin_ram(total_lowmem);
		__mapin_ram_chunk(s, total_lowmem);
	} else {
		top = wii_hole_start;
		s = mmu_mapin_ram(top);
		__mapin_ram_chunk(s, top);

		top = memblock_end_of_DRAM();
		s = wii_mmu_mapin_mem2(top);
		__mapin_ram_chunk(s, top);
	}
#endif
}

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise.  The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
	pgd_t	*pgd;
	pud_t	*pud;
	pmd_t	*pmd;
	pte_t	*pte;
	int	retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pud = pud_offset(pgd, addr & PAGE_MASK);
		if (pud && pud_present(*pud)) {
			pmd = pmd_offset(pud, addr & PAGE_MASK);
			if (pmd_present(*pmd)) {
				pte = pte_offset_map(pmd, addr & PAGE_MASK);
				if (pte) {
					retval = 1;
					*ptep = pte;
					if (pmdp)
						*pmdp = pmd;
					/* XXX caller needs to do pte_unmap, yuck */
				}
			}
		}
	}
	return(retval);
}

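/*
 * As the XXX above notes, a successful lookup leaves the PTE page
 * mapped; the caller must do the pte_unmap().  See __change_page_attr()
 * below (CONFIG_DEBUG_PAGEALLOC) for a caller that does exactly that.
 */
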
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __change_page_attr(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	pmd_t *kpmd;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_block_mapped(address))
		return 0;
	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
	wmb();
	flush_tlb_page(NULL, address);
	pte_unmap(kpte);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}
	local_irq_restore(flags);
	return err;
}

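/*
 * Arch hook for CONFIG_DEBUG_PAGEALLOC: the page allocator uses this to
 * map pages when they are allocated and unmap them when they are freed,
 * so that stray accesses to freed pages fault immediately.
 */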
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

static int fixmaps;

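/*
 * Install a fixmap entry.  Callers normally go through the generic
 * set_fixmap()/clear_fixmap() helpers (assuming asm-generic/fixmap.h is
 * in use) rather than calling this directly, e.g.
 *
 *	set_fixmap(FIX_FOO, phys);	map phys at fix_to_virt(FIX_FOO)
 *	clear_fixmap(FIX_FOO);		tear the mapping down again
 *
 * FIX_FOO is a hypothetical index used only for illustration.
 */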
void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	map_page(address, phys, pgprot_val(flags));
	fixmaps++;
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}