/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/procinfo.h>
#include <asm/memory.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>
/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;
pgprot_t pgprot_hyp_device;
pgprot_t pgprot_s2;
pgprot_t pgprot_s2_device;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
	pteval_t	pte_s2;
};

#ifdef CONFIG_ARM_LPAE
#define s2_policy(policy)	policy
#else
#define s2_policy(policy)	0
#endif
static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
	}, {
		.policy		= "buffered",
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
	}, {
		.policy		= "writethrough",
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
	}, {
		.policy		= "writeback",
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
	}, {
		.policy		= "writealloc",
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
	}
};
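/*
 * The entries above are indexed by the CPOLICY_* constants: the boot-time
 * "cachepolicy=" handling below picks one of them, and build_mem_type_table()
 * later folds the selected .pmd/.pte bits into the mem_types[] table.
 */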
#ifdef CONFIG_CPU_CP15

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has setup the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
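/*
 * Example: booting with "cachepolicy=writethrough" selects the
 * CPOLICY_WRITETHROUGH entry above; on ARMv6 and later the choice is
 * forced back to writeback, since the attributes chosen by the initial
 * assembly page tables cannot be changed here.
 */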
static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);
#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#else /* ifdef CONFIG_CPU_CP15 */

static int __init early_cachepolicy(char *p)
{
	pr_warning("cachepolicy kernel parameter not supported without cp15\n");
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init noalign_setup(char *__unused)
{
	pr_warning("noalign kernel parameter not supported without cp15\n");
	return 1;
}
__setup("noalign", noalign_setup);

#endif /* ifdef CONFIG_CPU_CP15 / else */
#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
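/*
 * PROT_PTE_DEVICE and PROT_SECT_DEVICE are the baseline pte/section bits
 * shared by the MT_DEVICE_* entries below; each entry only adds the memory
 * type and shareability bits that distinguish it.
 */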
static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_pte_s2	= s2_policy(PROT_PTE_S2_DEVICE) |
				  s2_policy(L_PTE_S2_MT_DEV_SHARED) |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
	},
	[MT_CACHECLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain		= DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_RDONLY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_USER | L_PTE_RDONLY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_MEMORY_RWX] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_XN,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect	= PMD_TYPE_SECT,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_RWX_NONCACHED] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_MT_BUFFERABLE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW_DTCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_XN,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_RWX_ITCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW_SO] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_MT_UNCACHED | L_PTE_XN,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				  PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_XN,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_KERNEL,
	},
};
const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
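/*
 * get_mem_type() is the lookup used by e.g. the ioremap and static-mapping
 * code: callers pass one of the MT_* indices and get the corresponding
 * attribute set, or NULL for an out-of-range type.
 */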
#define PTE_SET_FN(_name, pteop) \
static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
			void *data) \
{ \
	pte_t pte = pteop(*ptep); \
\
	set_pte_ext(ptep, pte, 0); \
	return 0; \
}

#define SET_MEMORY_FN(_name, callback) \
int set_memory_##_name(unsigned long addr, int numpages) \
{ \
	unsigned long start = addr; \
	unsigned long size = PAGE_SIZE*numpages; \
	unsigned long end = start + size; \
\
	if (start < MODULES_VADDR || start >= MODULES_END) \
		return -EINVAL; \
\
	if (end < MODULES_VADDR || end >= MODULES_END) \
		return -EINVAL; \
\
	apply_to_page_range(&init_mm, start, size, callback, NULL); \
	flush_tlb_kernel_range(start, end); \
	return 0; \
}

PTE_SET_FN(ro, pte_wrprotect)
PTE_SET_FN(rw, pte_mkwrite)
PTE_SET_FN(x, pte_mkexec)
PTE_SET_FN(nx, pte_mknexec)

SET_MEMORY_FN(ro, pte_set_ro)
SET_MEMORY_FN(rw, pte_set_rw)
SET_MEMORY_FN(x, pte_set_x)
SET_MEMORY_FN(nx, pte_set_nx)
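/*
 * The two macros above expand to set_memory_ro()/rw()/x()/nx().  They only
 * accept addresses inside the module area (MODULES_VADDR..MODULES_END); each
 * call rewrites the ptes for the range via apply_to_page_range() and then
 * flushes the kernel TLB for that range.
 */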
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;
	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}
	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;

			/* Also setup NX memory mapping */
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}
	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
	s2_pgprot = cp->pte_s2;
	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			s2_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}
	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif
	for (i = 0; i < 16; i++) {
		pteval_t v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);
	pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
	pgprot_s2_device = __pgprot(s2_device_pgprot);
	pgprot_hyp_device = __pgprot(hyp_device_pgprot);
	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;
	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	pr_info("Memory policy: %sData cache %s\n",
		ecc_mask ? "ECC enabled, " : "", cp->policy);
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}
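/*
 * early_alloc()/early_alloc_aligned() hand out zeroed memory straight from
 * memblock; they are used below for page tables and the vector page, before
 * the normal page allocator is available.
 */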
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
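/*
 * alloc_init_pte() is the page-granular fallback: it allocates the L2 table
 * on first use and then writes one pte per PAGE_SIZE step of [addr, end),
 * advancing the target pfn in lockstep.
 */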
static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
			unsigned long end, phys_addr_t phys,
			const struct mem_type *type)
{
#ifndef CONFIG_ARM_LPAE
	/*
	 * In classic MMU format, puds and pmds are folded in to
	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
	 * group of L1 entries making up one logical pointer to
	 * an L2 table (2MB), where as PMDs refer to the individual
	 * L1 entries (1MB). Hence increment to get the correct
	 * offset for odd 1MB sections.
	 * (See arch/arm/include/asm/pgtable-2level.h)
	 */
	if (addr & SECTION_SIZE)
		pmd++;
#endif
	do {
		*pmd = __pmd(phys | type->prot_sect);
		phys += SECTION_SIZE;
	} while (pmd++, addr += SECTION_SIZE, addr != end);
}
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		/*
		 * With LPAE, we must loop over to map
		 * all the pmds for the given range.
		 */
		next = pmd_addr_end(addr, end);

		/*
		 * Try a section mapping - addr, next and phys must all be
		 * aligned to a section boundary.
		 */
		if (type->prot_sect &&
				((addr | next | phys) & ~SECTION_MASK) == 0) {
			__map_init_section(pmd, addr, next, phys, type);
		} else {
			alloc_init_pte(pmd, addr, next,
					__phys_to_pfn(phys), type);
		}

		phys += next - addr;

	} while (pmd++, addr = next, addr != end);
}
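/*
 * A section mapping is only attempted when the virtual range, its end and
 * the physical address are all SECTION_SIZE aligned and the type provides
 * section attributes; anything else falls back to alloc_init_pte().
 */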
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}
#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to insure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */
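/*
 * Supersections cover 16MB each: the same descriptor (with PMD_SECT_SUPER
 * set) is replicated into 16 consecutive L1 entries, and bits [35:32] of the
 * physical address are carried in bits [23:20] of that descriptor.
 */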
/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}
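/*
 * create_mapping() walks pgd -> pud -> pmd for the requested range; the
 * static io tables (iotable_init) and the lowmem mapping (map_lowmem) are
 * its main callers during boot.
 */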
/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;
	struct static_vm *svm;

	if (!nr)
		return;

	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);

		vm = &svm->vm;
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		add_static_vm_early(svm++);
	}
}
void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
				  void *caller)
{
	struct vm_struct *vm;
	struct static_vm *svm;

	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));

	vm = &svm->vm;
	vm->addr = (void *)addr;
	vm->size = size;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = caller;
	add_static_vm_early(svm);
}
#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
}

static void __init fill_pmd_gaps(void)
{
	struct static_vm *svm;
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif
#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
static void __init pci_reserve_io(void)
{
	struct static_vm *svm;

	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
	if (svm)
		return;

	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
}
#else
#define pci_reserve_io() do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LL
void __init debug_ll_io_init(void)
{
	struct map_desc map;

	debug_ll_addr(&map.pfn, &map.virtual);
	if (!map.pfn || !map.virtual)
		return;
	map.pfn = __phys_to_pfn(map.pfn);
	map.virtual &= PAGE_MASK;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE;
	iotable_init(&map, 1);
}
#endif
static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
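/*
 * Example: "vmalloc=384M" on the command line moves vmalloc_min down so that
 * 384MB sit between it and VMALLOC_END; requests are clamped to at least
 * 16MB and must leave at least 32MB of lowmem above PAGE_OFFSET.
 */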
phys_addr_t arm_lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
{
	phys_addr_t memblock_limit = 0;
	int i, j, highmem = 0;
	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		phys_addr_t size_limit;

		*bank = meminfo.bank[i];
		size_limit = bank->size;

		if (bank->start >= vmalloc_limit)
			highmem = 1;
		else
			size_limit = vmalloc_limit - bank->start;

		bank->highmem = highmem;
#ifdef CONFIG_HIGHMEM
		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (!highmem && bank->size > size_limit) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= size_limit;
				bank[1].start = vmalloc_limit;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = size_limit;
		}
#else
		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (bank->size > size_limit) {
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + size_limit - 1);
			bank->size = size_limit;
		}
#endif
		if (!bank->highmem) {
			phys_addr_t bank_end = bank->start + bank->size;

			if (bank_end > arm_lowmem_limit)
				arm_lowmem_limit = bank_end;

			/*
			 * Find the first non-section-aligned page, and point
			 * memblock_limit at it. This relies on rounding the
			 * limit down to be section-aligned, which happens at
			 * the end of this function.
			 *
			 * With this algorithm, the start or end of almost any
			 * bank can be non-section-aligned. The only exception
			 * is that the start of the bank 0 must be section-
			 * aligned, since otherwise memory would need to be
			 * allocated when mapping the start of bank 0, which
			 * occurs before any free memory is mapped.
			 */
			if (!memblock_limit) {
				if (!IS_ALIGNED(bank->start, SECTION_SIZE))
					memblock_limit = bank->start;
				else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
					memblock_limit = bank_end;
			}
		}
		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * impossible.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(arm_lowmem_limit - 1) + 1;

	/*
	 * Round the memblock limit down to a section size.  This
	 * helps to ensure that we will allocate memory from the
	 * last full section, which should be mapped.
	 */
	if (memblock_limit)
		memblock_limit = round_down(memblock_limit, SECTION_SIZE);
	if (!memblock_limit)
		memblock_limit = arm_lowmem_limit;

	memblock_set_current_limit(memblock_limit);
}
static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}
#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}
/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(const struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE * 2);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the modulearea.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
#ifdef CONFIG_KUSER_HELPERS
	map.type = MT_HIGH_VECTORS;
#else
	map.type = MT_LOW_VECTORS;
#endif
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.length = PAGE_SIZE * 2;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/* Now create a kernel read-only mapping */
	map.pfn += 1;
	map.virtual = 0xffff0000 + PAGE_SIZE;
	map.length = PAGE_SIZE;
	map.type = MT_LOW_VECTORS;
	create_mapping(&map);

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();
	else
		debug_ll_io_init();
	fill_pmd_gaps();

	/* Reserve fixed i/o space in VMALLOC region */
	pci_reserve_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}
static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}
static void __init map_lowmem(void)
{
	struct memblock_region *reg;
	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		if (end < kernel_x_start || start >= kernel_x_end) {
			map.pfn = __phys_to_pfn(start);
			map.virtual = __phys_to_virt(start);
			map.length = end - start;
			map.type = MT_MEMORY_RWX;

			create_mapping(&map);
		} else {
			/* This better cover the entire kernel */
			if (start < kernel_x_start) {
				map.pfn = __phys_to_pfn(start);
				map.virtual = __phys_to_virt(start);
				map.length = kernel_x_start - start;
				map.type = MT_MEMORY_RW;

				create_mapping(&map);
			}

			map.pfn = __phys_to_pfn(kernel_x_start);
			map.virtual = __phys_to_virt(kernel_x_start);
			map.length = kernel_x_end - kernel_x_start;
			map.type = MT_MEMORY_RWX;

			create_mapping(&map);

			if (kernel_x_end < end) {
				map.pfn = __phys_to_pfn(kernel_x_end);
				map.virtual = __phys_to_virt(kernel_x_end);
				map.length = end - kernel_x_end;
				map.type = MT_MEMORY_RW;

				create_mapping(&map);
			}
		}
	}
}
#ifdef CONFIG_ARM_LPAE
/*
 * early_paging_init() recreates boot time page table setup, allowing machines
 * to switch over to a high (>4G) address space on LPAE systems
 */
void __init early_paging_init(const struct machine_desc *mdesc,
			      struct proc_info_list *procinfo)
{
	pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
	unsigned long map_start, map_end;
	pgd_t *pgd0, *pgdk;
	pud_t *pud0, *pudk, *pud_start;
	pmd_t *pmd0, *pmdk;
	phys_addr_t phys;
	int i;

	if (!(mdesc->init_meminfo))
		return;

	/* remap kernel code and data */
	map_start = init_mm.start_code;
	map_end   = init_mm.brk;

	/* get a handle on things... */
	pgd0 = pgd_offset_k(0);
	pud_start = pud0 = pud_offset(pgd0, 0);
	pmd0 = pmd_offset(pud0, 0);

	pgdk = pgd_offset_k(map_start);
	pudk = pud_offset(pgdk, map_start);
	pmdk = pmd_offset(pudk, map_start);

	mdesc->init_meminfo();

	/* Run the patch stub to update the constants */
	fixup_pv_table(&__pv_table_begin,
		(&__pv_table_end - &__pv_table_begin) << 2);

	/*
	 * Cache cleaning operations for self-modifying code
	 * We should clean the entries by MVA but running a
	 * for loop over every pv_table entry pointer would
	 * just complicate the code.
	 */
	flush_cache_louis();

	/* remap level 1 table */
	for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
		set_pud(pud0,
			__pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
		pmd0 += PTRS_PER_PMD;
	}

	/* remap pmds for kernel mapping */
	phys = __pa(map_start) & PMD_MASK;
	do {
		*pmdk++ = __pmd(phys | pmdprot);
		phys += PMD_SIZE;
	} while (phys < map_end);

	cpu_switch_mm(pgd0, &init_mm);
	cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
	local_flush_bp_all();
	local_flush_tlb_all();
}

#else

void __init early_paging_init(const struct machine_desc *mdesc,
			      struct proc_info_list *procinfo)
{
	if (mdesc->init_meminfo)
		mdesc->init_meminfo();
}

#endif
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
	void *zero_page;

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	dma_contiguous_remap();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}