/*
 * Copyright (C) 1995  Linus Torvalds
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))
unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
EXPORT_SYMBOL(VMALLOC_RESERVE);
/* Create an L2 page table */
static pte_t * __init alloc_pte(void)
{
	return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}
/*
 * L2 page tables per controller.  We allocate these all at once from
 * the bootmem allocator and store them here.  This saves on kernel L2
 * page table memory, compared to allocating a full 64K page per L2
 * page table, and also means that in cases where we use huge pages,
 * we are guaranteed to later be able to shatter those huge pages and
 * switch to using these page tables instead, without requiring
 * further allocation.  Each l2_ptes[] entry points to the first page
 * table for the first hugepage-size piece of memory on the
 * controller; other page tables are just indexed directly, i.e. the
 * L2 page tables are contiguous in memory for each controller.
 */
static pte_t *l2_ptes[MAX_NUMNODES];
static int num_l2_ptes[MAX_NUMNODES];
static void init_prealloc_ptes(int node, int pages)
{
	BUG_ON(pages & (HV_L2_ENTRIES - 1));
	if (pages) {
		num_l2_ptes[node] = pages;
		l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
						HV_PAGE_TABLE_ALIGN, 0);
	}
}
pte_t *get_prealloc_pte(unsigned long pfn)
{
	int node = pfn_to_nid(pfn);
	pfn &= ~(-1UL << (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT));
	BUG_ON(node >= MAX_NUMNODES);
	BUG_ON(pfn >= num_l2_ptes[node]);
	return &l2_ptes[node][pfn];
}
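/*
 * Worked example (illustrative values, not taken from the headers): if
 * PAGE_SHIFT were 16 (64 KB pages) and NR_PA_HIGHBIT_SHIFT were 36, the
 * mask above would keep the low 20 bits of the pfn, i.e. the page's
 * offset within its memory controller, which is then used as a direct
 * index into the contiguous l2_ptes[node] array described above.
 */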
/*
 * What caching do we expect pages from the heap to have when
 * they are allocated during bootup?  (Once we've installed the
 * "real" swapper_pg_dir.)
 */
static int initial_heap_home(void)
{
#if CHIP_HAS_CBOX_HOME_MAP()
	if (hash_default)
		return PAGE_HOME_HASH;
#endif
	return smp_processor_id();
}
/*
 * Place a pointer to an L2 page table in a middle page
 * directory entry.
 */
static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
{
	phys_addr_t pa = __pa(page_table);
	unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN;
	pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn);

	BUG_ON((pa & (HV_PAGE_TABLE_ALIGN - 1)) != 0);
	pteval = pte_set_home(pteval, initial_heap_home());
	*(pte_t *)pmd = pteval;
	if (page_table != (pte_t *)pmd_page_vaddr(*pmd))
		BUG();
}
#ifdef __tilegx__

#if HV_L1_SIZE != HV_L2_SIZE
# error Rework assumption that L1 and L2 page tables are same size.
#endif

/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
static inline pmd_t *alloc_pmd(void)
{
	return (pmd_t *)alloc_pte();
}

static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
{
	assign_pte((pmd_t *)pud, (pte_t *)pmd);
}

#endif /* __tilegx__ */
/* Replace the given pmd with a full PTE table. */
void __init shatter_pmd(pmd_t *pmd)
{
	pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd));
	assign_pte(pmd, pte);
}
#ifdef CONFIG_HIGHMEM
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start,
					 unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr);
		if (pmd_none(*pmd))
			assign_pte(pmd, alloc_pte());
		vaddr += PMD_SIZE;
	}
}
#endif /* CONFIG_HIGHMEM */
#if CHIP_HAS_CBOX_HOME_MAP()

static int __initdata ktext_hash = 1;	/* .text pages */
static int __initdata kdata_hash = 1;	/* .data and .bss pages */
int __write_once hash_default = 1;	/* kernel allocator pages */
EXPORT_SYMBOL(hash_default);
int __write_once kstack_hash = 1;	/* if no homecaching, use h4h */
#endif /* CHIP_HAS_CBOX_HOME_MAP */
/*
 * CPUs to use for striping the pages of kernel data.  If hash-for-home
 * is available, this is only relevant if kcache_hash sets up the
 * .data and .bss to be page-homed, and we don't want the default mode
 * of using the full set of kernel cpus for the striping.
 */
static __initdata struct cpumask kdata_mask;
static __initdata int kdata_arg_seen;

int __write_once kdata_huge;	/* if no homecaching, small pages */
/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
{
	prot = pte_set_home(prot, home);
#if CHIP_HAS_CBOX_HOME_MAP()
	if (home == PAGE_HOME_IMMUTABLE) {
		if (ktext_hash)
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
		else
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
	}
#endif
	return prot;
}
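/*
 * Usage sketch (illustrative): construct_pgprot(PAGE_KERNEL, 5) yields a
 * kernel mapping whose cache home is cpu 5, while
 * construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH) yields a hash-for-home
 * mapping; init_pgprot() below builds all of its return values this way.
 */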
/*
 * For a given kernel data VA, how should it be cached?
 * We return the complete pgprot_t with caching bits set.
 */
static pgprot_t __init init_pgprot(ulong address)
{
	int cpu;
	unsigned long page;
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

#if CHIP_HAS_CBOX_HOME_MAP()
	/* For kdata=huge, everything is just hash-for-home. */
	if (kdata_huge)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/* We map the aliased pages of permanent text inaccessible. */
	if (address < (ulong) _sinittext - CODE_DELTA)
		return PAGE_NONE;

	/*
	 * We map read-only data non-coherent for performance.  We could
	 * use neighborhood caching on TILE64, but it's not clear it's a win.
	 */
	if ((address >= (ulong) __start_rodata &&
	     address < (ulong) __end_rodata) ||
	    address == (ulong) empty_zero_page) {
		return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
	}

	/* As a performance optimization, keep the boot init stack here. */
	if (address >= (ulong)&init_thread_union &&
	    address < (ulong)&init_thread_union + THREAD_SIZE)
		return construct_pgprot(PAGE_KERNEL, smp_processor_id());

#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
	/* Force the atomic_locks[] array page to be hash-for-home. */
	if (address == (ulong) atomic_locks)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/*
	 * Everything else that isn't data or bss is heap, so mark it
	 * with the initial heap home (hash-for-home, or this cpu).  This
	 * includes any addresses after the loaded image and any address before
	 * _einitdata, since we already captured the case of text before
	 * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
	 *
	 * All the LOWMEM pages that we mark this way will get their
	 * struct page homecache properly marked later, in set_page_homes().
	 * The HIGHMEM pages we leave with a default zero for their
	 * homes, but with a zero free_time we don't have to actually
	 * do a flush action the first time we use them, either.
	 */
	if (address >= (ulong) _end || address < (ulong) _einitdata)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Use hash-for-home if requested for data/bss. */
	if (kdata_hash)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/*
	 * Make the w1data homed like heap to start with, to avoid
	 * making it part of the page-striped data area when we're just
	 * going to convert it to read-only soon anyway.
	 */
	if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

	/*
	 * Otherwise we just hand out consecutive cpus.  To avoid
	 * requiring this function to hold state, we just walk forward from
	 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
	 * the requested address, while walking cpu home around kdata_mask.
	 * This is typically no more than a dozen or so iterations.
	 */
	page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
	BUG_ON(address < page || address >= (ulong)_end);
	cpu = cpumask_first(&kdata_mask);
	for (; page < address; page += PAGE_SIZE) {
		if (page >= (ulong)&init_thread_union &&
		    page < (ulong)&init_thread_union + THREAD_SIZE)
			continue;
		if (page == (ulong)empty_zero_page)
			continue;
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
		if (page == (ulong)atomic_locks)
			continue;
#endif
		cpu = cpumask_next(cpu, &kdata_mask);
		if (cpu == NR_CPUS)
			cpu = cpumask_first(&kdata_mask);
	}

	return construct_pgprot(PAGE_KERNEL, cpu);
}
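/*
 * Illustrative example (not part of the original source): with a four-cpu
 * kdata_mask of 0-3, the walk above homes consecutive .data/.bss pages on
 * cpus 0, 1, 2, 3, 0, 1, ... starting from the first page after
 * __w1data_end; pages that already received a dedicated home (the boot
 * init stack, empty_zero_page, atomic_locks) are skipped so they do not
 * consume a slot in the stripe.
 */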
/*
 * This function sets up how we cache the kernel text.  If we have
 * hash-for-home support, normally that is used instead (see the
 * kcache_hash boot flag for more information).  But if we end up
 * using a page-based caching technique, this option sets up the
 * details of that.  In addition, the "ktext=nocache" option may
 * always be used to disable local caching of text pages, if desired.
 */

static int __initdata ktext_arg_seen;
static int __initdata ktext_small;
static int __initdata ktext_local;
static int __initdata ktext_all;
static int __initdata ktext_nondataplane;
static int __initdata ktext_nocache;
static struct cpumask __initdata ktext_mask;
static int __init setup_ktext(char *str)
{
	if (str == NULL)
		return -EINVAL;

	/* If you have a leading "nocache", turn off ktext caching */
	if (strncmp(str, "nocache", 7) == 0) {
		ktext_nocache = 1;
		pr_info("ktext: disabling local caching of kernel text\n");
		str += 7;
		if (*str == ',')
			++str;
		if (*str == '\0')
			return 0;
	}

	ktext_arg_seen = 1;

	/* Default setting on Tile64: use a huge page */
	if (strcmp(str, "huge") == 0)
		pr_info("ktext: using one huge locally cached page\n");

	/* Pay TLB cost but get no cache benefit: cache small pages locally */
	else if (strcmp(str, "local") == 0) {
		ktext_small = 1;
		ktext_local = 1;
		pr_info("ktext: using small pages with local caching\n");
	}

	/* Neighborhood cache ktext pages on all cpus. */
	else if (strcmp(str, "all") == 0) {
		ktext_small = 1;
		ktext_all = 1;
		pr_info("ktext: using maximal caching neighborhood\n");
	}

	/* Neighborhood ktext pages on specified mask */
	else if (cpulist_parse(str, &ktext_mask) == 0) {
		char buf[NR_CPUS * 5];
		cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
		if (cpumask_weight(&ktext_mask) > 1) {
			ktext_small = 1;
			pr_info("ktext: using caching neighborhood %s "
				"with small pages\n", buf);
		} else {
			pr_info("ktext: caching on cpu %s with one huge page\n",
				buf);
		}
	}

	else if (*str)
		return -EINVAL;

	return 0;
}

early_param("ktext", setup_ktext);
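/*
 * Boot-argument examples (illustrative, based on the parsing above):
 * "ktext=huge" keeps the single locally cached huge-page mapping,
 * "ktext=local" switches to small pages cached only on the local tile,
 * "ktext=all" neighborhood-caches text on every cpu, and a cpu list such
 * as "ktext=0-3" restricts the caching neighborhood to those cpus; a
 * leading "nocache," (e.g. "ktext=nocache,0-3") additionally disables
 * local caching of text pages.
 */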
static inline pgprot_t ktext_set_nocache(pgprot_t prot)
{
	if (!ktext_nocache)
		prot = hv_pte_set_nc(prot);
#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	else
		prot = hv_pte_set_no_alloc_l2(prot);
#endif
	return prot;
}
#ifndef __tilegx__
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va);
}
#else
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va);
	if (pud_none(*pud))
		assign_pmd(pud, alloc_pmd());
	return pmd_offset(pud, va);
}
#endif
/* Temporary page table we use for staging. */
static pgd_t pgtables[PTRS_PER_PGD]
 __attribute__((aligned(HV_PAGE_TABLE_ALIGN)));
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;
#if CHIP_HAS_CBOX_HOME_MAP()
	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			   " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}
#endif

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;
	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE - 1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}
	/*
	 * Set or check ktext_mask now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}
	address = MEM_SV_INTRPT;
	pmd = get_pmd(pgtables, address);
	pfn = 0;  /* code starts at PA 0 */
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_stext);
		pte = NULL;
		for (; address < (unsigned long)_einittext;
		     pfn++, address += PAGE_SIZE) {
			pte_ofs = pte_index(address);
			if (pte_ofs == 0) {
				if (pte)
					assign_pte(pmd++, pte);
				pte = alloc_pte();
			}
			if (!ktext_local) {
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		if (pte)
			assign_pte(pmd, pte);
	} else {
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
		if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					      cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_NO_L3);
		for (; address < (unsigned long)_einittext;
		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
	}
	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __get_cpu_var(current_asid),
			     swapper_pgprot);

	/*
	 * We just read swapper_pgprot and thus brought it into the cache,
	 * with its new home & caching mode.  When we start the other CPUs,
	 * they're going to reference swapper_pgprot via their initial fake
	 * VA-is-PA mappings, which cache everything locally.  At that
	 * time, if it's in our cache with a conflicting home, the
	 * simulator's coherence checker will complain.  So, flush it out
	 * of our cache; we're not going to ever use it again anyway.
	 */
	__insn_finv(&swapper_pgprot);
}
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On Tile, the only valid things for which we can just hand out unchecked
 * PTEs are the kernel code and data.  Anything else might change its
 * homing with time, and we wouldn't know to adjust the /dev/mem PTEs.
 * Note that init_thread_union is released to heap soon after boot,
 * so we include it in the init data.
 *
 * For TILE-Gx, we might want to consider allowing access to PA
 * regions corresponding to PCI space, etc.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	return pagenr < kaddr_to_pfn(_end) &&
		!(pagenr >= kaddr_to_pfn(&init_thread_union) ||
		  pagenr < kaddr_to_pfn(_einitdata)) &&
		!(pagenr >= kaddr_to_pfn(_sinittext) ||
		  pagenr <= kaddr_to_pfn(_einittext - 1));
}
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */
static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page = pfn_to_page(start);

	for (pfn = start; pfn < end; ) {
		/* Optimize by freeing pages in large batches */
		int order = __ffs(pfn);
		int count, i;
		struct page *p;

		if (order >= MAX_ORDER)
			order = MAX_ORDER - 1;
		count = 1 << order;
		while (pfn + count > end) {
			count >>= 1;
			--order;
		}
		for (p = page, i = 0; i < count; ++i, ++p) {
			__ClearPageReserved(p);
			/*
			 * Hacky direct set to avoid unnecessary
			 * lock take/release for EVERY page here.
			 */
			p->_count.counter = 0;
			p->_mapcount.counter = -1;
		}
		init_page_count(page);
		__free_pages(page, order);
		totalram_pages += count;

		page += count;
		pfn += count;
	}
}
static void __init set_non_bootmem_pages_init(void)
{
	struct zone *z;
	for_each_zone(z) {
		unsigned long start, end;
		int nid = z->zone_pgdat->node_id;
		int idx = zone_idx(z);

		start = z->zone_start_pfn;
		if (start == 0)
			continue;  /* bootmem */
		end = start + z->spanned_pages;
		if (idx == ZONE_NORMAL) {
			BUG_ON(start != node_start_pfn[nid]);
			start = node_free_pfn[nid];
		}
#ifdef CONFIG_HIGHMEM
		if (idx == ZONE_HIGHMEM)
			totalhigh_pages += z->spanned_pages;
#endif
		if (kdata_huge) {
			unsigned long percpu_pfn = node_percpu_pfn[nid];
			if (start < percpu_pfn && end > percpu_pfn)
				end = percpu_pfn;
		}
#ifdef CONFIG_PCI
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			if (end > pci_reserve_end_pfn)
				init_free_pfn_range(pci_reserve_end_pfn, end);
			end = pci_reserve_start_pfn;
		}
#endif
		init_free_pfn_range(start, end);
	}
}
/*
 * paging_init() sets up the page tables - note that all of lowmem is
 * already mapped by head.S.
 */
void __init paging_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long vaddr, end;
#endif
#ifdef __tilegx__
	pud_t *pud;
#endif
	pgd_t *pgd_base = swapper_pg_dir;

	kernel_physical_mapping_init(pgd_base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	permanent_kmaps_init(pgd_base);
#endif

#ifdef __tilegx__
	/*
	 * Since GX allocates just one pmd_t array worth of vmalloc space,
	 * we go ahead and allocate it statically here, then share it
	 * globally.  As a result we don't have to worry about any task
	 * changing init_mm once we get up and running, and there's no
	 * need for e.g. vmalloc_sync_all().
	 */
	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
	assign_pmd(pud, alloc_pmd());
#endif
}
/*
 * Walk the kernel page tables and derive the page_home() from
 * the PTEs, so that set_pte() can properly validate the caching
 * of all PTEs it sees.
 */
void __init set_page_homes(void)
{
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_FLATMEM
	max_mapnr = max_low_pfn;
#endif
}
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int i;
#ifndef __tilegx__
	void *last;
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_ADDR(LAST_PKMAP - 1) >= FIXADDR_START) {
		pr_err("fixmap and kmap areas overlap"
		       " - this will crash\n");
		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP - 1),
		       FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

	/* this will put all bootmem onto the freelists */
	totalram_pages += free_all_bootmem();

	/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
	set_non_bootmem_pages_init();

	codesize =  (unsigned long)&_etext - (unsigned long)&_text;
	datasize =  (unsigned long)&_end - (unsigned long)&_sdata;
	initsize =  (unsigned long)&_einittext - (unsigned long)&_sinittext;
	initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata;

	pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
	/*
	 * In debug mode, dump some interesting memory mappings.
	 */
#ifdef CONFIG_HIGHMEM
	printk(KERN_DEBUG "  KMAP    %#lx - %#lx\n",
	       FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1);
	printk(KERN_DEBUG "  PKMAP   %#lx - %#lx\n",
	       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
#ifdef CONFIG_HUGEVMAP
	printk(KERN_DEBUG "  HUGEMAP %#lx - %#lx\n",
	       HUGE_VMAP_BASE, HUGE_VMAP_END - 1);
#endif
	printk(KERN_DEBUG "  VMALLOC %#lx - %#lx\n",
	       _VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
	for (i = MAX_NUMNODES - 1; i >= 0; --i) {
		struct pglist_data *node = &node_data[i];
		if (node->node_present_pages) {
			unsigned long start = (unsigned long)
				pfn_to_kaddr(node->node_start_pfn);
			unsigned long end = start +
				(node->node_present_pages << PAGE_SHIFT);
			printk(KERN_DEBUG "  MEM%d    %#lx - %#lx\n",
			       i, start, end - 1);
		}
	}
#else
	last = high_memory;
	for (i = MAX_NUMNODES - 1; i >= 0; --i) {
		if ((unsigned long)vbase_map[i] != -1UL) {
			printk(KERN_DEBUG "  LOWMEM%d %#lx - %#lx\n",
			       i, (unsigned long) (vbase_map[i]),
			       (unsigned long) (last - 1));
			last = vbase_map[i];
		}
	}
#endif

#ifndef __tilegx__
	/*
	 * Convert from using one lock for all atomic operations to
	 * one per cpu.
	 */
	__init_atomic_per_cpu();
#endif
}
/*
 * this is for the non-NUMA, single node SMP system case.
 * Specifically, in the case of x86, we will always add
 * memory to the highmem for now.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
int arch_add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata = &contig_page_data;
	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES - 1;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif

struct kmem_cache *pgd_cache;
void __init pgtable_cache_init(void)
{
	pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}
#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
/*
 * The __w1data area holds data that is only written during initialization,
 * and is read-only and thus freely cacheable thereafter.  Fix the page
 * table entries that cover that region accordingly.
 */
static void mark_w1data_ro(void)
{
	/* Loop over page table entries */
	unsigned long addr = (unsigned long)__w1data_begin;
	BUG_ON((addr & (PAGE_SIZE - 1)) != 0);
	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
		unsigned long pfn = kaddr_to_pfn((void *)addr);
		pte_t *ptep = virt_to_pte(NULL, addr);
		BUG_ON(pte_huge(*ptep));   /* not relevant for kdata_huge */
		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
	}
}
#endif /* !CHIP_HAS_COHERENT_LOCAL_CACHE() */
984 static long __write_once initfree
;
986 static long __write_once initfree
= 1;
989 /* Select whether to free (1) or mark unusable (0) the __init pages. */
990 static int __init
set_initfree(char *str
)
993 if (strict_strtol(str
, 0, &val
) == 0) {
995 pr_info("initfree: %s free init pages\n",
996 initfree
? "will" : "won't");
1000 __setup("initfree=", set_initfree
);
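/*
 * Usage sketch (illustrative): "initfree=0" on the kernel command line
 * makes free_init_pages() below clear the PTEs instead of freeing the
 * pages, so any late reference to __init code or data faults;
 * CONFIG_DEBUG_PAGEALLOC simply makes 0 the default.
 */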
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = (unsigned long) begin;

	if (kdata_huge && !initfree) {
		pr_warning("Warning: ignoring initfree=0:"
			   " incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table.  We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_pte(NULL, addr);
		if (!initfree) {
			/*
			 * If debugging page accesses then do not free
			 * this memory but mark them not present - any
			 * buggy init-section access will create a
			 * kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
		__ClearPageReserved(page);
		init_page_count(page);
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
void free_initmem(void)
{
	const unsigned long text_delta = MEM_SV_INTRPT - PAGE_OFFSET;

	/*
	 * Evict the dirty initdata on the boot cpu, evict the w1data
	 * wherever it's homed, and evict all the init code everywhere.
	 * We are guaranteed that no one will touch the init pages any
	 * more, and although other cpus may be touching the w1data,
	 * we only actually change the caching on tile64, which won't
	 * be keeping local copies in the other tiles' caches anyway.
	 */
	homecache_evict(&cpu_cacheable_map);

	/* Free the data pages that we won't use again after init. */
	free_init_pages("unused kernel data",
			(unsigned long)_sinitdata,
			(unsigned long)_einitdata);

	/*
	 * Free the pages mapped from 0xc0000000 that correspond to code
	 * pages from MEM_SV_INTRPT that we won't use again after init.
	 */
	free_init_pages("unused kernel text",
			(unsigned long)_sinittext - text_delta,
			(unsigned long)_einittext - text_delta);

#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
	/*
	 * Upgrade the .w1data section to globally cached.
	 * We don't do this on tilepro, since the cache architecture
	 * pretty much makes it irrelevant, and in any case we end
	 * up having racing issues with other tiles that may touch
	 * the data after we flush the cache but before we update
	 * the PTEs and flush the TLBs, causing sharer shootdowns
	 * later.  Even though this is to clean data, it seems like
	 * an unnecessary complication.
	 */
	mark_w1data_ro();
#endif

	/* Do a global TLB flush so everyone sees the changes. */
	flush_tlb_all();
}