/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/extable.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hugetlb.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/ioport.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/starfire.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/setup.h>

unsigned long kern_linear_pte_xor[4] __read_mostly;
static unsigned long page_cache4v_flag;

/* A bitmap, two bits for every 256MB of physical memory.  These two
 * bits determine what page size we use for kernel linear
 * translations.  They form an index into kern_linear_pte_xor[].  The
 * value in the indexed slot is XOR'd with the TLB miss virtual
 * address to form the resulting TTE.  The mapping is:
 *
 *	0	==>	4MB
 *	1	==>	256MB
 *	2	==>	2GB
 *	3	==>	16GB
 *
 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
 * support 2GB pages, and hopefully future cpus will support the 16GB
 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
 * if these larger page sizes are not supported by the cpu.
 *
 * It would be nice to determine this from the machine description
 * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */

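/* To make the XOR scheme above concrete (illustrative sketch; for the
 * sun4v case the slot values are built in sun4v_linear_pte_xor_finalize()
 * below): each slot holds (_PAGE_VALID | page-size bits) ^ PAGE_OFFSET,
 * OR'd with the cache, privileged and writable bits.  A linear-map
 * virtual address is PAGE_OFFSET + paddr, and the attribute bits do not
 * overlap the physical-address bits, so XOR'ing the miss address with
 * the slot value cancels PAGE_OFFSET and yields a TTE whose PA field is
 * paddr and whose remaining bits are the attributes.
 */
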
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
 * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static unsigned long cpu_pgsz_mask;

#define MAX_BANKS	1024

static struct linux_prom64_registers pavail[MAX_BANKS];
static int pavail_ents;

u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	phandle node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

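/* Example of the encoding used by the helpers below: on a dcache-dirty
 * page, page->flags has PG_dcache_dirty set and the owning cpu number
 * stored starting at bit PG_dcache_cpu_shift (bit 32), so
 * dcache_dirty_cpu(page) recovers that cpu by shifting right by
 * PG_dcache_cpu_shift and masking with PG_dcache_cpu_mask.
 */
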
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "and %%g7, %1, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx [%2], %%g7\n\t"
			     "srlx %%g7, %4, %%g1\n\t"
			     "and %%g1, %3, %%g1\n\t"
			     "cmp %%g1, %0\n\t"
			     "bne,pn %%icc, 2f\n\t"
			     " andn %%g7, %1, %%g1\n\t"
			     "casx [%2], %%g7, %%g1\n\t"
			     "cmp %%g7, %%g1\n\t"
			     "bne,pn %%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;

static void flush_dcache(unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
		if (pg_flags & (1UL << PG_dcache_dirty)) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}
}

/* mm->context.lock must be held */
static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
				    unsigned long tsb_hash_shift, unsigned long address,
				    unsigned long tte)
{
	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
	unsigned long tag;

	if (unlikely(!tsb))
		return;

	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, tte);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init add_huge_page_size(unsigned long size)
{
	unsigned int order;

	if (size_to_hstate(size))
		return;

	order = ilog2(size) - PAGE_SHIFT;
	hugetlb_add_hstate(order);
}

static int __init hugetlbpage_init(void)
{
	add_huge_page_size(1UL << HPAGE_64K_SHIFT);
	add_huge_page_size(1UL << HPAGE_SHIFT);
	add_huge_page_size(1UL << HPAGE_256MB_SHIFT);
	add_huge_page_size(1UL << HPAGE_2GB_SHIFT);

	return 0;
}

arch_initcall(hugetlbpage_init);

static int __init setup_hugepagesz(char *string)
{
	unsigned long long hugepage_size;
	unsigned int hugepage_shift;
	unsigned short hv_pgsz_idx;
	unsigned int hv_pgsz_mask;
	int rc = 0;

	hugepage_size = memparse(string, &string);
	hugepage_shift = ilog2(hugepage_size);

	switch (hugepage_shift) {
	case HPAGE_2GB_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
		break;
	case HPAGE_256MB_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
		break;
	case HPAGE_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_4MB;
		hv_pgsz_idx = HV_PGSZ_IDX_4MB;
		break;
	case HPAGE_64K_SHIFT:
		hv_pgsz_mask = HV_PGSZ_MASK_64K;
		hv_pgsz_idx = HV_PGSZ_IDX_64K;
		break;
	default:
		hv_pgsz_mask = 0;
	}

	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
		pr_err("hugepagesz=%llu not supported by MMU.\n",
		       hugepage_size);
		goto out;
	}

	add_huge_page_size(hugepage_size);
	rc = 1;

out:
	return rc;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif	/* CONFIG_HUGETLB_PAGE */

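/* For example, booting with "hugepagesz=2G" on the kernel command line
 * takes the HPAGE_2GB_SHIFT case above; the size is only registered via
 * add_huge_page_size() if cpu_pgsz_mask, read from the machine
 * description, advertises HV_PGSZ_MASK_2GB.
 */
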
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	unsigned long flags;
	pte_t pte = *ptep;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
	if (!pte_accessible(mm, pte))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
	    is_hugetlb_pmd(__pmd(pte_val(pte)))) {
		/* We are fabricating 8MB pages using 4MB real hw pages.  */
		pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					address, pte_val(pte));
	} else
#endif
		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
					address, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
EXPORT_SYMBOL(flush_dcache_page);

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
EXPORT_SYMBOL(flush_icache_range);

void mmu_info(struct seq_file *m)
{
	static const char *pgsz_strings[] = {
		"8K", "64K", "512K", "4MB", "32MB",
		"256MB", "2GB", "16GB",
	};
	int i, printed;

	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

	seq_printf(m, "MMU PGSZs\t: ");
	printed = 0;
	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
		if (cpu_pgsz_mask & (1UL << i)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", pgsz_strings[i]);
			printed++;
		}
	}
	seq_putc(m, '\n');

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];

		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}

	/* Force execute bit on.  */
	for (i = 0; i < prom_trans_ents; i++)
		prom_trans[i].data |= (tlb_type == hypervisor ?
				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}

static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs(get_fs());

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
EXPORT_SYMBOL(__flush_dcache_range);

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};

static void mmu_context_wrap(void)
{
	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
	unsigned long new_ver, new_ctx, old_ctx;
	struct mm_struct *mm;
	int cpu;

	bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);

	/* Reserve kernel context */
	set_bit(0, mmu_context_bmap);

	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
	if (unlikely(new_ver == 0))
		new_ver = CTX_FIRST_VERSION;
	tlb_context_cache = new_ver;

	/*
	 * Make sure that any new mm that are added into per_cpu_secondary_mm,
	 * are going to go through get_new_mmu_context() path.
	 */
	mb();

	/*
	 * Update the version to current on those CPUs that had a valid
	 * secondary context.
	 */
	for_each_online_cpu(cpu) {
		/*
		 * If a new mm is stored after we took this mm from the array,
		 * it will go into get_new_mmu_context() path, because we
		 * already bumped the version in tlb_context_cache.
		 */
		mm = per_cpu(per_cpu_secondary_mm, cpu);

		if (unlikely(!mm || mm == &init_mm))
			continue;

		old_ctx = mm->context.sparc64_ctx_val;
		if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
			set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
			mm->context.sparc64_ctx_val = new_ctx;
		}
	}
}

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;

	spin_lock(&ctx_alloc_lock);
retry:
	/* wrap might have happened, test again if our context became valid */
	if (unlikely(CTX_VALID(mm->context)))
		goto out;
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			mmu_context_wrap();
			goto retry;
		}
	}
	if (mm->context.sparc64_ctx_val)
		cpumask_clear(mm_cpumask(mm));
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx&63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
out:
	spin_unlock(&ctx_alloc_lock);
}

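/* A context value is thus the concatenation of a generation ("version")
 * in the bits covered by CTX_VERSION_MASK and a context number in the
 * low CTX_NR_BITS.  mmu_context_wrap() bumps the version by
 * CTX_FIRST_VERSION once the number space is exhausted, which is what
 * the CTX_VALID() test above is checked against.
 */
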
static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		memblock_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long match;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};

static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
{
	struct mdesc_mblock *m = NULL;
	int i;

	for (i = 0; i < num_mblocks; i++) {
		m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			break;
		}
	}

	return m;
}

static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
{
	int prev_nid, new_nid;

	prev_nid = -1;
	for ( ; start < end; start += PAGE_SIZE) {
		for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
			struct node_mem_mask *p = &node_masks[new_nid];

			if ((start & p->mask) == p->match) {
				if (prev_nid == -1)
					prev_nid = new_nid;
				break;
			}
		}

		if (new_nid == num_node_masks) {
			prev_nid = 0;
			WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
				  start);
			break;
		}

		if (prev_nid != new_nid)
			break;
	}
	*nid = prev_nid;

	return start > end ? end : start;
}

static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
{
	u64 ret_end, pa_start, m_mask, m_match, m_end;
	struct mdesc_mblock *mblock;
	int _nid, i;

	if (tlb_type != hypervisor)
		return memblock_nid_range_sun4u(start, end, nid);

	mblock = addr_to_mblock(start);
	if (!mblock) {
		WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
			  start);

		_nid = 0;
		ret_end = end;
		goto done;
	}

	pa_start = start + mblock->offset;
	m_match = 0;
	m_mask = 0;

	for (_nid = 0; _nid < num_node_masks; _nid++) {
		struct node_mem_mask *const m = &node_masks[_nid];

		if ((pa_start & m->mask) == m->match) {
			m_match = m->match;
			m_mask = m->mask;
			break;
		}
	}

	if (num_node_masks == _nid) {
		/* We could not find NUMA group, so default to 0, but lets
		 * search for latency group, so we could calculate the correct
		 * end address that we return
		 */
		_nid = 0;

		for (i = 0; i < num_mlgroups; i++) {
			struct mdesc_mlgroup *const m = &mlgroups[i];

			if ((pa_start & m->mask) == m->match) {
				m_match = m->match;
				m_mask = m->mask;
				break;
			}
		}

		if (i == num_mlgroups) {
			WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
				  start);

			ret_end = end;
			goto done;
		}
	}

	/*
	 * Each latency group has match and mask, and each memory block has an
	 * offset.  An address belongs to a latency group if its address matches
	 * the following formula: ((addr + offset) & mask) == match
	 * It is, however, slow to check every single page if it matches a
	 * particular latency group. As optimization we calculate end value by
	 * using bit arithmetics.
	 */
	m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
	m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
	ret_end = m_end > end ? end : m_end;

done:
	*nid = _nid;
	return ret_end;
}

/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	struct pglist_data *p;
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->node_id = nid;
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;
}

static void init_node_masks_nonnuma(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;
#endif

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = 0;
	node_masks[0].match = 0;
	num_node_masks = 1;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	cpumask_setall(&numa_cpumask_lookup_table[0]);
#endif
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void __init add_node_ranges(void)
{
	struct memblock_region *reg;
	unsigned long prev_max;

memblock_resized:
	prev_max = memblock.memory.max;

	for_each_memblock(memory, reg) {
		unsigned long size = reg->size;
		unsigned long start, end;

		start = reg->base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = memblock_nid_range(start, end, &nid);

			numadbg("Setting memblock NUMA node nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			memblock_set_node(start, this_end - start,
					  &memblock.memory, nid);
			if (memblock.memory.max != prev_max)
				goto memblock_resized;
			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
			"match[%llx] mask[%llx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
			       SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);

		/* The address-congruence-offset property is optional.
		 * Explicitly zero it to identify this case.
		 */
		if (val)
			m->offset = *val;
		else
			m->offset = 0UL;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpumask_clear(mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < nr_cpu_ids)
			cpumask_set_cpu(*id, mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

int __node_distance(int from, int to)
{
	if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
		pr_warn("Returning default NUMA distance value for %d->%d\n",
			from, to);
		return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	}
	return numa_latency[from][to];
}

static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		struct node_mem_mask *n = &node_masks[i];

		if ((grp->mask == n->mask) && (grp->match == n->match))
			break;
	}
	return i;
}

static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
						 u64 grp, int index)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		int tnode;
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);

		if (!m)
			continue;
		tnode = find_best_numa_node_for_mlgroup(m);
		if (tnode == MAX_NUMNODES)
			continue;
		numa_latency[index][tnode] = m->latency;
	}
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);

		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->match = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
		index, n->mask, n->match, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu(cpu, &mask)
		numa_cpu_lookup_table[cpu] = index;
	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu(cpu, &mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, j, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		find_numa_latencies_for_group(md, node, count);
		count++;
	}

	/* Normalize numa latency matrix according to ACPI SLIT spec. */
	for (i = 0; i < MAX_NUMNODES; i++) {
		u64 self_latency = numa_latency[i][i];

		for (j = 0; j < MAX_NUMNODES; j++) {
			numa_latency[i][j] =
				(numa_latency[i][j] * LOCAL_DISTANCE) /
				self_latency;
		}
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].match = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}

static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int i, j;
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	/* Some sane defaults for numa latency values */
	for (i = 0; i < MAX_NUMNODES; i++) {
		for (j = 0; j < MAX_NUMNODES; j++)
			numa_latency[i][j] = (i == j) ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	}

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
	allocate_node_data(0);
	node_set_online(0);
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;

	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* Dump memblock with node info. */
	memblock_dump_all();

	/* XXX cpu notifier XXX */

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

static unsigned long max_phys_bits = 40;

bool kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((long)addr < 0L) {
		unsigned long pa = __pa(addr);

		if ((pa >> max_phys_bits) != 0UL)
			return false;

		return pfn_valid(pa >> PAGE_SHIFT);
	}

	if (addr >= (unsigned long) KERNBASE &&
	    addr < (unsigned long)&_end)
		return true;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
EXPORT_SYMBOL(kern_addr_valid);

static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
					      unsigned long vend,
					      pud_t *pud)
{
	const unsigned long mask16gb = (1UL << 34) - 1UL;
	u64 pte_val = vstart;

	/* Each PUD is 8GB */
	if ((vstart & mask16gb) ||
	    (vend - vstart <= mask16gb)) {
		pte_val ^= kern_linear_pte_xor[2];
		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;

		return vstart + PUD_SIZE;
	}

	pte_val ^= kern_linear_pte_xor[3];
	pte_val |= _PAGE_PUD_HUGE;

	vend = vstart + mask16gb + 1UL;
	while (vstart < vend) {
		pud_val(*pud) = pte_val;

		pte_val += PUD_SIZE;
		vstart += PUD_SIZE;
		pud++;
	}

	return vstart;
}

static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
				   bool guard)
{
	if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
		return true;

	return false;
}

static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
					      unsigned long vend,
					      pmd_t *pmd)
{
	const unsigned long mask256mb = (1UL << 28) - 1UL;
	const unsigned long mask2gb = (1UL << 31) - 1UL;
	u64 pte_val = vstart;

	/* Each PMD is 8MB */
	if ((vstart & mask256mb) ||
	    (vend - vstart <= mask256mb)) {
		pte_val ^= kern_linear_pte_xor[0];
		pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;

		return vstart + PMD_SIZE;
	}

	if ((vstart & mask2gb) ||
	    (vend - vstart <= mask2gb)) {
		pte_val ^= kern_linear_pte_xor[1];
		pte_val |= _PAGE_PMD_HUGE;
		vend = vstart + mask256mb + 1UL;
	} else {
		pte_val ^= kern_linear_pte_xor[2];
		pte_val |= _PAGE_PMD_HUGE;
		vend = vstart + mask2gb + 1UL;
	}

	while (vstart < vend) {
		pmd_val(*pmd) = pte_val;

		pte_val += PMD_SIZE;
		vstart += PMD_SIZE;
		pmd++;
	}

	return vstart;
}

static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
				   bool guard)
{
	if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
		return true;

	return false;
}

static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot,
					    bool use_huge)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd)) {
			pud_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pgd_populate(&init_mm, pgd, new);
		}
		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
				vstart = kernel_map_hugepud(vstart, vend, pud);
				continue;
			}
			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (pmd_none(*pmd)) {
			pte_t *new;

			if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
				vstart = kernel_map_hugepmd(vstart, vend, pmd);
				continue;
			}
			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

static void __init flush_all_kernel_tsbs(void)
{
	int i;

	for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
		struct tsb *ent = &swapper_tsb[i];

		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
#ifndef CONFIG_DEBUG_PAGEALLOC
	for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
		struct tsb *ent = &swapper_4m_tsb[i];

		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
#endif
}

[1];
1838 static void __init
kernel_physical_mapping_init(void)
1840 unsigned long i
, mem_alloced
= 0UL;
1841 bool use_huge
= true;
1843 #ifdef CONFIG_DEBUG_PAGEALLOC
1846 for (i
= 0; i
< pall_ents
; i
++) {
1847 unsigned long phys_start
, phys_end
;
1849 phys_start
= pall
[i
].phys_addr
;
1850 phys_end
= phys_start
+ pall
[i
].reg_size
;
1852 mem_alloced
+= kernel_map_range(phys_start
, phys_end
,
1853 PAGE_KERNEL
, use_huge
);
1856 printk("Allocated %ld bytes for kernel page tables.\n",
1859 kvmap_linear_patch
[0] = 0x01000000; /* nop */
1860 flushi(&kvmap_linear_patch
[0]);
1862 flush_all_kernel_tsbs();
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)), false);

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

unsigned long PAGE_OFFSET;
EXPORT_SYMBOL(PAGE_OFFSET);

unsigned long VMALLOC_END  = 0x0000010000000000UL;
EXPORT_SYMBOL(VMALLOC_END);

unsigned long sparc64_va_hole_top =    0xfffff80000000000UL;
unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;

static void __init setup_page_offset(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		/* Cheetah/Panther support a full 64-bit virtual
		 * address, so we can use all that our page tables
		 * support.
		 */
		sparc64_va_hole_top =    0xfff0000000000000UL;
		sparc64_va_hole_bottom = 0x0010000000000000UL;

		max_phys_bits = 42;
	} else if (tlb_type == hypervisor) {
		switch (sun4v_chip_type) {
		case SUN4V_CHIP_NIAGARA1:
		case SUN4V_CHIP_NIAGARA2:
			/* T1 and T2 support 48-bit virtual addresses.  */
			sparc64_va_hole_top =    0xffff800000000000UL;
			sparc64_va_hole_bottom = 0x0000800000000000UL;

			max_phys_bits = 39;
			break;
		case SUN4V_CHIP_NIAGARA3:
			/* T3 supports 48-bit virtual addresses.  */
			sparc64_va_hole_top =    0xffff800000000000UL;
			sparc64_va_hole_bottom = 0x0000800000000000UL;

			max_phys_bits = 43;
			break;
		case SUN4V_CHIP_NIAGARA4:
		case SUN4V_CHIP_NIAGARA5:
		case SUN4V_CHIP_SPARC64X:
		case SUN4V_CHIP_SPARC_M6:
			/* T4 and later support 52-bit virtual addresses.  */
			sparc64_va_hole_top =    0xfff8000000000000UL;
			sparc64_va_hole_bottom = 0x0008000000000000UL;
			max_phys_bits = 47;
			break;
		case SUN4V_CHIP_SPARC_M7:
		case SUN4V_CHIP_SPARC_SN:
			/* M7 and later support 52-bit virtual addresses.  */
			sparc64_va_hole_top =    0xfff8000000000000UL;
			sparc64_va_hole_bottom = 0x0008000000000000UL;
			max_phys_bits = 49;
			break;
		case SUN4V_CHIP_SPARC_M8:
		default:
			/* M8 and later support 54-bit virtual addresses.
			 * However, restricting M8 and above VA bits to 53
			 * as 4-level page table cannot support more than
			 * 53 VA bits.
			 */
			sparc64_va_hole_top =    0xfff0000000000000UL;
			sparc64_va_hole_bottom = 0x0010000000000000UL;
			max_phys_bits = 51;
			break;
		}
	}

	if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
		prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
			    max_phys_bits);
		prom_halt();
	}

	PAGE_OFFSET = sparc64_va_hole_top;
	VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
		       (sparc64_va_hole_bottom >> 2));

	pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
		PAGE_OFFSET, max_phys_bits);
	pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
		VMALLOC_START, VMALLOC_END);
	pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
		VMEMMAP_BASE, VMEMMAP_BASE << 1);
}

static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush %0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

2020 #ifndef CONFIG_DEBUG_PAGEALLOC
2021 #define NUM_KTSB_DESCR 2
2023 #define NUM_KTSB_DESCR 1
2025 static struct hv_tsb_descr ktsb_descr
[NUM_KTSB_DESCR
];
/* The swapper TSBs are loaded with a base sequence of:
 *
 *	sethi	%uhi(SYMBOL), REG1
 *	sethi	%hi(SYMBOL), REG2
 *	or	REG1, %ulo(SYMBOL), REG1
 *	or	REG2, %lo(SYMBOL), REG2
 *	sllx	REG1, 32, REG1
 *	or	REG1, REG2, REG1
 *
 * When we use physical addressing for the TSB accesses, we patch the
 * first four instructions in the above sequence.
 */

static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
{
	unsigned long high_bits, low_bits;

	high_bits = (pa >> 32) & 0xffffffff;
	low_bits = (pa >> 0) & 0xffffffff;

	while (start < end) {
		unsigned int *ia = (unsigned int *)(unsigned long)*start;

		ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
		__asm__ __volatile__("flush %0" : : "r" (ia));

		ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
		__asm__ __volatile__("flush %0" : : "r" (ia + 1));

		ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
		__asm__ __volatile__("flush %0" : : "r" (ia + 2));

		ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
		__asm__ __volatile__("flush %0" : : "r" (ia + 3));

		start++;
	}
}

static void ktsb_phys_patch(void)
{
	extern unsigned int __swapper_tsb_phys_patch;
	extern unsigned int __swapper_tsb_phys_patch_end;
	unsigned long ktsb_pa;

	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
			    &__swapper_tsb_phys_patch_end, ktsb_pa);
#ifndef CONFIG_DEBUG_PAGEALLOC
	{
	extern unsigned int __swapper_4m_tsb_phys_patch;
	extern unsigned int __swapper_4m_tsb_phys_patch_end;
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
	}
#endif
}

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB/2GB/16GB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
				    HV_PGSZ_MASK_256MB |
				    HV_PGSZ_MASK_2GB |
				    HV_PGSZ_MASK_16GB) &
				   cpu_pgsz_mask);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}

void sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

static void __init sun4u_linear_pte_xor_finalize(void)
{
#ifndef CONFIG_DEBUG_PAGEALLOC
	/* This is where we would add Panther support for
	 * 32MB and 256MB pages.
	 */
#endif
}

static void __init sun4v_linear_pte_xor_finalize(void)
{
	unsigned long pagecv_flag;

	/* Bit 9 of TTE is no longer CV bit on M7 processor and it instead
	 * enables MCD error. Do not set bit 9 on M7 processor.
	 */
	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
	case SUN4V_CHIP_SPARC_M8:
	case SUN4V_CHIP_SPARC_SN:
		pagecv_flag = 0x00;
		break;
	default:
		pagecv_flag = _PAGE_CV_4V;
		break;
	}
#ifndef CONFIG_DEBUG_PAGEALLOC
	if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
		kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
	}

	if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
		kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
	}

	if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
		kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
			PAGE_OFFSET;
		kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
					   _PAGE_P_4V | _PAGE_W_4V);
	} else {
		kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
	}
#endif
}

/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

static phys_addr_t __init available_memory(void)
{
	phys_addr_t available = 0ULL;
	phys_addr_t pa_start, pa_end;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
				&pa_end, NULL)
		available = available + (pa_end - pa_start);

	return available;
}

#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

/* We need to exclude reserved regions. This exclusion will include
 * vmlinux and initrd. To be more precise the initrd size could be used to
 * compute a new lower limit because it is freed later during initialization.
 */
static void __init reduce_memory(phys_addr_t limit_ram)
{
	phys_addr_t avail_ram = available_memory();
	phys_addr_t pa_start, pa_end;
	u64 i;

	if (limit_ram >= avail_ram)
		return;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
				&pa_end, NULL) {
		phys_addr_t region_size = pa_end - pa_start;
		phys_addr_t clip_start = pa_start;

		avail_ram = avail_ram - region_size;
		/* Are we consuming too much? */
		if (avail_ram < limit_ram) {
			phys_addr_t give_back = limit_ram - avail_ram;

			region_size = region_size - give_back;
			clip_start = clip_start + give_back;
		}

		memblock_remove(clip_start, region_size);

		if (avail_ram <= limit_ram)
			break;
		i = 0UL;
	}
}

paging_init(void)
2276 unsigned long end_pfn
, shift
, phys_base
;
2277 unsigned long real_end
, i
;
2279 setup_page_offset();
2281 /* These build time checkes make sure that the dcache_dirty_cpu()
2282 * page->flags usage will work.
2284 * When a page gets marked as dcache-dirty, we store the
2285 * cpu number starting at bit 32 in the page->flags. Also,
2286 * functions like clear_dcache_dirty_cpu use the cpu mask
2287 * in 13-bit signed-immediate instruction fields.
2291 * Page flags must not reach into upper 32 bits that are used
2292 * for the cpu number
2294 BUILD_BUG_ON(NR_PAGEFLAGS
> 32);
2297 * The bit fields placed in the high range must not reach below
2298 * the 32 bit boundary. Otherwise we cannot place the cpu field
2299 * at the 32 bit boundary.
2301 BUILD_BUG_ON(SECTIONS_WIDTH
+ NODES_WIDTH
+ ZONES_WIDTH
+
2302 ilog2(roundup_pow_of_two(NR_CPUS
)) > 32);
2304 BUILD_BUG_ON(NR_CPUS
> 4096);
2306 kern_base
= (prom_boot_mapping_phys_low
>> ILOG2_4MB
) << ILOG2_4MB
;
2307 kern_size
= (unsigned long)&_end
- (unsigned long)KERNBASE
;
2309 /* Invalidate both kernel TSBs. */
2310 memset(swapper_tsb
, 0x40, sizeof(swapper_tsb
));
2311 #ifndef CONFIG_DEBUG_PAGEALLOC
2312 memset(swapper_4m_tsb
, 0x40, sizeof(swapper_4m_tsb
));
2315 /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
2316 * bit on M7 processor. This is a conflicting usage of the same
2317 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
2318 * Detection error on all pages and this will lead to problems
2319 * later. Kernel does not run with MCD enabled and hence rest
2320 * of the required steps to fully configure memory corruption
2321 * detection are not taken. We need to ensure TTE.mcde is not
2322 * set on M7 processor. Compute the value of cacheability
2323 * flag for use later taking this into consideration.
2325 switch (sun4v_chip_type
) {
2326 case SUN4V_CHIP_SPARC_M7
:
2327 case SUN4V_CHIP_SPARC_M8
:
2328 case SUN4V_CHIP_SPARC_SN
:
2329 page_cache4v_flag
= _PAGE_CP_4V
;
2332 page_cache4v_flag
= _PAGE_CACHE_4V
;
2336 if (tlb_type
== hypervisor
)
2337 sun4v_pgprot_init();
2339 sun4u_pgprot_init();
2341 if (tlb_type
== cheetah_plus
||
2342 tlb_type
== hypervisor
) {
2347 if (tlb_type
== hypervisor
)
2348 sun4v_patch_tlb_handlers();
2350 /* Find available physical memory...
2352 * Read it twice in order to work around a bug in openfirmware.
2353 * The call to grab this table itself can cause openfirmware to
2354 * allocate memory, which in turn can take away some space from
2355 * the list of available memory. Reading it twice makes sure
2356 * we really do get the final value.
2358 read_obp_translations();
2359 read_obp_memory("reg", &pall
[0], &pall_ents
);
2360 read_obp_memory("available", &pavail
[0], &pavail_ents
);
2361 read_obp_memory("available", &pavail
[0], &pavail_ents
);
2363 phys_base
= 0xffffffffffffffffUL
;
2364 for (i
= 0; i
< pavail_ents
; i
++) {
2365 phys_base
= min(phys_base
, pavail
[i
].phys_addr
);
2366 memblock_add(pavail
[i
].phys_addr
, pavail
[i
].reg_size
);
2369 memblock_reserve(kern_base
, kern_size
);
2371 find_ramdisk(phys_base
);
2373 if (cmdline_memory_size
)
2374 reduce_memory(cmdline_memory_size
);
2376 memblock_allow_resize();
2377 memblock_dump_all();
2379 set_bit(0, mmu_context_bmap
);
2381 shift
= kern_base
+ PAGE_OFFSET
- ((unsigned long)KERNBASE
);
2383 real_end
= (unsigned long)_end
;
2384 num_kernel_image_mappings
= DIV_ROUND_UP(real_end
- KERNBASE
, 1 << ILOG2_4MB
);
2385 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2386 num_kernel_image_mappings
);
2388 /* Set kernel pgd to upper alias so physical page computations
2391 init_mm
.pgd
+= ((shift
) / (sizeof(pgd_t
)));
2393 memset(swapper_pg_dir
, 0, sizeof(swapper_pg_dir
));
2395 inherit_prom_mappings();
2397 /* Ok, we can use our TLB miss and window trap handlers safely. */
2402 prom_build_devicetree();
2403 of_populate_present_mask();
2405 of_fill_in_cpu_data();
2408 if (tlb_type
== hypervisor
) {
2410 mdesc_populate_present_mask(cpu_all_mask
);
2412 mdesc_fill_in_cpu_data(cpu_all_mask
);
2414 mdesc_get_page_sizes(cpu_all_mask
, &cpu_pgsz_mask
);
2416 sun4v_linear_pte_xor_finalize();
2419 sun4v_ktsb_register();
2421 unsigned long impl
, ver
;
2423 cpu_pgsz_mask
= (HV_PGSZ_MASK_8K
| HV_PGSZ_MASK_64K
|
2424 HV_PGSZ_MASK_512K
| HV_PGSZ_MASK_4MB
);
2426 __asm__
__volatile__("rdpr %%ver, %0" : "=r" (ver
));
2427 impl
= ((ver
>> 32) & 0xffff);
2428 if (impl
== PANTHER_IMPL
)
2429 cpu_pgsz_mask
|= (HV_PGSZ_MASK_32MB
|
2430 HV_PGSZ_MASK_256MB
);
2432 sun4u_linear_pte_xor_finalize();
2435 /* Flush the TLBs and the 4M TSB so that the updated linear
2436 * pte XOR settings are realized for all mappings.
2439 #ifndef CONFIG_DEBUG_PAGEALLOC
2440 memset(swapper_4m_tsb
, 0x40, sizeof(swapper_4m_tsb
));
2444 /* Setup bootmem... */
2445 last_valid_pfn
= end_pfn
= bootmem_init(phys_base
);
2447 kernel_physical_mapping_init();
2450 unsigned long max_zone_pfns
[MAX_NR_ZONES
];
2452 memset(max_zone_pfns
, 0, sizeof(max_zone_pfns
));
2454 max_zone_pfns
[ZONE_NORMAL
] = end_pfn
;
2456 free_area_init_nodes(max_zone_pfns
);
2459 printk("Booting Linux...\n");
int page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int i;

	for_each_online_node(i)
		if (NODE_DATA(i)->node_spanned_pages)
			register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

	register_page_bootmem_info();
	free_all_bootmem();

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	mark_page_reserved(mem_map_zero);

	mem_init_print_info(NULL);

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
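/* Free (and poison) the pages backing the __init sections once boot has
 * finished.  The address arithmetic below converts the KERNBASE-relative
 * virtual address of each init page into its linear-mapping alias, since
 * the kernel image itself sits at kern_base physically.
 */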
void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free)
			free_reserved_page(virt_to_page(page));
	}
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif
pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
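/* With SPARSEMEM_VMEMMAP the struct page array is mapped through the page
 * tables built below, one PMD-sized (4MB) hugepage mapping per chunk of
 * the virtual memmap instead of individual 8K PTEs.
 */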
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
			       int node)
{
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);

	pte_base |= _PAGE_PMD_HUGE;

	vstart = vstart & PMD_MASK;
	vend = ALIGN(vend, PMD_SIZE);
	for (; vstart < vend; vstart += PMD_SIZE) {
		pgd_t *pgd = pgd_offset_k(vstart);
		unsigned long pte;
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd)) {
			pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);

			if (!new)
				return -ENOMEM;
			pgd_populate(&init_mm, pgd, new);
		}

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);

			if (!new)
				return -ENOMEM;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);

		pte = pmd_val(*pmd);
		if (!(pte & _PAGE_VALID)) {
			void *block = vmemmap_alloc_block(PMD_SIZE, node);

			if (!block)
				return -ENOMEM;

			pmd_val(*pmd) = pte_base | __pa(block);
		}
	}

	return 0;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
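/* prot_init_common() fills protection_map[] from the sun4u or sun4v
 * protection values passed in by the pgprot_init routines below.  The
 * table index is the combination of a vma's read/write/exec/shared
 * protection bits, so a private read+exec mapping resolves to
 * page_readonly with the exec bit kept, while the writable shared
 * combinations resolve to page_shared.
 */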
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
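/* The kern_linear_pte_xor[] values set up here and in sun4v_pgprot_init()
 * below are consumed by the TLB miss handlers: the miss virtual address
 * is XOR'd with the selected slot to produce the TTE.  Roughly (an
 * illustrative sketch only, the real work is done in assembler):
 *
 *	vaddr = PAGE_OFFSET + paddr;
 *	tte   = vaddr ^ kern_linear_pte_xor[slot];
 *	     == paddr | _PAGE_VALID | <page-size and protection bits>
 *
 * which works because each slot value is pre-XOR'd with PAGE_OFFSET,
 * cancelling the high linear-mapping bits of the virtual address.
 */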
static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				page_cache4v_flag | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = page_cache4v_flag;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
				   _PAGE_W_4V);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
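/* Translate a page size in bytes into the corresponding TTE size-field
 * encoding, using the sun4v encodings under the hypervisor and the sun4u
 * encodings otherwise.
 */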
static unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       page_cache4v_flag | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
			    unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_ZERO);
	pte_t *pte = NULL;

	if (page)
		pte = (pte_t *) page_address(page);

	return pte;
}
pgtable_t pte_alloc_one(struct mm_struct *mm,
			unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_ZERO);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		free_hot_cold_page(page, 0);
		return NULL;
	}
	return (pte_t *) page_address(page);
}
void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static void __pte_free(pgtable_t pte)
{
	struct page *page = virt_to_page(pte);

	pgtable_page_dtor(page);
	__free_page(page);
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	__pte_free(pte);
}

void pgtable_free(void *table, bool is_page)
{
	if (is_page)
		__pte_free(table);
	else
		kmem_cache_free(pgtable_cache, table);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	unsigned long pte, flags;
	struct mm_struct *mm;
	pmd_t entry = *pmd;

	if (!pmd_large(entry) || !pmd_young(entry))
		return;

	pte = pmd_val(entry);

	/* Don't insert a non-valid PMD into the TSB, we'll deadlock.  */
	if (!(pte & _PAGE_VALID))
		return;

	/* We are fabricating 8MB pages using 4MB real hw pages.  */
	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));

	mm = vma->vm_mm;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					addr, pte);

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
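/* Huge page TSB setup: the first huge-page fault in an address space
 * grows a dedicated MM_TSB_HUGE TSB.  On UltraSPARC-III+ (cheetah_plus)
 * the context register is also rewritten so that the second half of the
 * D-TLB services huge pages, after which every CPU running this mm must
 * reload its secondary context via context_reload().
 */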
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

void hugetlb_setup(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct tsb_config *tp;

	if (faulthandler_disabled() || !mm) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
		die_if_kernel("HugeTSB in atomic", regs);
	}

	tp = &mm->context.tsb_block[MM_TSB_HUGE];
	if (likely(tp->tsb == NULL))
		tsb_grow(mm, MM_TSB_HUGE, 0);

	tsb_context_switch(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		bool need_context_reload = false;
		unsigned long ctx;

		spin_lock_irq(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			need_context_reload = true;
		}
		spin_unlock_irq(&ctx_alloc_lock);

		if (need_context_reload)
			on_each_cpu(context_reload, mm, 0);
	}
}
#endif
static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static inline resource_size_t compute_kern_paddr(void *addr)
{
	return (resource_size_t) (addr - KERNBASE + kern_base);
}
static void __init kernel_lds_init(void)
{
	code_resource.start = compute_kern_paddr(_text);
	code_resource.end   = compute_kern_paddr(_etext - 1);
	data_resource.start = compute_kern_paddr(_etext);
	data_resource.end   = compute_kern_paddr(_edata - 1);
	bss_resource.start  = compute_kern_paddr(__bss_start);
	bss_resource.end    = compute_kern_paddr(_end - 1);
}
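/* Publish the firmware-provided RAM ranges as "System RAM" resources and
 * nest the kernel code/data/bss resources inside them so that they are
 * visible in /proc/iomem.
 */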
static int __init report_memory(void)
{
	int i;
	struct resource *res;

	kernel_lds_init();

	for (i = 0; i < pavail_ents; i++) {
		res = kzalloc(sizeof(struct resource), GFP_KERNEL);

		if (!res) {
			pr_warn("Failed to allocate resource.\n");
			break;
		}

		res->name = "System RAM";
		res->start = pavail[i].phys_addr;
		res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		if (insert_resource(&iomem_resource, res) < 0) {
			pr_warn("Resource insertion failed.\n");
			break;
		}

		insert_resource(res, &code_resource);
		insert_resource(res, &data_resource);
		insert_resource(res, &bss_resource);
	}

	return 0;
}
arch_initcall(report_memory);
#ifdef CONFIG_SMP
#define do_flush_tlb_kernel_range	smp_flush_tlb_kernel_range
#else
#define do_flush_tlb_kernel_range	__flush_tlb_kernel_range
#endif
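/* Kernel TLB range flushes must leave the firmware (OBP) translations
 * alone, so a range overlapping [LOW_OBP_ADDRESS, HI_OBP_ADDRESS) is
 * flushed as two pieces with the OBP hole skipped.
 */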
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
		if (start < LOW_OBP_ADDRESS) {
			flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
		}
		if (end > HI_OBP_ADDRESS) {
			flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
		}
	} else {
		flush_tsb_kernel_range(start, end);
		do_flush_tlb_kernel_range(start, end);
	}
}