/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/lmb.h>
#include <linux/mmzone.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/sstate.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>

#define MAX_PHYS_ADDRESS	(1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]),
 * else if it is set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

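/* Sizing: with MAX_PHYS_ADDRESS = 2^42 and 256MB (2^28) chunks the
 * bitmap covers 2^14 = 16384 chunks, so KPTE_BITMAP_BYTES works out
 * to 2048 bytes, i.e. 256 unsigned longs on a 64-bit build.
 */
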
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static int pavail_ents __initdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

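/* set_dcache_dirty() records in page->flags both the PG_dcache_dirty
 * bit and the number of the cpu whose D-cache holds the dirty data.
 * The ldx/casx sequence below is a lock-free read-modify-write loop:
 * it retries until the compare-and-swap installs the updated flags
 * word atomically.
 */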
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

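/* The TSB is a software cache of translations consulted by the TLB
 * miss handlers.  On cheetah_plus and sun4v the kernel accesses its
 * TSBs via physical addresses (see tsb_phys_patch() further down),
 * which is why tsb_insert() converts the entry pointer with __pa().
 */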
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

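/* Called after a PTE has been installed: finish any lazy D-cache
 * flush still pending for the page, then preload the translation
 * into the proper TSB (base page size or hugepage) so the next
 * access avoids a TSB miss.
 */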
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

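/* When the page is not mapped anywhere yet, the flush can be done
 * lazily: just tag the page dirty for this cpu and let
 * update_mmu_cache() perform the flush once a mapping is set up.
 */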
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}

void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		unsigned long i, flags;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n",
	       global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
	       global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n",
	       global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
	       global_page_state(NR_SLAB_RECLAIMABLE) +
	       global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
	       global_page_state(NR_PAGETABLE));
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes.  Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

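/* The firmware "translations" property arrives unsorted and may
 * include mappings outside the OBP virtual address range.  The
 * routine below sorts the entries, keeps only those falling inside
 * LOW_OBP_ADDRESS -> HI_OBP_ADDRESS, zeroes out the remainder, and
 * on spitfire clears the diagnostic TTE bits firmware may have set.
 */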
/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries. */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}

static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, ret);
		prom_halt();
	}
}

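/* remap_kernel() locks the kernel image into the TLB, one 4MB
 * mapping at a time (hence the 0x400000 stride): through the
 * hypervisor's permanent-mapping call on sun4v, or through OBP's
 * dtlb/itlb load services on sun4u-class machines.
 */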
static unsigned long kern_large_tte(unsigned long paddr);

static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}

static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

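/* Context numbers are allocated out of mmu_context_bmap; the high
 * bits of tlb_context_cache hold a generation ("version") number.
 * When the bitmap fills up the version is bumped and the bitmap is
 * recycled, and smp_new_mmu_context_version() tells the other cpus
 * so that stale contexts get revalidated on their next use.
 */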
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) nor ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		lmb_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
	unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES
static bootmem_data_t plat_node_bdata[MAX_NUMNODES];

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;

static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}

static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	return start;
}
#else
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = 0;
	return end;
}
#endif

/* This must be invoked after performing all of the necessary
 * add_active_range() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	unsigned long paddr, num_pages, start_pfn, end_pfn;
	struct pglist_data *p;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	paddr = lmb_alloc_nid(sizeof(struct pglist_data),
			      SMP_CACHE_BYTES, nid, nid_range);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;

	if (p->node_spanned_pages) {
		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);

		paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
				      nid_range);
		if (!paddr) {
			prom_printf("Cannot allocate bootmap for nid[%d]\n",
				    nid);
			prom_halt();
		}
		node_masks[nid].bootmem_paddr = paddr;
	}
}

static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
}

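/* On sun4v the machine description (mdesc) supplies the NUMA layout:
 * "mblock" nodes describe physical memory ranges (plus an RA-to-PA
 * offset), "memory-latency-group" nodes give an address match/mask
 * per latency domain, and "group" nodes tie cpus to mlgroups.  The
 * helpers below walk those mdesc nodes.
 */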
#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;

static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void add_node_ranges(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start, end;

		start = lmb.memory.region[i].base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = nid_range(start, end, &nid);

			numadbg("Adding active range nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			add_active_range(nid,
					 start >> PAGE_SHIFT,
					 this_end >> PAGE_SHIFT);

			start = this_end;
		}
	}
}

static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%lx] latency[%lx] "
			"match[%lx] mask[%lx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mblock),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%lx] size[%lx] offset[%lx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpus_clear(*mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < NR_CPUS)
			cpu_set(*id, *mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}

static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%lx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu_mask(cpu, mask)
		numa_cpu_lookup_table[cpu] = index;
	numa_cpumask_lookup_table[index] = mask;

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu_mask(cpu, mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}

static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_sun4u(void)
{
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif

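/* Without usable NUMA information, all memory is treated as one
 * node 0 spanning every lmb-registered range.
 */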
static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned int i;

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start_pfn, end_pfn;

		if (!size)
			continue;

		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	allocate_node_data(0);

	node_set_online(0);
}

static void __init reserve_range_in_node(int nid, unsigned long start,
					 unsigned long end)
{
	numadbg("    reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
		nid, start, end);
	while (start < end) {
		unsigned long this_end;
		int n;

		this_end = nid_range(start, end, &n);
		if (n == nid) {
			numadbg("      MATCH reserving range [%lx:%lx]\n",
				start, this_end);
			reserve_bootmem_node(NODE_DATA(nid), start,
					     (this_end - start), BOOTMEM_DEFAULT);
		} else
			numadbg("      NO MATCH, advancing start to %lx\n",
				this_end);

		start = this_end;
	}
}

static void __init trim_reserved_in_node(int nid)
{
	int i;

	numadbg("  trim_reserved_in_node(%d)\n", nid);

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long start = lmb.reserved.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.reserved, i);
		unsigned long end = start + size;

		reserve_range_in_node(nid, start, end);
	}
}

static void __init bootmem_init_one_node(int nid)
{
	struct pglist_data *p;

	numadbg("bootmem_init_one_node(%d)\n", nid);

	p = NODE_DATA(nid);

	if (p->node_spanned_pages) {
		unsigned long paddr = node_masks[nid].bootmem_paddr;
		unsigned long end_pfn;

		end_pfn = p->node_start_pfn + p->node_spanned_pages;

		numadbg("  init_bootmem_node(%d, %lx, %lx, %lx)\n",
			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

		init_bootmem_node(p, paddr >> PAGE_SHIFT,
				  p->node_start_pfn, end_pfn);

		numadbg("  free_bootmem_with_active_regions(%d, %lx)\n",
			nid, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);

		trim_reserved_in_node(nid);

		numadbg("  sparse_memory_present_with_active_regions(%d)\n",
			nid);
		sparse_memory_present_with_active_regions(nid);
	}
}

static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;
	int nid;

	end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* XXX cpu notifier XXX */

	for_each_online_node(nid)
		bootmem_init_one_node(nid);

	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

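/* Under CONFIG_DEBUG_PAGEALLOC the linear kernel mapping is built
 * from ordinary page tables (kernel_map_range() below) rather than
 * the special 4MB/256MB TLB entries, so kernel_map_pages() can unmap
 * individual pages and catch stray accesses to freed memory.
 */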
#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}

static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

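/* The kernel TSBs are described to the sun4v hypervisor as an array
 * of hv_tsb_descr structures handed over via sun4v_mmu_tsb_ctx0() in
 * sun4v_ktsb_register() below; only one descriptor is used with
 * CONFIG_DEBUG_PAGEALLOC since the 4MB/256MB linear TSB is not built
 * in that configuration.
 */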
/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}

void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

/* paging_init() sets up the page tables */

extern void central_probe(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

/* Dummy function */
void __init setup_per_cpu_areas(void)
{
}

void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;

	/* These build time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary.  Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		     ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	sstate_booting();

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	lmb_init();

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		lmb_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	lmb_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	if (cmdline_memory_size)
		lmb_enforce_memory_limit(phys_base + cmdline_memory_size);

	lmb_analyze();
	lmb_dump_all();

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	init_kpte_bitmap();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	/* We must setup the per-cpu areas before we pull in the
	 * PROM and the MDESC.  The code there fills in cpu and
	 * other information into per-cpu data structures.
	 */
	real_setup_per_cpu_areas();

	prom_build_devicetree();

	if (tlb_type == hypervisor)
		sun4v_mdesc_init();

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	max_mapnr = last_valid_pfn;
#endif
	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");

	central_probe();
	cpu_probe();
}

int __init page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_rescan_ents __initdata;

/* Certain OBP calls, such as fetching "available" properties, can
 * claim physical memory.  So, along with initializing the valid
 * address bitmap, what we do here is refetch the physical available
 * memory list again, and make sure it provides at least as much
 * memory as 'pavail' does.
 */
static void setup_valid_addr_bitmap_from_pavail(void)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22,
						sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}

			prom_printf("mem_init: Lost memory in pavail\n");
			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
				    pavail[i].phys_addr,
				    pavail[i].reg_size);
			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
				    pavail_rescan[i].phys_addr,
				    pavail_rescan[i].reg_size);
			prom_printf("mem_init: Cannot continue, aborting.\n");
			prom_halt();

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	setup_valid_addr_bitmap_from_pavail();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i) {
		if (NODE_DATA(i)->node_spanned_pages != 0) {
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(i));
		}
	}
#else
	totalram_pages = free_all_bootmem();
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages -= 1;
	num_physpages = totalram_pages;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
1995
void free_initmem(void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds.  Page align
	 * for page sizes larger than 8k.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

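		/* 'addr' is in the kernel image mapping at KERNBASE;
		 * convert it to the equivalent linear-mapping address
		 * so that virt_to_page() below works.
		 */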
		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif

#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	(_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	(_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

#ifdef CONFIG_SPARSEMEM_VMEMMAP

#define VMEMMAP_CHUNK_SHIFT	22
#define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT)
#define VMEMMAP_CHUNK_MASK	~(VMEMMAP_CHUNK - 1UL)
#define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)

#define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
			  sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
unsigned long vmemmap_table[VMEMMAP_SIZE];

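/* Populate the virtual memmap in 4MB chunks: for each chunk that is
 * not yet valid, allocate a 4MB block and record a single huge-page
 * TTE for it in vmemmap_table (which the TLB miss handlers are
 * presumed to consult when faulting on the vmemmap region).
 */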
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long vstart = (unsigned long) start;
	unsigned long vend = (unsigned long) (start + nr);
	unsigned long phys_start = (vstart - VMEMMAP_BASE);
	unsigned long phys_end = (vend - VMEMMAP_BASE);
	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
	unsigned long end = VMEMMAP_ALIGN(phys_end);
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    _PAGE_CP_4V | _PAGE_CV_4V |
			    _PAGE_P_4V | _PAGE_W_4V);

	for (; addr < end; addr += VMEMMAP_CHUNK) {
		unsigned long *vmem_pp =
			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
		void *block;

		if (!(*vmem_pp & _PAGE_VALID)) {
			block = vmemmap_alloc_block(1UL << 22, node);
			if (!block)
				return -ENOMEM;

			*vmem_pp = pte_base | __pa(block);

			printk(KERN_INFO "[%p-%p] page_structs=%lu "
			       "node=%d entry=%lu/%lu\n", start, block, nr,
			       node,
			       addr >> VMEMMAP_CHUNK_SHIFT,
			       VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
		}
	}
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

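	/* The protection_map index is the low nibble of vm_flags:
	 * VM_READ (0x1), VM_WRITE (0x2), VM_EXEC (0x4), VM_SHARED (0x8).
	 * Private writable mappings get the COW 'page_copy' protections.
	 */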
	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}

static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

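	/* kern_linear_pte_xor[] values are XORed with a linear-space
	 * kernel virtual address to produce its TTE.  PAGE_OFFSET is
	 * 0xfffff80000000000, so the XOR cancels the PAGE_OFFSET bits
	 * (leaving the physical address) while setting the protection
	 * and page-size bits.
	 */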
#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				_PAGE_CACHE_4V | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
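/* Translate a page size in bytes to the pte size-field encoding for
 * the MMU type we are running on (hypervisor/sun4v vs. sun4u).
 */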
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}

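/* Build a pte for an I/O mapping: the physical page, non-cached
 * protections, the bus "space" selector in bits 32 and up, and the
 * size-field encoding for 'page_size'.
 */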
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
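/* Build the TTE (valid, 4MB, cacheable, privileged, writable,
 * executable; locked on sun4u) used for the kernel's locked 4MB
 * mappings of physical address 'paddr'.
 */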
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}