/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 */
#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */
unsigned long htab_hash_mask;

struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
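
/* (Illustrative note) mmu_linear_psize governs the bolted linear mapping
 * of RAM and mmu_virtual_psize the ordinary page size; both index into
 * mmu_psize_defs[], which htab_init_page_sizes() fills in below. */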

/* There are definitions of page sizes arrays to be used when none
 * is provided by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = { .shift = 12, .sllp = 0, .penc = 0, .avpnm = 0, .tlbiel = 0 },
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K]  = { .shift = 12, .sllp = 0, .penc = 0, .avpnm = 0, .tlbiel = 1 },
	[MMU_PAGE_16M] = { .shift = 24, .sllp = SLB_VSID_L, .penc = 0, .avpnm = 0x1UL, .tlbiel = 0 },
};

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long mode, int psize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	unsigned long tmp_mode;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long vpn, hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr);
		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);

		vpn = va >> shift;
		tmp_mode = mode;

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(vaddr))
			tmp_mode = mode | HPTE_R_N;

		hash = hpt_hash(va, shift);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
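
		/* (Illustrative note) hpt_hash() folds the virtual address
		 * into a hash value; masking with htab_hash_mask selects a
		 * PTE group, and multiplying by HPTES_PER_GROUP (8 HPTE
		 * slots per group) yields the group's first slot index. */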

		/* The code below can be cleaned up once ppc_md.probe() can
		 * set up the hash callbacks, thus we can just use the
		 * normal insert callback here.
		 */
#ifdef CONFIG_PPC_ISERIES
		if (systemcfg->platform == PLATFORM_ISERIES_LPAR)
			ret = iSeries_hpte_insert(hpteg, va,
						  virt_to_abs(paddr),
						  tmp_mode,
						  HPTE_V_BOLTED,
						  psize);
		else
#endif
#ifdef CONFIG_PPC_PSERIES
		if (systemcfg->platform & PLATFORM_LPAR)
			ret = pSeries_lpar_hpte_insert(hpteg, va,
						       virt_to_abs(paddr),
						       tmp_mode,
						       HPTE_V_BOLTED,
						       psize);
		else
#endif
#ifdef CONFIG_PPC_MULTIPLATFORM
			ret = native_hpte_insert(hpteg, va,
						 virt_to_abs(paddr),
						 tmp_mode, HPTE_V_BOLTED,
						 psize);
#endif
		if (ret < 0)
			break;
	}
	return ret < 0 ? ret : 0;
}

static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
		while (size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while (size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}
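
			/* (Layout note, illustrative) each entry of
			 * "ibm,segment-page-sizes" is { shift, slbenc, count,
			 * { pte-shift, penc } * count }; the loop above walks
			 * the pairs looking for the encoding that matches
			 * this segment's page shift. */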
			switch (shift) {
			case 0xc:
				idx = MMU_PAGE_4K;
				break;
			case 0x10:
				idx = MMU_PAGE_64K;
				break;
			case 0x14:
				idx = MMU_PAGE_1M;
				break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			case 0x22:
				idx = MMU_PAGE_16G;
				break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}

static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree, let's fall back on the known size
	 * list for 16M capable GP & GR
	 */
	if ((systemcfg->platform != PLATFORM_ISERIES_LPAR) &&
	    cpu_has_feature(CPU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K, which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;

	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K if cache inhibited large pages are supported by the
	 * processor
	 */
#ifdef CONFIG_PPC_64K_PAGES
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
		mmu_virtual_psize = MMU_PAGE_64K;
#endif

	printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift);

#ifdef CONFIG_HUGETLB_PAGE
	/* Init large page size. Currently, we pick 16M or 1M depending
	 * on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_huge_psize = MMU_PAGE_16M;
	/* With 4k/4level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_huge_psize = MMU_PAGE_1M;

	/* Calculate HPAGE_SHIFT and sanity check it */
	if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
	    mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
		HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
	else
		HPAGE_SHIFT = 0; /* No huge pages */
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}

static unsigned long __init htab_get_table_size(void)
{
	unsigned long rnd_mem_size, pteg_count;

	/* If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
	if (rnd_mem_size < systemcfg->physicalMemorySize)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
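
	/* Worked example (illustrative): with 1GB of RAM, rnd_mem_size is
	 * 2^30, so pteg_count = max(2^30 >> 13, 2^11) = 2^17 PTEGs. At 128
	 * bytes per PTEG (8 HPTEs of 16 bytes) that gives a 16MB hash
	 * table, i.e. one PTEG per two 4K pages as intended. */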
	return pteg_count << 7;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, start,
				 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
				 mmu_linear_psize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init htab_initialize(void)
{
	unsigned long table, htab_size_bytes;
	unsigned long pteg_count;
	unsigned long mode_rw;
	unsigned long base = 0, size = 0;
	int i;

	extern unsigned long tce_alloc_start, tce_alloc_end;

	DBG(" -> htab_initialize()\n");

	/* Initialize page sizes */
	htab_init_page_sizes();

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (systemcfg->platform & PLATFORM_LPAR) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.
		 */
		table = lmb_alloc(htab_size_bytes, htab_size_bytes);
		BUG_ON(table == 0);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;
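
		/* (Illustrative note) SDR1 carries the hash table's physical
		 * base in its high-order bits and HTABSIZE =
		 * log2(pteg_count) - 11 in its low-order bits; the table is
		 * size-aligned, so the two fields can simply be added. */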

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);
	}

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped
	 * non-cacheable later on
	 */

	/* Create the bolted linear mapping in the hash table */
	for (i = 0; i < lmb.memory.cnt; i++) {
		base = lmb.memory.region[i].base + KERNELBASE;
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							 base, mode_rw,
							 mmu_linear_psize));
			if ((base + size) > (dart_tablebase + 16*MB))
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							 base + size,
							 dart_tablebase+16*MB,
							 mode_rw,
							 mmu_linear_psize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, base,
					 mode_rw, mmu_linear_psize));
	}

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start += KERNELBASE;
		tce_alloc_end += KERNELBASE;

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 tce_alloc_start, mode_rw,
					 mmu_linear_psize));
	}

	DBG(" <- htab_initialize()\n");
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
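/* (Illustrative note) PG_arch_1 acts as an "icache clean" marker here:
 * the flush below happens at most once per page for execute faults
 * (trap 0x400); other faults get a no-execute HPTE instead. */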
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx)\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (!mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		vsid = get_vsid(mm->context.id, ea);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		break;
	default:
		/* Not a valid range, send the problem up to do_page_fault */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

	/* Handle hugepage regions */
	if (unlikely(in_hugepage_area(mm->context, ea))) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local);
	}

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_virtual_psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}

void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;

	/* We don't want huge pages prefaulted for now
	 */
	if (unlikely(in_hugepage_area(mm->context, ea)))
		return;

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx)\n", mm, mm->pgd, ea, access, trap);

	/* Get PTE, VSID, access mask */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;
	vsid = get_vsid(mm->context.id, ea);

	/* Hash it in */
	local_irq_save(flags);
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;
#ifndef CONFIG_PPC_64K_PAGES
	__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_virtual_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		__hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
	local_irq_restore(flags);
}

void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016lx)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift);
		hidx = __rpte_to_hidx(pte, index);
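		/* (Illustrative note) an entry inserted via the secondary
		 * hash lives at the bitwise complement of the primary
		 * hash, handled just below. */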
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %ld: slot=%lx, hidx=%lx\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, local);
	} pte_iterate_hashed_end();
}

void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, local);
	}
}

static inline void make_bl(unsigned int *insn_addr, void *func)
{
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr,
			   (unsigned long)insn_addr + 4);
}
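
/* (Encoding note, illustrative) 0x48000001 is the PowerPC I-form branch
 * with link ("bl"): opcode 18 in the top 6 bits plus LK=1. The
 * pc-relative, word-aligned 26-bit displacement occupies bits 6-29,
 * hence the 0x03fffffc mask on the offset. The icache flush is required
 * because an instruction was just patched in place. */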

/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
	if (user_mode(regs)) {
		siginfo_t info;

		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}

void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_64K_PAGES
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_64K_PAGES */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}