/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>
/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
#endif

unsigned long ioremap_bot = IOREMAP_BASE;
#ifdef CONFIG_PPC_MMU_NOHASH
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */
/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}

	smp_wmb();
	return 0;
}
/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}
/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.
	 * Once the imalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  imalloc will use
	 * the addresses from ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;
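	/*
	 * Worked example (illustrative values only, not from the original
	 * file): for addr = 0x3fe00104 and size = 0x20 with 4K pages,
	 *	paligned = addr & PAGE_MASK                  = 0x3fe00000
	 *	size     = PAGE_ALIGN(0x3fe00124) - paligned = 0x1000
	 * and the in-page offset (0x104) is added back onto the returned
	 * cookie at the bottom of this function.
	 */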
	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
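/*
 * Illustrative sketch (not part of the original file): how a driver would
 * typically consume the ioremap()/iounmap() pair defined in this file.
 * The physical base address and register offsets are made-up placeholders.
 */
#if 0	/* example only, never built */
static int example_mmio_probe(void)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap(0xf1000000UL, PAGE_SIZE);	/* hypothetical MMIO base */
	if (!regs)
		return -ENOMEM;

	status = readl(regs + 0x10);			/* read a device register */
	writel(status | 0x1, regs + 0x14);		/* write a modified value back */

	iounmap(regs);
	return 0;
}
#endif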
void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}
EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);
#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}
/*
 * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags
 * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}
#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!kernel && !pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pgtable_page set, we return
	 * the allocated page with single fragment
	 * count.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
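/*
 * Added note (not in the original file): with the 64K base page size in
 * effect here and the 4K PTE_FRAG_SIZE mentioned above, each newly
 * allocated page is assumed to be carved into PAGE_SIZE / PTE_FRAG_SIZE
 * (i.e. 16, PTE_FRAG_NR) fragments.  mm->context.pte_frag points at the
 * next unused fragment of the current page, or is NULL when a fresh page
 * has to be allocated.
 */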
void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}
#ifdef CONFIG_SMP
static void page_table_free_rcu(void *table)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		page_table_free_rcu(table);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		struct page *page = virt_to_page(table);

		if (put_page_testzero(page)) {
			pgtable_page_dtor(page);
			free_hot_cold_page(page, 0);
		}
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif /* CONFIG_SMP */
#endif /* CONFIG_PPC_64K_PAGES */
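/*
 * Added note (not in the original file): pgtable_free_tlb() smuggles the
 * page-table index size through the low bits of the table pointer, which
 * relies on those allocations being aligned well past
 * MAX_PGTABLE_INDEX_SIZE:
 *
 *	pgf   = (unsigned long)table | shift;              encode
 *	table = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);   decode in
 *	shift = pgf & MAX_PGTABLE_INDEX_SIZE;              __tlb_remove_table()
 *
 * A shift of 0 marks a PTE fragment page, which is freed through
 * page_table_free_rcu() rather than a kmem cache.
 */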
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases, i.e.
 * a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code
 * will have handled those two for us; we additionally deal with missing
 * execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
		/*
		 * Since we are not supporting SW TLB systems, we don't
		 * have anything similar to flush_tlb_page_nohash()
		 */
	}
	return changed;
}
unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif
	/* ldarx/stdcx. loop: wait for _PAGE_BUSY to clear, then atomically
	 * clear the 'clr' bits and set the 'set' bits in the pmd. */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & _PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_sem. But we could very well be in a
	 * hash_page with local ptep pointer value. Such a hash page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	kick_all_cpus_sync();
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_sem and
	 * hence waits for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}
/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(&mm->page_table_lock);
	/*
	 * we store the pgtable in the second half of PMD
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * expose the deposited pgtable to other cpus.
	 * before we set the hugepage PTE at pmd level
	 * hash fault code looks at the deposited pgtable
	 * to store hash index values.
	 */
	smp_wmb();
}
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(&mm->page_table_lock);
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}
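/*
 * Added note (not in the original file): the deposit/withdraw helpers above
 * treat a PMD page as two parallel halves.  The first PTRS_PER_PMD entries
 * are the pmd slots themselves; the slot at the same index in the second
 * half (pmdp + PTRS_PER_PMD) parks the pgtable pointer while the hugepage
 * PTE occupies the pmd slot, and the hash fault code reads hash index
 * values out of that deposited fragment.
 */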
void pmdp_huge_split_prepare(struct vm_area_struct *vma,
			     unsigned long address, pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);

	/*
	 * We can't mark the pmd none here, because that will cause a race
	 * against exit_mmap. We need to continue to mark the pmd TRANS HUGE
	 * while we split, but at the same time we want the rest of the ppc64
	 * code not to insert a hash pte on this, because we will be modifying
	 * the deposited pgtable in the caller of this function. Hence
	 * clear the _PAGE_USER so that we move the fault handling to
	 * a higher level function and that will serialize against the ptl.
	 * We need to flush existing hash pte entries here even though
	 * the translation is still valid, because we will withdraw
	 * pgtable_t after this.
	 */
	pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
}
/*
 * set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!pmd_trans_huge(pmd));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);

	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	kick_all_cpus_sync();
}
/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;
	const struct cpumask *tmp;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & _PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(mm), tmp))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}
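/*
 * Added note (not in the original file): the cpumask check above marks the
 * flush as CPU-local (HPTE_LOCAL_UPDATE) when the mm has only ever been
 * active on the current CPU; the assumption is that flush_hash_hugepage()
 * can then use a local invalidate instead of broadcasting it to all CPUs.
 */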
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
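/*
 * Added sketch (not part of the original file): building a hugepage pmd for
 * pfn 0x12345 with pfn_pmd() boils down to
 *	pmdv = (0x12345UL << PAGE_SHIFT) & PTE_RPN_MASK;
 *	pmd  = __pmd(pmdv | pgprot_val(pgprot));
 * i.e. the physical frame number lands in the RPN field and the protection
 * bits are OR-ed in on top by pmd_set_protbits().
 */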
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}

pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Let's zero out the old valid and hash index details;
	 * the hash fault code looks at them.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	/*
	 * Serialize against find_linux_pte_or_hugepte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();
	return old_pmd;
}
int has_transparent_hugepage(void)
{
	BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) >= MAX_ORDER,
		"hugepages can't be allocated by the buddy allocator");

	BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) < 2,
			 "We need more than 2 pages to do deferred thp split");

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 *
	 * If we have 64K HPTE, we will be using that by default
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok we only have 4K HPTE
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */