/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report the
 * error and reset the entry to p?d_none.  These helpers are called
 * (rarely, in practice) from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
        pud_ERROR(*pud);
        pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
        pmd_ERROR(*pmd);
        pmd_clear(pmd);
}
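
/*
 * Illustrative sketch, kept out of the build with #if 0: the generic
 * p?d_none_or_clear_bad() helpers in asm-generic/pgtable.h call the
 * functions above roughly like this when a corrupt entry turns up
 * during a page-table walk.
 */
#if 0
static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
        if (pmd_none(*pmd))
                return 1;
        if (unlikely(pmd_bad(*pmd))) {
                pmd_clear_bad(pmd);     /* report the error, reset to none */
                return 1;
        }
        return 0;
}
#endif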

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission.  Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this.  We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache().  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);
        if (changed) {
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_fix_spurious_fault(vma, address);
        }
        return changed;
}
#endif
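
/*
 * Usage sketch (illustrative only, #if 0'd out): callers key
 * update_mmu_cache() off the return value, e.g. in a fault handler:
 */
#if 0
        if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
                update_mmu_cache(vma, address, ptep);
#endif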

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;
        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);
        return young;
}
#endif
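
/*
 * Usage sketch (illustrative, #if 0'd out): reclaim-style reference
 * tracking clears the accessed bit and flushes the TLB so a stale
 * entry cannot silently re-mark the page young:
 */
#if 0
        if (ptep_clear_flush_young(vma, address, ptep))
                referenced++;   /* the page was recently accessed */
#endif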

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
/*
 * Clear a pte and flush its TLB entry, returning the old value.  The
 * flush is skipped when pte_accessible() says no TLB can be caching
 * the entry.
 */
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t pte;
        pte = ptep_get_and_clear(mm, address, ptep);
        if (pte_accessible(mm, pte))
                flush_tlb_page(vma, address);
        return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (changed) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
}
#endif
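
/*
 * Usage sketch (illustrative, #if 0'd out): as with the pte variant,
 * a huge-pmd fault handler keys update_mmu_cache_pmd() off the return
 * value; haddr here stands for the HPAGE_PMD_MASK-aligned address:
 */
#if 0
        if (pmdp_set_access_flags(vma, haddr, pmdp, entry, dirty))
                update_mmu_cache_pmd(vma, haddr, pmdp);
#endif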

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp)
{
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pud_t *pudp)
{
        pud_t pud;

        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
        VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
        pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
        flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
        return pud;
}
#endif
#endif
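
/*
 * Usage sketch (illustrative, #if 0'd out): mremap()-style code moves
 * a huge pmd by clearing it from the old slot, TLB flush included,
 * and re-planting the returned value at the new address:
 */
#if 0
        pmd = pmdp_huge_clear_flush(vma, old_addr, old_pmd);
        set_pmd_at(mm, new_addr, new_pmd, pmd);
#endif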

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
        pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif
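
/*
 * Lifecycle sketch (illustrative, #if 0'd out; the local variable
 * names are assumptions): when a huge pmd is installed, a
 * preallocated pte table is deposited under the pmd lock so that a
 * later split cannot fail for lack of memory:
 */
#if 0
        pgtable = pte_alloc_one(mm, haddr);             /* preallocate */
        ptl = pmd_lock(mm, pmdp);
        pgtable_trans_huge_deposit(mm, pmdp, pgtable);  /* stash it */
        set_pmd_at(mm, haddr, pmdp, entry);             /* install huge pmd */
        spin_unlock(ptl);
#endif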

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
                                                          struct page, lru);
        if (pmd_huge_pte(mm, pmdp))
                list_del(&pgtable->lru);
        return pgtable;
}
#endif
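
/*
 * Counterpart sketch (illustrative, #if 0'd out): at split time the
 * deposited table is withdrawn, under the same pmd lock, and reused
 * as the pte table for the remapped range:
 */
#if 0
        pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
        pmd_populate(mm, &_pmd, pgtable);       /* reuse as a pte table */
#endif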

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_t entry = *pmdp;
        set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif
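
/*
 * Usage sketch (illustrative, #if 0'd out): split code marks the pmd
 * not-present first, so no CPU can keep setting dirty/accessed bits
 * through a stale TLB entry while the replacement is prepared:
 */
#if 0
        pmdp_invalidate(vma, haddr, pmdp);
        pmd_populate(mm, pmdp, pgtable);        /* publish the pte table */
#endif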

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        /*
         * The pmd and the hugepage pte share the same format, so we
         * can use the same function for both.
         */
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

        /* collapse entails shooting down ptes, not the pmd */
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}
#endif
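
/*
 * Usage sketch (illustrative, #if 0'd out): khugepaged-style collapse
 * pulls the pmd (and the pte mappings under it) out of the TLB before
 * copying the small pages into a huge page:
 */
#if 0
        pmd = pmdp_collapse_flush(vma, haddr, pmdp);
        /* ... copy pages, then install the huge pmd with set_pmd_at() ... */
#endif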
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */