/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

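/*
 * For context, the caller side is a sketch like the following, from
 * the p?d_none_or_clear_bad helpers in asm-generic/pgtable.h (shown
 * for illustration only; the exact definition varies by kernel
 * version):
 *
 *	static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 *	{
 *		if (pmd_none(*pmd))
 *			return 1;
 *		if (unlikely(pmd_bad(*pmd))) {
 *			pmd_clear_bad(pmd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */
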
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache(). This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

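/*
 * A sketch of a typical caller (illustrative only; names and locking
 * are simplified): a minor-fault path builds the new pte and lets the
 * return value decide whether update_mmu_cache() is needed.
 *
 *	pte_t entry = pte_mkyoung(orig_pte);
 *	if (write_fault)
 *		entry = pte_mkdirty(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write_fault))
 *		update_mmu_cache(vma, address, ptep);
 */
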
#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
	BUG();
	return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

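/*
 * Sketch of how an aging/reclaim walk might use this (illustrative;
 * real callers such as the rmap walk take the pte lock first):
 *
 *	if (ptep_clear_flush_young(vma, address, ptep))
 *		referenced++;	... the page was recently accessed ...
 */
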
#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
	BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

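/*
 * Sketch of a typical unmap-style caller (simplified; real callers
 * hold the pte lock and do full rmap and dirty accounting):
 *
 *	pte_t pteval = ptep_clear_flush(vma, address, ptep);
 *	if (pte_dirty(pteval))
 *		set_page_dirty(page);
 */
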
#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* TLB flush only to serialize against gup-fast */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

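/*
 * The gup-fast side this serializes against looks roughly like the
 * following (a sketch only; the real walker lives in mm/gup.c or arch
 * code and varies by kernel version). gup-fast runs with IRQs
 * disabled, so on architectures that flush via IPI the TLB flush
 * above cannot complete while gup-fast is mid-walk, and gup-fast
 * backs off when it sees the splitting bit:
 *
 *	if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 *		return 0;	... fall back to the slow gup path ...
 */
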
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

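/*
 * Deposit and withdraw are always paired over the lifetime of a huge
 * pmd (a sketch; callers and locking are simplified): the pte page
 * preallocated for a huge mapping is deposited when the huge pmd is
 * installed, and withdrawn for reuse or freeing when the huge pmd is
 * split or zapped, all under the pmd lock.
 *
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 */
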
#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* no "address" argument, so it destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	if (list_empty(&pgtable->lru))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,
						    struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

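/*
 * Typical use (a sketch; simplified from callers that rewrite a huge
 * pmd in place): clear the present bit and flush first, so hardware
 * cannot set the accessed/dirty bits in the old entry while it is
 * being modified, then install the new entry under the same lock.
 *
 *	pmdp_invalidate(vma, haddr, pmdp);
 *	entry = pmd_modify(entry, newprot);
 *	set_pmd_at(mm, haddr, pmdp, entry);
 */
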
#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif