/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache(). This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
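/*
 * Generic fallback: test-and-clear the young/accessed bit, and flush
 * the TLB entry only if the bit was actually set.
 */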
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
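/*
 * Generic fallback: clear the pte and return the old value, flushing
 * the TLB entry only when the old pte was accessible, i.e. could have
 * been cached by the TLB.
 */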
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
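/*
 * pmd-level counterpart of ptep_set_access_flags() for transparent
 * hugepages: update the entry and flush the hugepage TLB range if it
 * changed.
 */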
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
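/*
 * pmd-level counterpart of ptep_clear_flush_young(): clear the young
 * bit of a huge pmd, flushing its TLB range only if the bit was set.
 */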
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
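/*
 * Clear a huge pmd (which must be trans-huge or devmap) and return the
 * old entry, unconditionally flushing the hugepage TLB range.
 */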
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
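/*
 * pud-level counterpart of pmdp_huge_clear_flush(), for architectures
 * with pud-sized transparent hugepage support.
 */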
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
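/*
 * Deposit a preallocated pte page table under the pmd lock, so it can
 * be withdrawn later (e.g. when splitting the huge pmd) without risk
 * of allocation failure.
 */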
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/*
 * Withdraw a previously deposited page table. There is no "address"
 * argument, so this destroys the page coloring of some architectures.
 */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
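/*
 * Mark the pmd not-present and flush its TLB range, so the hardware
 * cannot keep using a cached huge entry while it is being modified.
 */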
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif

#ifndef pmdp_collapse_flush
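/*
 * Clear a pmd that points to a pte page table during hugepage
 * collapse, returning the old entry; the pte-level TLB entries it
 * mapped are flushed below.
 */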
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function here.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */