#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif
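
/*
 * Example (illustrative sketch, not part of this header): reference
 * scanning during reclaim clears the young bit through this helper
 * while holding the pte lock, in the style of mm/rmap.c:
 *
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	if (ptep_test_and_clear_young(vma, address, pte))
 *		referenced++;
 *	pte_unmap_unlock(pte, ptl);
 */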

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(mm, address, pmdp);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif
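
/*
 * Example (illustrative, not part of this header): the teardown loop
 * in mm/memory.c passes the "full" hint when the whole address space
 * is going away, letting architectures skip per-pte synchronization:
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	tlb_remove_tlb_entry(tlb, pte, addr);
 */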

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
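
/*
 * Example (illustrative sketch, not part of this header): fork-time
 * copy-on-write setup in the style of mm/memory.c write-protects the
 * parent's pte before sharing the page with the child (the COW test
 * shown here is a simplified assumption):
 *
 *	if (is_cow_mapping(vm_flags)) {
 *		ptep_set_wrprotect(src_mm, addr, src_pte);
 *		pte = pte_wrprotect(pte);
 *	}
 */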

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
				  unsigned long address,
				  pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif
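
/*
 * Example (illustrative, not part of this header): fault handlers use
 * pte_same() to revalidate a saved pte after re-taking the pte lock,
 * bailing out if another thread already serviced the fault:
 *
 *	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	if (!pte_same(*page_table, orig_pte))
 *		goto unlock;
 */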

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define page_test_and_clear_dirty(pfn, mapped)	(0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(pfn) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif
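
/*
 * Example (illustrative, not part of this header): a driver's mmap
 * handler typically applies one of these modifiers before remapping
 * device memory into userspace:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */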

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
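
/*
 * Example (illustrative sketch, not part of this header): the usual
 * walking pattern combines the p?d_addr_end() macros with these
 * helpers to skip empty or corrupt entries; walk_pud_range() stands
 * in for a hypothetical per-level callback:
 *
 *	pgd = pgd_offset(mm, addr);
 *	do {
 *		next = pgd_addr_end(addr, end);
 *		if (pgd_none_or_clear_bad(pgd))
 *			continue;
 *		walk_pud_range(pgd, addr, next);
 *	} while (pgd++, addr = next, addr != end);
 */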

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
#endif /* CONFIG_MMU */
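
/*
 * Example (illustrative, not part of this header): a protection change
 * under the pte lock using the transaction interface, in the style of
 * mm/mprotect.c:
 *
 *	ptent = ptep_modify_prot_start(mm, addr, pte);
 *	ptent = pte_modify(ptent, newprot);
 *	ptep_modify_prot_commit(mm, addr, pte, ptent);
 */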

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection
 * of the page table locks for all page tables which may be modified.  In
 * the UP case, this is required so that preemption is disabled, and in the
 * SMP case, it must synchronize the delayed page table writes properly on
 * other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
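
/*
 * Example (illustrative sketch, not part of this header): callers
 * bracket a run of pte updates with the lazy hooks while holding the
 * pte lock, as the copy and zap loops in mm/memory.c do:
 *
 *	arch_enter_lazy_mmu_mode();
 *	do {
 *		set_pte_at(mm, addr, pte, ptent);
 *	} while (pte++, addr += PAGE_SIZE, addr != end);
 *	arch_leave_lazy_mmu_mode();
 */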

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interface that can be used by architecture code to keep track of
 * the memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * track_pfn_vma_new is called when a _new_ pfn mapping is being
 * established for the physical range indicated by pfn and size.
 */
static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
				    unsigned long pfn, unsigned long size)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * the memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * track_pfn_vma_copy is called when a vma covering a pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * the memory type of pfn mappings (remap_pfn_range, vm_insert_pfn).
 *
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and
 * size, or for the entire vma (in which case size can be zero).
 */
static inline void untrack_pfn_vma(struct vm_area_struct *vma,
				   unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			     unsigned long pfn, unsigned long size);
extern int track_pfn_vma_copy(struct vm_area_struct *vma);
extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			    unsigned long size);
#endif
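
/*
 * Example (illustrative, not part of this header): remap_pfn_range()
 * consults the tracking hook before establishing a new pfn mapping and
 * backs out if the architecture refuses the requested memory type:
 *
 *	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
 *	if (err)
 *		return -EINVAL;
 */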

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * This function is meant to be used by sites walking pagetables with
 * the mmap_sem held in read mode to protect against MADV_DONTNEED and
 * transhuge page faults.  MADV_DONTNEED can convert a transhuge pmd
 * into a null pmd and the transhuge page fault can convert a null pmd
 * into a huge pmd or into a regular pmd (if the hugepage allocation
 * fails).  While holding the mmap_sem in read mode the pmd becomes
 * stable and stops changing under us only if it's not null and not a
 * transhuge pmd.  When those races occur and this function makes a
 * difference vs the standard pmd_none_or_clear_bad, the result is
 * undefined, so behaving as if the pmd were none is safe (because it
 * can return none anyway).  The compiler-level barrier() is critically
 * important to compute the two checks atomically on the same pmdval.
 */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	/* depend on compiler for an atomic pmd read */
	pmd_t pmdval = *pmd;
	/*
	 * The barrier will stabilize the pmdval in a register or on
	 * the stack so that it will stop changing under the code.
	 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	if (pmd_none(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		if (!pmd_trans_huge(pmdval))
			pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

/*
 * This is a noop if Transparent Hugepage Support is not built into
 * the kernel.  Otherwise it is equivalent to
 * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
 * places that already verified the pmd is not none and want to walk
 * ptes while holding the mmap_sem in read mode (write mode doesn't
 * need this).  If THP is not enabled, the pmd can't go away under the
 * code even if MADV_DONTNEED runs, but if THP is enabled we need to
 * run pmd_trans_unstable before walking the ptes after
 * split_huge_page_pmd returns (because it may have run while the pmd
 * became null, but then a page fault can map in a THP and not a
 * regular page).
 */
static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
	return 0;
#endif
}

#endif /* CONFIG_MMU */
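
/*
 * Example (illustrative sketch, not part of this header): a pagetable
 * walker holding mmap_sem for reading rechecks the pmd before mapping
 * the pte table, skipping the range if the pmd is still in flux:
 *
 *	if (pmd_trans_unstable(pmd))
 *		return 0;
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	...
 *	pte_unmap_unlock(pte, ptl);
 */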

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */