/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, so we treat it the same as read. Also, write permissions
 * imply read permissions. This is the closest we can get by
 * reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

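/*
 * How the placeholders above get their real values (a sketch, assuming the
 * usual MIPS boot flow; setup_protection_map() historically lives in
 * arch/mips/mm/cache.c): once _page_cachable_default is known, the table is
 * rewritten with entries roughly of the form
 *
 *	protection_map[n] = __pgprot(_page_cachable_default | _PAGE_PRESENT |
 *				     <per-combination permission bits>);
 *
 * which is why only the extern declaration below appears in this header.
 */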
extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
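/*
 * Background on __HAVE_COLOR_ZERO_PAGE (an explanatory note, not taken from
 * the original comments): MIPS caches can be virtually indexed, so a single
 * shared zero page may alias. Several zero pages are therefore kept, one per
 * cache colour, and zero_page_mask selects the copy whose colour matches the
 * mapped address:
 *
 *	struct page *zp = ZERO_PAGE(vaddr);
 */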

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

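/*
 * Hardware page table walker (HTW) control. These helpers are assumed to work
 * by toggling the PWEn bit in the CP0 PWCtl register: htw_stop()/htw_start()
 * disable and re-enable the walker, and htw_reset() cycles it with hazard
 * barriers so an in-flight walk cannot race a page table update.
 */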
#define htw_stop()						\
do {								\
	if (cpu_has_htw)					\
		write_c0_pwctl(read_c0_pwctl() &		\
			       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
} while(0)

#define htw_start()						\
do {								\
	if (cpu_has_htw)					\
		write_c0_pwctl(read_c0_pwctl() |		\
			       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
} while(0)

#define htw_reset()						\
do {								\
	if (cpu_has_htw) {					\
		htw_stop();					\
		back_to_back_c0_hazard();			\
		htw_start();					\
		back_to_back_c0_hazard();			\
	}							\
} while(0)

extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

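/*
 * Note on the "buddy" handling below (background, hedged): a MIPS TLB entry
 * maps an even/odd pair of pages through EntryLo0/EntryLo1, and the entry is
 * only treated as global when both halves have the G bit set. ptep_buddy() is
 * assumed to return the other PTE of that pair, which is why making one PTE
 * global also forces _PAGE_GLOBAL into a still-none buddy.
 */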
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low	|= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
	htw_reset();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_reset();
}
#endif

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

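/*
 * The *_T_LOG2 constants below give log2 of the page table entry sizes;
 * presumably they exist so assembler-level walkers (such as the generated
 * TLB refill handlers) can scale an index with a shift instead of a multiply.
 */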
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte.pte_low & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

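/*
 * Background for the RIXI handling below (an added note, not from the
 * original comments): with the RI/XI extension (cpu_has_rixi) there is no
 * positive read-permission bit; a page is readable unless _PAGE_NO_READ is
 * set. pte_mkyoung() therefore sets _PAGE_SILENT_READ (the hardware valid
 * bit) only while _PAGE_NO_READ is clear, whereas the legacy path keys off
 * _PAGE_READ.
 */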
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (cpu_has_rixi) {
		if (!(pte_val(pte) & _PAGE_NO_READ))
			pte_val(pte) |= _PAGE_SILENT_READ;
	} else {
		if (pte_val(pte) & _PAGE_READ)
			pte_val(pte) |= _PAGE_SILENT_READ;
	}
	return pte;
}

#ifdef _PAGE_HUGE
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* _PAGE_HUGE */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable". Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= ~0x3f;
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & 0x3f;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
			 pte_t pte);

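/*
 * update_mmu_cache() is the hook the generic MM code calls after a PTE has
 * been installed or updated for a faulting address; on MIPS it appears to do
 * nothing more than preload/refresh the matching TLB entry via __update_tlb().
 */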
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

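/*
 * io_remap_pfn_range() below routes the address through fixup_bigphys_addr(),
 * presumably so 32-bit platforms with wider-than-32-bit physical addressing
 * can rewrite a device address into its real location before
 * remap_pfn_range() maps it. fixup_bigphys_addr() is platform-provided and
 * not defined in this header.
 */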
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long vaddr,
				     unsigned long pfn,
				     unsigned long size,
				     pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

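/*
 * _PAGE_SPLITTING (below) is assumed to be a software-only bit used by the
 * transparent-hugepage code of this era: it marks a huge pmd that is being
 * split back into ordinary ptes so concurrent lookups can notice the
 * transient state, and pmdp_splitting_flush() flushes stale TLB entries
 * while the split is in progress.
 */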
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long address,
				 pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (cpu_has_rixi) {
		if (!(pmd_val(pmd) & _PAGE_NO_READ))
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	} else {
		if (pmd_val(pmd) & _PAGE_READ)
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	}

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version pmdp_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */