/* MN10300 Page table manipulators and constants
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree for the purposes of the MN10300 TLB handler
 * functions.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <asm/cpu-regs.h>

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/cache.h>
#include <linux/threads.h>

#include <asm/bitops.h>

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern spinlock_t pgd_lock;
extern struct page *pgd_list;

extern void pmd_ctor(void *, struct kmem_cache *, unsigned long);
extern void pgtable_cache_init(void);
extern void paging_init(void);

#endif /* !__ASSEMBLY__ */

/*
 * The Linux mn10300 paging architecture implements only the traditional
 * two-level page table setup
 */
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024
#define PTRS_PER_PUD 1 /* we don't really have any PUD physically */
#define __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PMD 1 /* we don't really have any PMD physically */
#define __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PTE 1024

#define PGD_SIZE PAGE_SIZE
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
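
/*
 * Illustrative sketch (not part of the original header): with PGDIR_SHIFT
 * at 22 and, assuming the usual 4kB pages (a PAGE_SHIFT of 12), a 32-bit
 * virtual address decomposes as:
 *
 *	31           22 21           12 11            0
 *	+--------------+---------------+---------------+
 *	|  PGD index   |   PTE index   |  page offset  |
 *	|  (10 bits)   |   (10 bits)   |   (12 bits)   |
 *	+--------------+---------------+---------------+
 *
 * which is why both PTRS_PER_PGD and PTRS_PER_PTE are 1024. For example:
 *
 *	unsigned long addr   = 0x70123456UL;
 *	unsigned long pgd_ix = addr >> PGDIR_SHIFT;	// 0x1c0
 *	unsigned long pte_ix = (addr >> 12) & 1023;	// 0x123
 *	unsigned long offset = addr & 0xfff;		// 0x456
 */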

#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT 22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024 - BOOT_USER_PGD_PTRS)

#ifndef __ASSEMBLY__
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#endif

/*
 * Unfortunately, due to the way the MMU works on the MN10300, the vmalloc VM
 * area has to be in the lower half of the virtual address range (the upper
 * half is not translated through the TLB).
 *
 * So in this case, the vmalloc area goes at the bottom of the address map
 * (leaving a hole at the very bottom to catch addressing errors), and
 * userspace starts immediately above.
 *
 * The vmalloc() routines also leave a hole of 4kB between each vmalloced
 * area to catch addressing errors.
 */
#ifndef __ASSEMBLY__
#define VMALLOC_OFFSET (8UL * 1024 * 1024)
#define VMALLOC_START (0x70000000UL)
#define VMALLOC_END (0x7C000000UL)
#else
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START (0x70000000)
#define VMALLOC_END (0x7C000000)
#endif

#ifndef __ASSEMBLY__
extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#endif

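/*
 * Illustrative sketch (derived from the constants above, not part of the
 * original header): the vmalloc arena spans
 *
 *	VMALLOC_START	0x70000000
 *	VMALLOC_END	0x7C000000	(0x0C000000 = 192MB of vmalloc space)
 *
 * and kernel_vmalloc_ptes[] above statically provides one pte_t for each
 * page of that arena: (VMALLOC_END - VMALLOC_START) / PAGE_SIZE entries.
 */
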
/* IPTEL2/DPTEL2 bit assignments */
#define _PAGE_BIT_VALID xPTEL2_V_BIT
#define _PAGE_BIT_CACHE xPTEL2_C_BIT
#define _PAGE_BIT_PRESENT xPTEL2_PV_BIT
#define _PAGE_BIT_DIRTY xPTEL2_D_BIT
#define _PAGE_BIT_GLOBAL xPTEL2_G_BIT
#define _PAGE_BIT_ACCESSED xPTEL2_UNUSED1_BIT /* mustn't be loaded into IPTEL2/DPTEL2 */

#define _PAGE_VALID xPTEL2_V
#define _PAGE_CACHE xPTEL2_C
#define _PAGE_PRESENT xPTEL2_PV
#define _PAGE_DIRTY xPTEL2_D
#define _PAGE_PROT xPTEL2_PR
#define _PAGE_PROT_RKNU xPTEL2_PR_ROK
#define _PAGE_PROT_WKNU xPTEL2_PR_RWK
#define _PAGE_PROT_RKRU xPTEL2_PR_ROK_ROU
#define _PAGE_PROT_WKRU xPTEL2_PR_RWK_ROU
#define _PAGE_PROT_WKWU xPTEL2_PR_RWK_RWU
#define _PAGE_GLOBAL xPTEL2_G
#define _PAGE_PS_MASK xPTEL2_PS
#define _PAGE_PS_4Kb xPTEL2_PS_4Kb
#define _PAGE_PS_128Kb xPTEL2_PS_128Kb
#define _PAGE_PS_1Kb xPTEL2_PS_1Kb
#define _PAGE_PS_4Mb xPTEL2_PS_4Mb
#define _PAGE_PSE xPTEL2_PS_4Mb /* 4MB page */
#define _PAGE_CACHE_WT xPTEL2_CWT
#define _PAGE_ACCESSED xPTEL2_UNUSED1
#define _PAGE_NX 0 /* no-execute bit */

/* If _PAGE_VALID is clear, we use these: */
#define _PAGE_PROTNONE 0x000 /* If not present */

#define __PAGE_PROT_UWAUX 0x010
#define __PAGE_PROT_USER 0x020
#define __PAGE_PROT_WRITE 0x040

#define _PAGE_PRESENTV (_PAGE_PRESENT|_PAGE_VALID)

#ifndef __ASSEMBLY__

#define VMALLOC_VMADDR(x) ((unsigned long)(x))

#define _PAGE_TABLE (_PAGE_PRESENTV | _PAGE_PROT_WKNU | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define __PAGE_NONE (_PAGE_PRESENTV | _PAGE_PROT_RKNU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_SHARED (_PAGE_PRESENTV | _PAGE_PROT_WKWU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_COPY (_PAGE_PRESENTV | _PAGE_PROT_RKRU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_READONLY (_PAGE_PRESENTV | _PAGE_PROT_RKRU | _PAGE_ACCESSED | _PAGE_CACHE)

#define PAGE_NONE __pgprot(__PAGE_NONE | _PAGE_NX)
#define PAGE_SHARED_NOEXEC __pgprot(__PAGE_SHARED | _PAGE_NX)
#define PAGE_COPY_NOEXEC __pgprot(__PAGE_COPY | _PAGE_NX)
#define PAGE_READONLY_NOEXEC __pgprot(__PAGE_READONLY | _PAGE_NX)
#define PAGE_SHARED_EXEC __pgprot(__PAGE_SHARED)
#define PAGE_COPY_EXEC __pgprot(__PAGE_COPY)
#define PAGE_READONLY_EXEC __pgprot(__PAGE_READONLY)
#define PAGE_COPY PAGE_COPY_NOEXEC
#define PAGE_READONLY PAGE_READONLY_NOEXEC
#define PAGE_SHARED PAGE_SHARED_EXEC

#define __PAGE_KERNEL_BASE (_PAGE_PRESENTV | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)

#define __PAGE_KERNEL (__PAGE_KERNEL_BASE | _PAGE_PROT_WKNU | _PAGE_CACHE | _PAGE_NX)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL_BASE | _PAGE_PROT_WKNU | _PAGE_NX)
#define __PAGE_KERNEL_EXEC (__PAGE_KERNEL & ~_PAGE_NX)
#define __PAGE_KERNEL_RO (__PAGE_KERNEL_BASE | _PAGE_PROT_RKNU | _PAGE_CACHE | _PAGE_NX)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)

#define __PAGE_USERIO (__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
#define PAGE_USERIO __pgprot(__PAGE_USERIO)

/*
 * Whilst the MN10300 can do page protection for execute (given separate data
 * and insn TLBs), we are not supporting it at the moment. Write permission,
 * however, always implies read permission (but not execute permission).
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY_NOEXEC
#define __P010 PAGE_COPY_NOEXEC
#define __P011 PAGE_COPY_NOEXEC
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY_NOEXEC
#define __S010 PAGE_SHARED_NOEXEC
#define __S011 PAGE_SHARED_NOEXEC
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
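
/*
 * For illustration (not part of the original header): the __Pxwr/__Sxwr
 * names are indexed by the mmap() protection bits - bit 0 read, bit 1
 * write, bit 2 execute - for private (__P) and shared (__S) mappings.
 * So a private PROT_READ|PROT_WRITE mapping takes __P011, i.e.
 * PAGE_COPY_NOEXEC, giving copy-on-write semantics, while the shared
 * equivalent takes __S011 == PAGE_SHARED_NOEXEC, which is truly writable.
 */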

/*
 * Define this to warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

#define pte_present(x) (pte_val(x) & _PAGE_VALID)
#define pte_clear(mm, addr, xp) \
do { \
	set_pte_at((mm), (addr), (xp), __pte(0)); \
} while (0)

223
224#define pmd_none(x) (!pmd_val(x))
225#define pmd_present(x) (!pmd_none(x))
226#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
227#define pmd_bad(x) 0
228
229
230#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
231
232#ifndef __ASSEMBLY__
233
234/*
235 * The following only work if pte_present() is true.
236 * Undefined behaviour if not..
237 */
238static inline int pte_user(pte_t pte) { return pte_val(pte) & __PAGE_PROT_USER; }
239static inline int pte_read(pte_t pte) { return pte_val(pte) & __PAGE_PROT_USER; }
240static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
241static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
242static inline int pte_write(pte_t pte) { return pte_val(pte) & __PAGE_PROT_WRITE; }
7e675137 243static inline int pte_special(pte_t pte){ return 0; }
b920de1b 244
static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(__PAGE_PROT_USER|__PAGE_PROT_UWAUX); return pte;
}
static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_NX; return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(__PAGE_PROT_WRITE|__PAGE_PROT_UWAUX); return pte;
}

static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= ~_PAGE_NX; return pte; }

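/*
 * Descriptive note (added): __PAGE_PROT_UWAUX is the user-write auxiliary
 * bit. The helpers below only set it once a pte is both user-readable and
 * writable, and pte_wrprotect()/pte_rdprotect() above clear it again, so
 * userspace write access is only ever granted in combination with read.
 */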
static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= __PAGE_PROT_USER;
	if (pte_write(pte))
		pte_val(pte) |= __PAGE_PROT_UWAUX;
	return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= __PAGE_PROT_WRITE;
	if (pte_val(pte) & __PAGE_PROT_USER)
		pte_val(pte) |= __PAGE_PROT_UWAUX;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
	       __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
	       __FILE__, __LINE__, pgd_val(e))

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_clear(xp) do { } while (0)

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm, addr, ptep, pteval) set_pte((ptep), (pteval))
#define set_pte_atomic(pteptr, pteval) set_pte((pteptr), (pteval))

/*
 * (pmds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

#define ptep_get_and_clear(mm, addr, ptep) \
	__pte(xchg(&(ptep)->pte, 0))
#define pte_same(a, b) (pte_val(a) == pte_val(b))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!pte_val(x))
#define pte_pfn(x) ((unsigned long) (pte_val(x) >> PAGE_SHIFT))
#define __pfn_addr(pfn) ((pfn) << PAGE_SHIFT)
#define pfn_pte(pfn, prot) __pte(__pfn_addr(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(__pfn_addr(pfn) | pgprot_val(prot))

/*
 * All present user pages are user-executable:
 */
static inline int pte_exec(pte_t pte)
{
	return pte_user(pte);
}

/*
 * All present pages are kernel-executable:
 */
static inline int pte_exec_kernel(pte_t pte)
{
	return 1;
}

/* Encode and decode a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x3f)
#define __swp_offset(x) ((x).val >> 7)
#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 1) | ((offset) << 7) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) __pte((x).val)
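
/*
 * Illustrative layout (derived from the macros above, not part of the
 * original header): a swap entry is a non-present pte packed as
 *
 *	31                    7 6          1  0
 *	+----------------------+------------+---+
 *	|     swap offset      | swap type  | 0 |
 *	+----------------------+------------+---+
 *
 * giving 64 possible swap types and a 25-bit swap offset; __swp_entry()
 * always leaves bit 0 clear.
 */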

static inline
int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep)
{
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
}

static inline
int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
}

static inline
void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) &= ~(__PAGE_PROT_WRITE|__PAGE_PROT_UWAUX);
}

static inline void ptep_mkdirty(pte_t *ptep)
{
	set_bit(_PAGE_BIT_DIRTY, &ptep->pte);
}

/*
 * Macro to mark a page protection value as "uncacheable". On processors which
 * do not support it, this is a no-op.
 */
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHE)

/*
 * Macro to mark a page protection value as "Write-Through".
 * On processors which do not support it, this is a no-op.
 */
#define pgprot_through(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)
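
/*
 * Hedged usage sketch (not from the original header): these are typically
 * applied to an existing protection before mapping device memory, e.g.:
 *
 *	pgprot_t io = pgprot_noncached(PAGE_KERNEL);	// clears _PAGE_CACHE
 *	pgprot_t wt = pgprot_through(PAGE_KERNEL);	// sets _PAGE_CACHE_WT
 */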

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry) \
	((entry).pte |= _PAGE_PRESENT | _PAGE_PSE | _PAGE_VALID)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}
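
/*
 * Illustrative sketch (added): pte_modify() keeps the page frame plus the
 * accessed/dirty state (_PAGE_CHG_MASK) and swaps in new protection bits,
 * as an mprotect() would, e.g.:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);	// same pfn, now read-only
 */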

#define page_pte(page) page_pte_prot((page), __pgprot(0))

#define pmd_page_kernel(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

#define pmd_large(pmd) \
	((pmd_val(pmd) & (_PAGE_PSE | _PAGE_PRESENT)) == \
	 (_PAGE_PSE | _PAGE_PRESENT))

412
413/*
414 * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
415 *
416 * this macro returns the index of the entry in the pgd page which would
417 * control the given virtual address
418 */
419#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
420
421/*
422 * pgd_offset() returns a (pgd_t *)
423 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
424 */
425#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
426
427/*
428 * a shortcut which implies the use of the kernel's pgd, instead
429 * of a process's
430 */
431#define pgd_offset_k(address) pgd_offset(&init_mm, address)
432
433/*
434 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
435 *
436 * this macro returns the index of the entry in the pmd page which would
437 * control the given virtual address
438 */
439#define pmd_index(address) \
440 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
441
442/*
443 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
444 *
445 * this macro returns the index of the entry in the pte page which would
446 * control the given virtual address
447 */
448#define pte_index(address) \
449 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
450
451#define pte_offset_kernel(dir, address) \
452 ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
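
/*
 * Hedged walk sketch (not part of the original header; assumes the generic
 * helpers for the folded PUD/PMD levels resolve as usual): looking up the
 * pte for a kernel address by hand would go roughly:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);	// folded: stays in the pgd
 *	pmd_t *pmd = pmd_offset(pud, addr);	// folded: stays in the pud
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */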

/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
static inline int set_kernel_exec(unsigned long vaddr, int enable)
{
	return 0;
}

#define pte_offset_map(dir, address) \
	((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do {} while (0)

/*
 * The MN10300 has external MMU info in the form of a TLB: the TLB is loaded
 * from the kernel page tables, which contain the necessary information, by
 * tlb-mn10300.S
 */
extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t *ptep);

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr) (1)

#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */