/*
 * include/asm-parisc/page.h
 *
 * PA-RISC page-size constants, page-table entry types, and
 * virtual/physical address conversion macros.
 */
#ifndef _PARISC_PAGE_H
#define _PARISC_PAGE_H

/* PAGE_SHIFT determines the page size: 1 << 12 = 4096-byte pages. */
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
/* Mask that clears the in-page offset bits of an address. */
#define PAGE_MASK (~(PAGE_SIZE-1))
8 | ||
9 | #ifdef __KERNEL__ | |
10 | #include <linux/config.h> | |
11 | #ifndef __ASSEMBLY__ | |
12 | ||
13 | #include <asm/types.h> | |
14 | #include <asm/cache.h> | |
15 | ||
/* Zero an entire kernel page via memset(). */
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
/* Copy one kernel page to another through the assembly copy routine
 * (copy_user_page_asm, declared below, defined in arch assembly). */
#define copy_page(to,from) copy_user_page_asm((void *)(to), (void *)(from))
18 | ||
struct page;

/* Cache-maintenance and page copy/clear primitives implemented in
 * arch assembly elsewhere.  NOTE(review): declarations only here —
 * exact semantics assumed from the names; confirm against their
 * definitions before relying on them. */
extern void purge_kernel_dcache_page(unsigned long);
extern void copy_user_page_asm(void *to, void *from);
extern void clear_user_page_asm(void *page, unsigned long vaddr);
24 | ||
/*
 * Copy a page destined for user space: copy the data with the assembly
 * routine, then flush the kernel dcache for the destination mapping so
 * the copy is visible through other (user) address aliases.
 * vaddr and pg are accepted for interface compatibility but unused here.
 */
static inline void
copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg)
{
	copy_user_page_asm(vto, vfrom);
	flush_kernel_dcache_page(vto);
	/* XXX: ppc flushes icache too, should we? */
}
32 | ||
/*
 * Clear a page destined for user space.  The kernel dcache is purged
 * for the page first — presumably so stale dirty lines cannot be
 * written back over the cleared data (NOTE(review): confirm against
 * the purge_kernel_dcache_page implementation) — then the page is
 * zeroed by the assembly routine, which takes the user virtual
 * address (vaddr) for cache-alias handling.
 */
static inline void
clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	purge_kernel_dcache_page((unsigned long)page);
	clear_user_page_asm(page, vaddr);
}
39 | ||
/*
 * These are used to make use of C type-checking..
 */
#ifdef __LP64__
/* 64-bit: a pte is a single word. */
typedef struct { unsigned long pte; } pte_t;
#else
/* 32-bit: the pte value and its software flags live in separate words. */
typedef struct {
	unsigned long pte;
	unsigned long flags;
} pte_t;
#endif
/* NOTE: even on 64 bits, these entries are __u32 because we allocate
 * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
typedef struct { __u32 pmd; } pmd_t;
typedef struct { __u32 pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x) ((x).pte)
#ifdef __LP64__
/* 64-bit: the flags are accessed in place as the first 32 bits of the
 * pte word.  NOTE(review): this cast is endianness-dependent and
 * aliases the pte through an incompatible type — kept as-is for ABI
 * compatibility. */
#define pte_flags(x) (*(__u32 *)&((x).pte))
#else
#define pte_flags(x) ((x).flags)
#endif

/* These do not work lvalues, so make sure we don't use them as such. */
#define pmd_val(x) ((x).pmd + 0)
#define pgd_val(x) ((x).pgd + 0)
#define pgprot_val(x) ((x).pgprot)

/* Writable counterparts to the read-only pmd_val()/pgd_val() above. */
#define __pmd_val_set(x,n) (x).pmd = (n)
#define __pgd_val_set(x,n) (x).pgd = (n)

/* Construct the typed wrappers from raw values. */
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
76 | ||
/* One contiguous range of physical memory, counted in PAGE_SIZE pages
 * starting at start_pfn. */
typedef struct __physmem_range {
	unsigned long start_pfn;
	unsigned long pages;       /* PAGE_SIZE pages */
} physmem_range_t;

/* Table of detected physical memory ranges and its entry count;
 * defined and populated by arch setup code elsewhere. */
extern physmem_range_t pmem_ranges[];
extern int npmem_ranges;
84 | ||
#endif /* !__ASSEMBLY__ */

/* WARNING: The definitions below must match exactly to sizeof(pte_t)
 * etc
 */
#ifdef __LP64__
#define BITS_PER_PTE_ENTRY	3
#define BITS_PER_PMD_ENTRY	2
#define BITS_PER_PGD_ENTRY	2
#else
#define BITS_PER_PTE_ENTRY	3
#define BITS_PER_PMD_ENTRY	2
#define BITS_PER_PGD_ENTRY	BITS_PER_PMD_ENTRY
#endif
/* Entry sizes in bytes, derived from the BITS_PER_*_ENTRY shifts above. */
#define PGD_ENTRY_SIZE	(1UL << BITS_PER_PGD_ENTRY)
#define PMD_ENTRY_SIZE	(1UL << BITS_PER_PMD_ENTRY)
#define PTE_ENTRY_SIZE	(1UL << BITS_PER_PTE_ENTRY)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)


#define LINUX_GATEWAY_SPACE     0

/* This governs the relationship between virtual and physical addresses.
 * If you alter it, make sure to take care of our various fixed mapping
 * segments in fixmap.h */
#define __PAGE_OFFSET           (0x10000000)

#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)

/* The size of the gateway page (we leave lots of room for expansion) */
#define GATEWAY_PAGE_SIZE	0x4000

/* The start of the actual kernel binary---used in vmlinux.lds.S
 * Leave some space after __PAGE_OFFSET for detecting kernel null
 * ptr derefs */
#define KERNEL_BINARY_TEXT_START	(__PAGE_OFFSET + 0x100000)

/* These macros don't work for 64-bit C code -- don't allow in C at all */
#ifdef __ASSEMBLY__
#   define PA(x)	((x)-__PAGE_OFFSET)
#   define VA(x)	((x)+__PAGE_OFFSET)
#endif
/* Kernel virtual <-> physical conversion for the direct-mapped region:
 * a fixed offset of PAGE_OFFSET. */
#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))

/* Flat-mem_map pfn/page conversions; DISCONTIGMEM configs supply their
 * own versions elsewhere. */
#ifndef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn)	(mem_map + (pfn))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif /* CONFIG_DISCONTIGMEM */

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

/* Default protections for a new data mapping: fully accessible. */
#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
145 | ||
#endif /* __KERNEL__ */

/* Pull in the remaining generic page definitions not provided above
 * (NOTE(review): contents assumed from the asm-generic header — see
 * asm-generic/page.h for what it actually supplies). */
#include <asm-generic/page.h>

#endif /* _PARISC_PAGE_H */