#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h:  Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT		SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE		SRMMU_PGDIR_SIZE
#define PGDIR_MASK		SRMMU_PGDIR_MASK
#define PTRS_PER_PTE		1024
#define PTRS_PER_PMD		SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD		SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS	0UL
#define PTE_SIZE		(PTRS_PER_PTE*4)

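/* A quick sanity check on the geometry above (illustrative note,
 * assuming the usual sparc32 PAGE_SHIFT of 12, i.e. 4 KB pages):
 *
 *	PTE_SIZE = 1024 entries * 4 bytes = 4096 bytes, one full page
 *	PMD span = 1 << 22 = 4 MB, i.e. 1024 PTEs * 4 KB each
 *
 * so each page-table page maps exactly one PMD-sized region.
 */
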
#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init_mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

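/* The tables above follow the generic xwr protection-map convention:
 * __P<xwr> is used for private (potentially copy-on-write) mappings,
 * __S<xwr> for shared ones.  Note how every writable private combination
 * (__P010, __P011, __P110, __P111) maps to PAGE_COPY: the write bit is
 * withheld so the first store faults and the page can be copied, while
 * the shared equivalents map to PAGE_SHARED and stay writable.
 */
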
/* First physical page can be anywhere, the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}

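/* Illustrative note on the inline asm above: the SPARC V8 "swap"
 * instruction atomically exchanges a register with a 32-bit word in
 * memory.  The "0" constraint ties the input to output operand 0, so
 * the new value goes into memory and the previous memory contents come
 * back in the same register; the "memory" clobber keeps the compiler
 * from caching page-table contents across the exchange.
 */
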
/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

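/* Why pmd_clear() loops: a Linux-level page table here bundles several
 * smaller SRMMU hardware tables (PTRS_PER_PTE Linux entries versus
 * SRMMU_REAL_PTRS_PER_PTE hardware entries), so one Linux pmd carries
 * an array of hardware table pointers in pmdv[] and all of them must be
 * cleared together.  Illustrative arithmetic, assuming the usual values
 * of 1024 and 64: the loop clears 1024/64 = 16 descriptors.
 */
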
static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)	(pte)

#define pfn_pte(pfn, prot)	mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

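/* Decoding sketch for pte_pfn() above (illustrative, assuming 4 KB
 * pages): an SRMMU PTE stores the physical address shifted right by 4,
 * so the page frame number is recovered with a further shift of
 * PAGE_SHIFT - 4 = 12 - 4 = 8 bits once the low flag bits have been
 * masked off with SRMMU_PTE_PMASK.
 */
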
#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

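/* Encoding sketch for the constructors above (illustrative numbers
 * only): with 4 KB pages, a physical address of 0x12345000 becomes
 * 0x12345000 >> 4 = 0x01234500 in the PTE, leaving bits 31..28 free;
 * mk_pte_io() reuses those four bits for the I/O space number via
 * (space << 28), which is exactly what srmmu_device_memory() tests.
 */
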
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)	pte_offset_kernel(d, a)
#define pte_unmap(pte)		do { } while (0)

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT		0x1
#define FAULT_CODE_WRITE	0x2
#define FAULT_CODE_USER		0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

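/* Round-trip sketch for the swap-entry helpers above (illustrative
 * only; the SRMMU_SWP_* shift/mask constants are defined elsewhere in
 * the SRMMU headers):
 *
 *	swp_entry_t e = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(e);
 *	...
 *	e = __pte_to_swp_entry(pte);
 *	// __swp_type(e) == type, __swp_offset(e) == offset
 *
 * The masks keep type and offset in disjoint bit fields, so the round
 * trip is lossless for in-range values.
 */
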
static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model) {
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

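/* Granularity note for kern_addr_valid() (illustrative): shifting the
 * physical address right by 20 indexes sparc_valid_addr_bitmap with one
 * bit per 1 MB of physical memory, so validity is tracked at megabyte
 * granularity rather than per page.
 */
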
/*
 * For sparc32 and sparc64, the pfn in io_remap_pfn_range() carries <iospace>
 * in its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

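/* Worked example for the iospace macros (illustrative, with the
 * sparc32 value BITS_PER_LONG == 32): MK_IOSPACE_PFN(2, 0x345)
 * yields (2 << 28) | 0x345 = 0x20000345; GET_IOSPACE() recovers 2
 * from the top nibble and GET_PFN() masks it back off to 0x345.
 */
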
int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(*(__ptep), __entry); \
	if (__changed) { \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address); \
	} \
	__changed; \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */