arch/powerpc/include/asm/book3s/32/pgtable.h
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */
#include <asm/pte-common.h>

#define PTE_INDEX_SIZE  PTE_SHIFT
#define PMD_INDEX_SIZE  0
#define PUD_INDEX_SIZE  0
#define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE  0
#define PUD_TABLE_SIZE  0
#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif  /* __ASSEMBLY__ */

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table. The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

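/*
 * Worked example (a sketch for the common configuration only): with 4KB
 * pages (PAGE_SHIFT == 12) and 32-bit PTEs, PTE_SHIFT is 10, so:
 *
 *      PTE_INDEX_SIZE = 10   -> 1024 PTEs per PTE page (one 4KB page)
 *      PGDIR_SHIFT    = 22   -> each pgd entry maps 1 << 22 = 4MB
 *      PGD_INDEX_SIZE = 10   -> 1024 pgd entries cover the 4GB space
 *
 * The actual values follow PAGE_SHIFT and PTE_SHIFT from the kernel
 * configuration; the numbers above assume the default 4KB/32-bit case.
 */
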
#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)
/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP       PKMAP_BASE
#else
#define KVIRT_TOP       (0xfe000000UL)  /* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP     ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP     KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START). For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system. This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END     ioremap_bot

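/*
 * Illustrative layout (a sketch, assuming 512MB of lowmem, CONFIG_HIGHMEM=n
 * and a coherent cache, so KVIRT_TOP == IOREMAP_TOP == 0xfe000000):
 *
 *      0xc0000000                      kernel linear mapping of RAM
 *      high_memory   ~ 0xe0000000      top of lowmem
 *      VMALLOC_START ~ 0xe1000000      lowmem + 16MB, rounded to VMALLOC_OFFSET
 *      VMALLOC_END = ioremap_bot       grows down from IOREMAP_TOP as early
 *                                      ioremap()s are performed
 *      IOREMAP_TOP   = 0xfe000000
 *
 * The exact numbers depend on the platform, CONFIG_HIGHMEM and the amount
 * of RAM; this is only meant to show how the macros above relate.
 */
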
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>                     /* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS         0

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
                (unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
        do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
        *pmdp = __pmd(0);
}


/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
                            unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
                          unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
                             unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
                                       unsigned long clr,
                                       unsigned long set)
{
        unsigned long old, tmp;

        __asm__ __volatile__("\
1:      lwarx   %0,0,%3\n\
        andc    %1,%0,%4\n\
        or      %1,%1,%5\n"
        PPC405_ERR77(0,%3)
"       stwcx.  %1,0,%3\n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" (clr), "r" (set), "m" (*p)
        : "cc" );

        return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
                                            unsigned long clr,
                                            unsigned long set)
{
        unsigned long long old;
        unsigned long tmp;

        __asm__ __volatile__("\
1:      lwarx   %L0,0,%4\n\
        lwzx    %0,0,%3\n\
        andc    %1,%L0,%5\n\
        or      %1,%1,%6\n"
        PPC405_ERR77(0,%3)
"       stwcx.  %1,0,%4\n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
        : "cc" );

        return old;
}
#endif /* CONFIG_PTE_64BIT */

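/*
 * For reference, a sketch of what the lwarx/stwcx. loop above does, written
 * as plain (non-atomic) C; this only illustrates the semantics and is not a
 * replacement for the real function:
 *
 *      old = *p;
 *      *p  = (old & ~clr) | set;       (performed atomically on the low word)
 *      return old;
 *
 * Callers such as __ptep_test_and_clear_young() below rely on the returned
 * old value to decide whether a hash-table flush is needed.
 */
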
/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
        unsigned long old;
        old = pte_update(ptep, _PAGE_ACCESSED, 0);
        if (old & _PAGE_HASHPTE) {
                unsigned long ptephys = __pa(ptep) & PAGE_MASK;
                flush_hash_pages(context, addr, ptephys, 1);
        }
        return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
        __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
{
        pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
{
        ptep_set_wrprotect(mm, addr, ptep);
}


static inline void __ptep_set_access_flags(struct mm_struct *mm,
                                           pte_t *ptep, pte_t entry,
                                           unsigned long address)
{
        unsigned long set = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
        unsigned long clr = ~pte_val(entry) & _PAGE_RO;

        pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page. The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler). On everything else the pmd contains the physical address
 * of the pte page. -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)     \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)           \
        pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)     \
        ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)           \
        pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)   pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)      ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)              \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)    \
        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)       \
        ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)          kunmap_atomic(pte)

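/*
 * Usage sketch: a software walk of the two-level table for a kernel
 * address, using only the helpers defined above plus the generic nopmd
 * wrappers (pud_offset()/pmd_offset() collapse to the pgd entry here).
 * find_kernel_pte() is a hypothetical name, not part of this header:
 *
 *      static pte_t *find_kernel_pte(unsigned long addr)
 *      {
 *              pgd_t *pgd = pgd_offset_k(addr);
 *              pud_t *pud = pud_offset(pgd, addr);
 *              pmd_t *pmd = pmd_offset(pud, addr);
 *
 *              if (pmd_none(*pmd) || pmd_bad(*pmd))
 *                      return NULL;
 *              return pte_offset_kernel(pmd, addr);
 *      }
 */
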
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)               ((entry).val & 0x1f)
#define __swp_offset(entry)             ((entry).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 3 })

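/*
 * Worked example (a sketch; the shift values come straight from the macros
 * above): for swap type 2 and page offset 0x1234,
 *
 *      __swp_entry(2, 0x1234).val = 2 | (0x1234 << 5) = 0x24682
 *
 * __swp_entry_to_pte() then shifts this left by 3, keeping the low three
 * PTE bits (which include _PAGE_PRESENT and _PAGE_HASHPTE) clear, so a
 * swapped-out PTE can never appear present or hashed.
 */
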
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
                      pmd_t **pmdp);

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)          { return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_dirty(pte_t pte)          { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)          { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)        { return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)           { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)    { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
                     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return pte_val(pte) >> PTE_RPN_SHIFT;
}

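/*
 * Round-trip sketch (PAGE_KERNEL comes from pte-common.h; the snippet itself
 * is illustrative only): build a kernel PTE for a physical page and read the
 * frame number back out of it.
 *
 *      pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 *      pte_pfn(pte) == pfn             the RPN field survives unchanged
 *      pte_present(pte)                true, PAGE_KERNEL sets _PAGE_PRESENT
 */
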
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

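/*
 * Sketch of how pte_modify() is meant to be used, e.g. on the mprotect()
 * path (the surrounding code is simplified and purely illustrative): the
 * protection bits are replaced wholesale while everything covered by
 * _PAGE_CHG_MASK (the RPN plus bookkeeping bits such as _PAGE_DIRTY,
 * _PAGE_ACCESSED and _PAGE_HASHPTE) is carried over.
 *
 *      pte_t old = *ptep;
 *      pte_t new = pte_modify(old, vma->vm_page_prot);
 *
 *      pte_pfn(new)   == pte_pfn(old)
 *      pte_dirty(new) == pte_dirty(old)
 */
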

/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
        /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
         * helper pte_update() which does an atomic update. We need to do that
         * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
         * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
         * the hash bits instead (ie, same as the non-SMP case)
         */
        if (percpu)
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                              | (pte_val(pte) & ~_PAGE_HASHPTE));
        else
                pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
        /* Second case is 32-bit with 64-bit PTE. In this case, we
         * can just store as long as we do the two halves in the right order
         * with a barrier in between. This is possible because we take care,
         * in the hash code, to pre-invalidate if the PTE was already hashed,
         * which synchronizes us with any concurrent invalidation.
         * In the percpu case, we also fall back to the simple update preserving
         * the hash bits
         */
        if (percpu) {
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                              | (pte_val(pte) & ~_PAGE_HASHPTE));
                return;
        }
        if (pte_val(*ptep) & _PAGE_HASHPTE)
                flush_hash_entry(mm, ptep, addr);
        __asm__ __volatile__("\
                stw%U0%X0 %2,%0\n\
                eieio\n\
                stw%U0%X0 %L2,%1"
        : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
        : "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
        /* Third case is 32-bit hash table in UP mode, we need to preserve
         * the _PAGE_HASHPTE bit since we may not have invalidated the previous
         * translation in the hash yet (done in a subsequent flush_tlb_xxx())
         * and we need to keep track that this PTE needs invalidating
         */
        *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
#error "Not supported "
#endif
}

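/*
 * Call-path sketch (set_pte_at() itself lives outside this header, in the
 * common powerpc page-table code): normal mappers go through
 *
 *      set_pte_at(mm, addr, ptep, pte)  ->  __set_pte_at(mm, addr, ptep, pte, 0)
 *
 * while the kmap_atomic() highmem path passes percpu = 1, taking the simple
 * non-atomic update that only preserves _PAGE_HASHPTE.
 */
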

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
        return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
        return pgprot_noncached_wc(prot);
}

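/*
 * Typical use (a sketch; the driver-side code is illustrative, not taken
 * from this tree): device memory mapped into userspace is usually marked
 * uncacheable and guarded before the mapping is created, e.g.
 *
 *      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *      return remap_pfn_range(vma, vma->vm_start, pfn,
 *                             vma->vm_end - vma->vm_start,
 *                             vma->vm_page_prot);
 *
 * The _PAGE_NO_CACHE | _PAGE_GUARDED combination matches what non-cacheable
 * ioremap() mappings use on this platform.
 */
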
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */