/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 * and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
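
/*
 * Illustrative note (added commentary, not in the original source): the
 * subtraction biases the array so that indexing by an absolute pfn works
 * directly; with sparsemem-vmemmap, __pfn_to_page(pfn) reduces to array
 * arithmetic:
 *
 *	struct page *page = vmemmap + pfn;
 */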

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
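
/*
 * Worked example (added commentary; assumes 4K pages, where CONT_PTE_SIZE
 * is 64K): for addr = 0x11000 and end = 0x30000, __boundary evaluates to
 * 0x20000, so the range is clamped at the next contiguous-hint boundary.
 * Comparing (x - 1) keeps the test correct if __boundary wraps to 0 at
 * the top of the address space.
 */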

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0        |   1           0          0
 *   0      1        |   1           1          0
 *   1      0        |   1           0          1
 *   1      1        |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte, addr);

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}

#define __HAVE_ARCH_PTE_SAME
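/*
 * Added commentary: with hardware DBM, the CPU can clear PTE_RDONLY behind
 * our back when a write hits a writable-clean pte, so two reads of the
 * same present pte may differ in that bit alone. Ignore it when comparing
 * present ptes.
 */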
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	pteval_t lhs, rhs;

	lhs = pte_val(pte_a);
	rhs = pte_val(pte_b);

	if (pte_present(pte_a))
		lhs &= ~PTE_RDONLY;

	if (pte_present(pte_b))
		rhs &= ~PTE_RDONLY;

	return (lhs == rhs);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

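/*
 * Illustrative walk (added sketch, not part of the original header): the
 * accessors above compose into a software table walk for a kernel VA;
 * each level must be checked with *_none()/*_bad() before descending:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */
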
#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
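
/*
 * Illustrative use (added commentary): protection changes such as
 * mprotect() funnel through here, e.g.
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 *
 * Bits outside `mask` (memory attribute index, PTE_AF, software dirty)
 * survive the change; a hardware-dirtied pte is first made explicitly
 * dirty so the information is not lost when PTE_RDONLY is reinstated.
 */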

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
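/*
 * Added commentary: PTE_AF is cleared with a relaxed cmpxchg() loop so
 * that a concurrent hardware update of the pte (e.g. DBM clearing
 * PTE_RDONLY) is never lost; the loop retries until the exchange lands
 * on an unchanged pte.
 */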
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while preserving the hardware update of
 * the Access Flag.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	/*
	 * ptep_set_wrprotect() is only called on CoW mappings which are
	 * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE &&
	 * PTE_RDONLY) or writable and software-dirty (PTE_WRITE &&
	 * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and
	 * protection_map[]. There is no race with the hardware update of the
	 * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM)
	 * is set.
	 */
	VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep),
		     "%s: potential race with hardware DBM", __func__);
	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
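
/*
 * Worked example (added commentary): __swp_entry(3, 0x1234) packs to
 * (3 << 2) | (0x1234 << 8) = 0x12340c; __swp_type() and __swp_offset()
 * recover 3 and 0x1234, and bits 0-1 remain zero so the entry can never
 * be mistaken for a valid/present pte.
 */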

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */