/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>

#include <asm/pgtable-bits.h>

#ifndef __ASSEMBLY__

#ifdef CONFIG_MMU

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

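/*
 * Illustrative sizes: with 4 KiB pages, an 8-byte pgd_t on RV64 gives
 * PTRS_PER_PGD == 512, while a 4-byte pgd_t on RV32 gives 1024.
 */
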
/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(0)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ	\
				| _PAGE_WRITE	\
				| _PAGE_PRESENT	\
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)

extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

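/*
 * Illustrative example: mmap(..., PROT_READ|PROT_WRITE, MAP_PRIVATE, ...)
 * indexes __P011 and yields PAGE_COPY, so the pages start out read-only
 * and gain _PAGE_WRITE only after a copy-on-write fault.
 */
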
/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PRESENT);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Locate an entry in the page global directory */
static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
{
	return mm->pgd + pgd_index(addr);
}

/* Locate an entry in the kernel page global directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, (addr))

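/*
 * Illustrative example, assuming Sv39 on RV64 (PGDIR_SHIFT == 30):
 * pgd_offset(mm, 0x3f80001000UL) selects PGD slot
 * (0x3f80001000 >> 30) & 511 == 254.
 */
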
static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline pte_t mk_pte(struct page *page, pgprot_t prot)
{
	return pfn_pte(page_to_pfn(page), prot);
}

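/*
 * Illustrative example, with _PAGE_PFN_SHIFT == 10 as in the RISC-V
 * PTE format: pfn_pte(0x80200, PAGE_KERNEL) shifts the PFN above the
 * flag bits and ORs in the protection bits, i.e.
 * (0x80200 << 10) | _PAGE_KERNEL.
 */
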
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
}

#define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			((void)(pte))

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT);
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte)
		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

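/*
 * In the RISC-V page-table format a non-leaf entry has R, W and X all
 * clear; setting any of them marks a leaf.  A present leaf seen above
 * the last level is therefore a huge page.
 */
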
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

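/*
 * Illustrative example: pte_modify(pte, PAGE_READ) keeps the PFN and
 * the bits selected by _PAGE_CHG_MASK (e.g. accessed/dirty) while
 * replacing the permission bits, leaving a read-only user mapping.
 */
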
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

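/*
 * The exchange below must be atomic so that accessed/dirty updates
 * racing in from another CPU (or a hardware page-table walker) are
 * not lost between reading the PTE and clearing it.
 */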
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption.  [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway.  [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit            1:	reserved for future use (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

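/*
 * Worked example (illustrative): __swp_entry(3, 0x10) encodes to
 * (3 << 2) | (0x10 << 7) == 0x80c; __swp_type() then recovers 3 and
 * __swp_offset() recovers 0x10.  Bit 0 stays clear, so a swap entry
 * can never be mistaken for a present PTE.
 */
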
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1) /* FIXME */
#endif

extern void paging_init(void);

static inline void pgtable_cache_init(void)
{
	/* No page table caches to initialize */
}

#endif /* CONFIG_MMU */

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	(PAGE_OFFSET - 1)
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

/*
 * Task size is 0x4000000000 for RV64 or 0xb800000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE VMALLOC_START
#endif
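
/*
 * Illustrative check for RV64 (Sv39): PGDIR_SIZE is 1 GiB and
 * PTRS_PER_PGD is 512, so TASK_SIZE is 256 GiB == 0x4000000000.
 */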

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */