/*
 *  S390 version
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
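/*
 * Editorial note (illustration, not part of the original header): with
 * __HAVE_COLOR_ZERO_PAGE there is a set of zero pages and the one
 * returned depends on the cache color of the virtual address, e.g.
 *
 *	struct page *zp = ZERO_PAGE(vaddr);
 *
 * Addresses that differ within zero_page_mask may thus map to
 * different, but all-zero, pages.
 */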
/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
/*
 * A 64 bit page table entry of S390 has the following format:
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |	P-table origin	      |	 TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 *
 * A 64 bit region table entry of S390 has the following format:
 * |	S-table origin	      |	 TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
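/*
 * Worked example (editorial illustration, not from the original file):
 * a young, clean, read-only pte built from PAGE_RO decodes as
 * _PAGE_PROTECT (0x200) | _PAGE_NOEXEC (0x100) | _PAGE_READ (0x010) |
 * _PAGE_YOUNG (0x004) | _PAGE_PRESENT (0x001) == 0x315, which matches
 * the "read-only, clean, young" pattern .01.xx0101.1 above.
 */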
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	    */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif
#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES
/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)
/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
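/*
 * Editorial example (illustration only): a private VM_READ|VM_WRITE
 * mapping selects __P011 == PAGE_RO, so the first store faults and is
 * resolved by copy-on-write; the shared variant __S011 == PAGE_RW
 * permits the store directly.
 */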
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}
#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
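/*
 * Hypothetical usage sketch (editorial, not from the original file):
 * atomically compare-and-replace a segment table entry and flush it,
 * given old/new entry values, the table origin and the asce:
 *
 *	crdte(old, new, table, CRDTE_DTT_SEGMENT, addr, asce);
 */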
/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}
static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}
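/*
 * Editorial note: with the definitions above the mask evaluates to
 * 0x7ff & ~0x20 & ~0x0c & ~0x03 == 0x7d0, i.e. every low bit that is
 * neither part of the origin nor a valid invalid/type/length bit.
 */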
static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}
static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return pud_bad(__pud(p4d_val(p4d)));
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}
static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;

	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;

	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}
static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */
static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
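/*
 * Illustrative use (editorial): generic mprotect code changes the
 * protection of a present pte while preserving _PAGE_CHG_MASK, e.g.
 *
 *	pte = pte_modify(pte, PAGE_RO);
 */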
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800
static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
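/*
 * Sketch of the sequence described above (editorial illustration):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);
 *	set_pte_at(mm, addr, ptep, new_pte);
 *	flush_tlb_range(vma, start, end);
 *
 * On s390 step 1 has already flushed the TLB, so step 3 may do nothing.
 */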
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;

		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}
/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	p4d_t *p4d = (p4d_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		p4d = (p4d_t *) pgd_deref(*pgd);
	return p4d + p4d_index(address);
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	pud_t *pud = (pud_t *) p4d;

	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) p4d_deref(*p4d);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
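/*
 * Illustrative walk (editorial sketch, assuming a mapped address):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *
 * With folded levels the upper helpers return their argument cast to
 * the next level, as implemented above.
 */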
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000
static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}
pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;

		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush
#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2
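/*
 * Worked example (editorial): __swp_entry(type, offset) yields a pte
 * value of _PAGE_INVALID | _PAGE_PROTECT |
 * ((offset & __SWP_OFFSET_MASK) << 12) | ((type & 0x1f) << 2),
 * so (pte & 0x201) == 0x200 and pte_swap() recognizes the entry.
 */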
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */