/*
 * Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
15 * The Linux memory management assumes a three-level page table setup.
16 * For s390 64 bit we use up to four of the five levels the hardware
17 * provides (region first tables are not used).
19 * The "pgd_xxx()" functions are trivial for a folded two-level
20 * setup: the pgd is never bad, and a pmd always exists (as it's folded
23 * This file contains the functions and defines necessary to modify and use
24 * the S390 page table tree.
27 #include <linux/sched.h>
28 #include <linux/mm_types.h>
29 #include <linux/page-flags.h>
30 #include <linux/radix-tree.h>
31 #include <linux/atomic.h>
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern void vmem_map_init(void);
pmd_t *vmem_pmd_alloc(void);
pte_t *vmem_pte_alloc(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
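
/*
 * Illustrative sketch (not part of the original header): with colored zero
 * pages, empty_zero_page is the start of a block of zero pages and
 * zero_page_mask selects the one whose cache color matches vaddr. E.g.
 * assuming four 4KB zero pages, zero_page_mask would be 0x3000, so
 * vaddr == 0x5000 picks the page at empty_zero_page + 0x1000.
 */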
/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map.
 * PGDIR_SHIFT determines what a third-level page table entry can map.
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define P4D_SHIFT	42
#define PGDIR_SHIFT	53

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define P4D_SIZE	(1UL << P4D_SHIFT)
#define P4D_MASK	(~(P4D_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * entries per page directory level: an s390 page table holds 256 pte
 * entries (2KB per page table), while the segment and region tables
 * each hold 2048 entries of 8 bytes.
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_P4D	2048
#define PTRS_PER_PGD	2048
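
/*
 * Illustrative arithmetic (not part of the original header), tying the
 * entry counts to the shifts above:
 *	PTRS_PER_PTE * PAGE_SIZE = 256 * 4KB  = 1MB = PMD_SIZE (segment)
 *	PTRS_PER_PMD * PMD_SIZE  = 2048 * 1MB = 2GB = PUD_SIZE (region 3)
 *	PTRS_PER_PUD * PUD_SIZE  = 2048 * 2GB = 4TB = P4D_SIZE (region 2)
 *	PTRS_PER_P4D * P4D_SIZE  = 2048 * 4TB = 8PB = PGDIR_SIZE (region 1)
 */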
#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
/*
 * A 64 bit pagetable entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has the following format:
 * |        P-table origin      |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin     |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Table type
 * TF Table offset
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has the following format:
 * |      region table origin    |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the
 * page table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
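
/*
 * Worked example (illustrative, not part of the original header): a
 * present, read-write, dirty, young pte has the software bits
 * _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_YOUNG
 * set and both hardware bits clear, i.e. (pte_val(pte) & 0xfff) == 0x03d,
 * matching the "read-write, dirty, young  .00.xx1111.1" row above.
 */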
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	    */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif
/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)
/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
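
/*
 * Illustrative note (not part of the original header): mmap() protection
 * bits index these tables as __P<xwr> for MAP_PRIVATE and __S<xwr> for
 * MAP_SHARED mappings. A private PROT_READ|PROT_WRITE mapping thus starts
 * out as __P011 == PAGE_RO: the write bit is withheld so that the first
 * store faults and triggers copy-on-write.
 */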
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}
static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}
#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL
static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
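
/*
 * Illustrative sketch (not part of the original header): crdte() wraps the
 * CRDTE instruction, a compare-and-replace of a DAT table entry with TLB
 * purging. A caller exchanging a segment table entry might do, e.g.:
 *
 *	crdte(pmd_val(old), pmd_val(new), (unsigned long)table,
 *	      CRDTE_DTT_SEGMENT, addr, mm->context.asce);
 *
 * where "old", "new", "table", "addr" and "mm" are assumed locals; this is
 * only meant to show how the dtt selector and asce pair with the arguments.
 */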
/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}
static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}
static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;

	return (pgd_val(pgd) & mask) != 0;
}
static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}
static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}
static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return pud_bad(__pud(p4d_val(p4d)));
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;

	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;

	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}
static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}
/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}
/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

static inline void __ptep_ipte(unsigned long address, pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
		: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
		  [m4] "i" (local) : "memory");
}
static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;

		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}
/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage + pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
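
/*
 * Worked example (illustrative, not part of the original header): for
 * address 0x8001234000 the index macros above yield
 *	pte_index: (0x8001234000 >> 12) & 0xff  = 0x34
 *	pmd_index: (0x8001234000 >> 20) & 0x7ff = 0x12
 *	pud_index: (0x8001234000 >> 31) & 0x7ff = 0x100
 * i.e. each level consumes its own slice of the virtual address.
 */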
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	p4d_t *p4d = (p4d_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		p4d = (p4d_t *) pgd_deref(*pgd);
	return p4d + p4d_index(address);
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	pud_t *pud = (pud_t *) p4d;

	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) p4d_deref(*p4d);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
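
/*
 * Illustrative sketch (not part of the original header): resolving an
 * address with the walk helpers above. "__example_walk" is an assumed
 * name; the real lookup logic lives in the generic mm code, and a real
 * walker must also check the *_none/*_bad states at every level.
 */
static inline pte_t *__example_walk(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* top-level entry */
	p4d_t *p4d = p4d_offset(pgd, addr);	/* folded if no region 1 */
	pud_t *pud = pud_offset(p4d, addr);	/* folded if no region 2 */
	pmd_t *pmd = pmd_offset(pud, addr);	/* segment table entry */

	return pte_offset_kernel(pmd, addr);	/* no none/bad checks here */
}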
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}
static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1
static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp, int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pmdp)
		: [r1] "a" (sto), [r2] "a" ((address & HPAGE_MASK)),
		  [m4] "i" (local)
		: "cc");
}

static inline void __pudp_idte(unsigned long address, pud_t *pudp, int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pudp)
		: [r1] "a" (r3o), [r2] "a" ((address & PUD_MASK)),
		  [m4] "i" (local)
		: "cc");
}
pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;

		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */
#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}
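
/*
 * Worked example (illustrative, not part of the original header):
 * __swp_entry(3, 0x1234) builds a pte with _PAGE_INVALID | _PAGE_PROTECT
 * set, the offset in bits 0-51 and the type in bits 57-61 of the diagram
 * above, so __swp_type() returns 3 and __swp_offset() returns 0x1234
 * again, while (pte & 0x201) == 0x200 still identifies it as a swap pte.
 */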
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)
extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */