/*
 *  S390 version
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <asm/bug.h>
#include <asm/page.h>
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
        (virt_to_page((void *)(empty_zero_page + \
         (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
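
/*
 * Illustrative sketch (not part of the original header): ZERO_PAGE()
 * implements cache coloring for the shared zero page. empty_zero_page is
 * the start of a block of pre-zeroed pages and zero_page_mask selects the
 * page whose cache color matches the user address, so zero-mapped areas
 * can share these pages without cache aliasing. The function name below
 * is hypothetical.
 */
static inline struct page *example_colored_zero_page(unsigned long vaddr)
{
        return ZERO_PAGE(vaddr);        /* zero page with the color of vaddr */
}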
/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map.
 * PGDIR_SHIFT determines what a third-level page table entry can map.
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT      20
# define PUD_SHIFT      20
# define PGDIR_SHIFT    20
#else /* CONFIG_64BIT */
# define PMD_SHIFT      20
# define PUD_SHIFT      31
# define PGDIR_SHIFT    42
#endif /* CONFIG_64BIT */
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE        (1UL << PUD_SHIFT)
#define PUD_MASK        (~(PUD_SIZE-1))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
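
/*
 * Illustrative note (not part of the original header): with the shifts
 * above a pmd (segment) entry maps 1 MB on both 31 and 64 bit, while on
 * 64 bit a pud (region third) entry maps 2 GB and a pgd (region second)
 * entry maps 4 TB. The check below restates that arithmetic in a form
 * that is still safe for assembly inclusion.
 */
#if PMD_SHIFT != 20
# error PMD_SHIFT is expected to be 20 (a 1 MB segment)
#endif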
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390 segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE    256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD    1
#define PTRS_PER_PUD    1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD    2048
#define PTRS_PER_PUD    2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD    2048

#define FIRST_USER_ADDRESS  0UL
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and
 * modules. On 64 bit kernels we have a 2GB area at the top of the vmalloc
 * area where modules will reside. That makes sure that inter module branches
 * always happen without trampolines and in addition the placement within a
 * 2GB frame is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR   MODULES_VADDR
#define MODULES_END     MODULES_END
#define MODULES_LEN     (1UL << 31)
#endif
static inline int is_module_addr(void *addr)
{
#ifdef CONFIG_64BIT
        BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
        if (addr < (void *)MODULES_VADDR)
                return 0;
        if (addr > (void *)MODULES_END)
                return 0;
#endif
        return 1;
}
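
/*
 * Illustrative sketch (not part of the original header): a caller such as
 * a backtrace or code-patching helper would use is_module_addr() to tell
 * module text in the dedicated 2GB area from other kernel addresses. The
 * function name below is hypothetical.
 */
static inline int example_in_module_area(unsigned long addr)
{
        /* On 31 bit there is no separate module area, so this is always 1 */
        return is_module_addr((void *) addr);
}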
/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |                     PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_PROTECT   0x200           /* HW read-only bit */
#define _PAGE_INVALID   0x400           /* HW invalid bit */
#define _PAGE_LARGE     0x800           /* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT   0x001           /* SW pte present bit */
#define _PAGE_TYPE      0x002           /* SW pte type bit */
#define _PAGE_YOUNG     0x004           /* SW pte young bit */
#define _PAGE_DIRTY     0x008           /* SW pte dirty bit */
#define _PAGE_READ      0x010           /* SW pte read bit */
#define _PAGE_WRITE     0x020           /* SW pte write bit */
#define _PAGE_SPECIAL   0x040           /* SW associated with special page */
#define _PAGE_UNUSED    0x080           /* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK          (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
                                 _PAGE_YOUNG)
/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the
 * page table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte:
 *
 *                              842100000000
 *                              000084210000
 *                              000000008421
 *                              .IR...wrdytp
 * empty                        .10...000000
 * swap                         .10...xxxx10
 * prot-none, clean, old        .11...000001
 * prot-none, clean, young      .11...000101
 * prot-none, dirty, old        .10...001001
 * prot-none, dirty, young      .10...001101
 * read-only, clean, old        .11...010001
 * read-only, clean, young      .01...010101
 * read-only, dirty, old        .11...011001
 * read-only, dirty, young      .01...011101
 * read-write, clean, old       .11...110001
 * read-write, clean, young     .01...110101
 * read-write, dirty, old       .10...111001
 * read-write, dirty, young     .00...111101
 *
 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
 * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
 * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
 */
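
/*
 * Illustrative sketch (not part of the original header): how the 0x603
 * and 0x402 constants quoted above derive from the bit defines. The real
 * predicates (pte_present() and friends) appear further down in this
 * file; the helper name here is hypothetical.
 */
static inline int example_pte_is_swap(pte_t pte)
{
        /* .10...xxxx10: invalid and type set, protect and present clear */
        unsigned long mask = _PAGE_INVALID | _PAGE_PROTECT |
                             _PAGE_TYPE | _PAGE_PRESENT;        /* 0x603 */

        return (pte_val(pte) & mask) == (_PAGE_INVALID | _PAGE_TYPE); /* 0x402 */
}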
#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH      0x80000000UL    /* space switch event */
#define _ASCE_ORIGIN_MASK       0x7ffff000UL    /* segment table origin */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_TABLE_LENGTH      0x7f    /* 128 x 64 entries = 8k */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS     0x7fffffffUL    /* Valid segment table bits */
#define _SEGMENT_ENTRY_ORIGIN   0x7fffffc0UL    /* page table origin */
#define _SEGMENT_ENTRY_PROTECT  0x200   /* page protection bit */
#define _SEGMENT_ENTRY_INVALID  0x20    /* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON   0x10    /* common segment bit */
#define _SEGMENT_ENTRY_PTL      0x0f    /* page table length */

#define _SEGMENT_ENTRY_DIRTY    0       /* No sw dirty bit for 31-bit */
#define _SEGMENT_ENTRY_YOUNG    0       /* No sw young bit for 31-bit */
#define _SEGMENT_ENTRY_READ     0       /* No sw read bit for 31-bit */
#define _SEGMENT_ENTRY_WRITE    0       /* No sw write bit for 31-bit */
#define _SEGMENT_ENTRY_LARGE    0       /* No large pages for 31-bit */
#define _SEGMENT_ENTRY_BITS_LARGE 0
#define _SEGMENT_ENTRY_ORIGIN_LARGE 0

#define _SEGMENT_ENTRY          (_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INVALID)
/*
 * Segment table entry encoding (I = invalid, R = read-only bit):
 *              ..R...I.....
 * prot-none    ..1...1.....
 * read-only    ..1...0.....
 * read-write   ..0...0.....
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS  0xf0000000UL
#define PGSTE_FP_BIT    0x08000000UL
#define PGSTE_PCL_BIT   0x00800000UL
#define PGSTE_HR_BIT    0x00400000UL
#define PGSTE_HC_BIT    0x00200000UL
#define PGSTE_GR_BIT    0x00040000UL
#define PGSTE_GC_BIT    0x00020000UL
#define PGSTE_UC_BIT    0x00008000UL    /* user dirty (migration) */
#define PGSTE_IN_BIT    0x00004000UL    /* IPTE notify bit */
#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN            ~0xfffUL /* segment table origin */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_SPACE_SWITCH      0x40    /* space switch event */
#define _ASCE_REAL_SPACE        0x20    /* real space control */
#define _ASCE_TYPE_MASK         0x0c    /* asce table type mask */
#define _ASCE_TYPE_REGION1      0x0c    /* region first table type */
#define _ASCE_TYPE_REGION2      0x08    /* region second table type */
#define _ASCE_TYPE_REGION3      0x04    /* region third table type */
#define _ASCE_TYPE_SEGMENT      0x00    /* segment table type */
#define _ASCE_TABLE_LENGTH      0x03    /* region table length */
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN    ~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_PROTECT   0x200   /* region protection bit */
#define _REGION_ENTRY_INVALID   0x20    /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c    /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1   0x0c    /* region first table type */
#define _REGION_ENTRY_TYPE_R2   0x08    /* region second table type */
#define _REGION_ENTRY_TYPE_R3   0x04    /* region third table type */
#define _REGION_ENTRY_LENGTH    0x03    /* region third length */

#define _REGION1_ENTRY          (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY          (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY          (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE    0x400   /* RTTE-format control, large page */
#define _REGION3_ENTRY_RO       0x200   /* page protection bit */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS         0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE   0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffUL /* segment table origin */
#define _SEGMENT_ENTRY_PROTECT  0x200   /* page protection bit */
#define _SEGMENT_ENTRY_INVALID  0x20    /* invalid segment table entry */

#define _SEGMENT_ENTRY          (0)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY    0x2000  /* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG    0x1000  /* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT    0x0800  /* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE    0x0400  /* STE-format control, large page */
#define _SEGMENT_ENTRY_READ     0x0002  /* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE    0x0001  /* SW segment write bit */
/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *                              dy..R...I...wr
 * prot-none, clean, old        00..1...1...00
 * prot-none, clean, young      01..1...1...00
 * prot-none, dirty, old        10..1...1...00
 * prot-none, dirty, young      11..1...1...00
 * read-only, clean, old        00..1...1...01
 * read-only, clean, young      01..1...0...01
 * read-only, dirty, old        10..1...1...01
 * read-only, dirty, young      11..1...0...01
 * read-write, clean, old       00..1...1...11
 * read-write, clean, young     01..1...0...11
 * read-write, dirty, old       10..0...1...11
 * read-write, dirty, young     11..0...0...11
 *
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 */
#define _SEGMENT_ENTRY_SPLIT_BIT 11     /* THP splitting bit number */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS  0xf000000000000000UL
#define PGSTE_FP_BIT    0x0800000000000000UL
#define PGSTE_PCL_BIT   0x0080000000000000UL
#define PGSTE_HR_BIT    0x0040000000000000UL
#define PGSTE_HC_BIT    0x0020000000000000UL
#define PGSTE_GR_BIT    0x0004000000000000UL
#define PGSTE_GC_BIT    0x0002000000000000UL
#define PGSTE_UC_BIT    0x0000800000000000UL    /* user dirty (migration) */
#define PGSTE_IN_BIT    0x0000400000000000UL    /* IPTE notify bit */
#endif /* CONFIG_64BIT */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO         0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK   0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS         (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
                                 _ASCE_ALT_EVENT)
/*
 * Page protection definitions.
 */
#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ       __pgprot(_PAGE_PRESENT | _PAGE_READ | \
                                 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE      __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
                                 _PAGE_PROTECT)
/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_READ
#define __P010  PAGE_READ
#define __P011  PAGE_READ
#define __P100  PAGE_READ
#define __P101  PAGE_READ
#define __P110  PAGE_READ
#define __P111  PAGE_READ

#define __S000  PAGE_NONE
#define __S001  PAGE_READ
#define __S010  PAGE_WRITE
#define __S011  PAGE_WRITE
#define __S100  PAGE_READ
#define __S101  PAGE_READ
#define __S110  PAGE_WRITE
#define __S111  PAGE_WRITE
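
/*
 * Illustrative sketch (not part of the original header): how generic mm
 * code indexes these tables. The low three index bits are VM_READ,
 * VM_WRITE and VM_EXEC, bit 3 (VM_SHARED) selects the __S over the
 * private, copy-on-write __P variants; the real table is protection_map[]
 * in mm/mmap.c. The helper name below is hypothetical.
 */
static inline pgprot_t example_prot_for(unsigned long vm_flags)
{
        pgprot_t prot[16] = {
                __P000, __P001, __P010, __P011,
                __P100, __P101, __P110, __P111,
                __S000, __S001, __S010, __S011,
                __S100, __S101, __S110, __S111
        };

        return prot[vm_flags & 0xf];
}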
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE    __pgprot(_SEGMENT_ENTRY_INVALID | \
                                 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ    __pgprot(_SEGMENT_ENTRY_PROTECT | \
                                 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE   __pgprot(_SEGMENT_ENTRY_READ | \
                                 _SEGMENT_ENTRY_WRITE)
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (unlikely(mm->context.has_pgste))
                return 1;
#endif
        return 0;
}
/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (mm->context.use_skey)
                return 1;
#endif
        return 0;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_large(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
        if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
                return 1;
        return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
        if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
                return 0;
        return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
        /*
         * With dynamic page table levels the pgd can be a region table
         * entry or a segment table entry. Check for the bits that are
         * invalid for either table entry.
         */
        unsigned long mask =
                ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
                ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
        return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
                return 1;
        return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
                return 0;
        return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
                return 0;
        return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
        /*
         * With dynamic page table levels the pud can be a region table
         * entry or a segment table entry. Check for the bits that are
         * invalid for either table entry.
         */
        unsigned long mask =
                ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
                ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
        return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */
static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        unsigned long origin_mask;

        origin_mask = _SEGMENT_ENTRY_ORIGIN;
        if (pmd_large(pmd))
                origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
        return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
        if (pmd_large(pmd))
                return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
        return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
        int dirty = 1;

        if (pmd_large(pmd))
                dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
        return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
        int young = 1;

        if (pmd_large(pmd))
                young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
        return young;
}
static inline int pte_present(pte_t pte)
{
        /* Bit pattern: (pte & 0x001) == 0x001 */
        return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
        /* Bit pattern: pte == 0x400 */
        return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
        /* Bit pattern: (pte & 0x603) == 0x402 */
        return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
                                _PAGE_TYPE | _PAGE_PRESENT))
                == (_PAGE_INVALID | _PAGE_TYPE);
}
static inline int pte_special(pte_t pte)
{
        return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return pte_val(a) == pte_val(b);
}
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
        unsigned long new = 0;
#ifdef CONFIG_PGSTE
        unsigned long old;

        preempt_disable();
        asm(
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       nihh    %0,0xff7f\n"    /* clear PCL bit in old */
                "       oihh    %1,0x0080\n"    /* set PCL bit in new */
                "       csg     %0,%1,%2\n"
                "       jl      0b\n"
                : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
                : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
        return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        asm(
                "       nihh    %1,0xff7f\n"    /* clear PCL bit */
                "       stg     %1,%0\n"
                : "=Q" (ptep[PTRS_PER_PTE])
                : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
                : "cc", "memory");
        preempt_enable();
#endif
}
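
/*
 * Illustrative sketch (not part of the original header): with CONFIG_PGSTE
 * every page table is allocated with a shadow array of page status table
 * entries placed directly behind its 256 ptes, which is why the functions
 * above and below address the pgste as ptep[PTRS_PER_PTE]. The helper name
 * is hypothetical.
 */
static inline unsigned long *example_pgste_slot(pte_t *ptep)
{
        return (unsigned long *) (ptep + PTRS_PER_PTE);
}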
static inline pgste_t pgste_get(pte_t *ptep)
{
        unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
        pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
        return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
                                       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        unsigned long address, bits, skey;

        if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
                return pgste;
        address = pte_val(*ptep) & PAGE_MASK;
        skey = (unsigned long) page_get_storage_key(address);
        bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
        /* Transfer page changed & referenced bit to guest bits in pgste */
        pgste_val(pgste) |= bits << 48;         /* GR bit & GC bit */
        /* Copy page access key and fetch protection bit to pgste */
        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
        pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
        return pgste;
}
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
                                 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        unsigned long address;
        unsigned long nkey;

        if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
                return;
        VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
        address = pte_val(entry) & PAGE_MASK;
        /*
         * Set page access key and fetch protection bit from pgste.
         * The guest C/R information is still in the PGSTE, set real
         * key C/R to 0.
         */
        nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
        nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
        page_set_storage_key(address, nkey, 0);
#endif
}
static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
        if ((pte_val(entry) & _PAGE_PRESENT) &&
            (pte_val(entry) & _PAGE_WRITE) &&
            !(pte_val(entry) & _PAGE_INVALID)) {
                if (!MACHINE_HAS_ESOP) {
                        /*
                         * Without enhanced suppression-on-protection force
                         * the dirty bit on for all writable ptes.
                         */
                        pte_val(entry) |= _PAGE_DIRTY;
                        pte_val(entry) &= ~_PAGE_PROTECT;
                }
                if (!(pte_val(entry) & _PAGE_PROTECT))
                        /* This pte allows write access, set user-dirty */
                        pgste_val(pgste) |= PGSTE_UC_BIT;
        }
        *ptep = entry;
        return pgste;
}
/**
 * struct gmap_struct - guest address space
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
        struct list_head list;
        struct list_head crst_list;
        struct mm_struct *mm;
        struct radix_tree_root guest_to_host;
        struct radix_tree_root host_to_guest;
        spinlock_t guest_table_lock;
        unsigned long *table;
        unsigned long asce;
        unsigned long asce_end;
        void *private;
        bool pfault_enabled;
};
/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
        struct list_head list;
        void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
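
/*
 * Illustrative sketch (not part of the original header): how a hypervisor
 * like KVM might set up a guest address space with the gmap API above.
 * All numbers are made up, error handling is minimal and the function
 * name is hypothetical; gmap_map_segment() expects megabyte aligned
 * addresses and lengths.
 */
static inline struct gmap *example_gmap_setup(struct mm_struct *mm)
{
        struct gmap *gmap;

        gmap = gmap_alloc(mm, (1UL << 44) - 1); /* guest address limit */
        if (!gmap)
                return NULL;
        /* back guest real 0..16MB with the host mapping at 256MB */
        if (gmap_map_segment(gmap, 0x10000000UL, 0x0UL, 0x1000000UL)) {
                gmap_free(gmap);
                return NULL;
        }
        return gmap;
}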
void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
                                        unsigned long addr,
                                        pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        if (pgste_val(pgste) & PGSTE_IN_BIT) {
                pgste_val(pgste) &= ~PGSTE_IN_BIT;
                gmap_do_ipte_notify(mm, addr, ptep);
        }
#endif
        return pgste;
}
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t entry)
{
        pgste_t pgste;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
                pgste_set_key(ptep, pgste, entry, mm);
                pgste = pgste_set_pte(ptep, pgste, entry);
                pgste_set_unlock(ptep, pgste);
        } else
                *ptep = entry;
}
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
        return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
        return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
        return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
        return pte_val(pte) & _PAGE_UNUSED;
}
/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_val(*ptep) = _PAGE_INVALID;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) &= _PAGE_CHG_MASK;
        pte_val(pte) |= pgprot_val(newprot);
        /*
         * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
         * invalid bit set, clear it again for readable, young pages
         */
        if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
                pte_val(pte) &= ~_PAGE_INVALID;
        /*
         * newprot for PAGE_READ and PAGE_WRITE has the page protection
         * bit set, clear it again for writable, dirty pages
         */
        if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_WRITE;
        pte_val(pte) |= _PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        if (pte_val(pte) & _PAGE_DIRTY)
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_DIRTY;
        pte_val(pte) |= _PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_DIRTY;
        if (pte_val(pte) & _PAGE_WRITE)
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_YOUNG;
        pte_val(pte) |= _PAGE_INVALID;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_YOUNG;
        if (pte_val(pte) & _PAGE_READ)
                pte_val(pte) &= ~_PAGE_INVALID;
        return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        pte_val(pte) |= _PAGE_SPECIAL;
        return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
        pte_val(pte) |= _PAGE_LARGE;
        return pte;
}
#endif
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
        unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
        /* pto in ESA mode must point to the start of the segment table */
        pto &= 0x7ffffc00;
#endif
        /* Invalidation + global TLB flush for the pte */
        asm volatile(
                "       ipte    %2,%3"
                : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
        unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
        /* pto in ESA mode must point to the start of the segment table */
        pto &= 0x7ffffc00;
#endif
        /* Invalidation + local TLB flush for the pte */
        asm volatile(
                "       .insn rrf,0xb2210000,%2,%3,0,1"
                : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
        unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
        /* pto in ESA mode must point to the start of the segment table */
        pto &= 0x7ffffc00;
#endif
        /* Invalidate a range of ptes + global TLB flush of the ptes */
        do {
                asm volatile(
                        "       .insn rrf,0xb2210000,%2,%0,%1,0"
                        : "+a" (address), "+a" (nr) : "a" (pto) : "memory");
        } while (nr != 255);
}
static inline void ptep_flush_direct(struct mm_struct *mm,
                                     unsigned long address, pte_t *ptep)
{
        int active, count;

        if (pte_val(*ptep) & _PAGE_INVALID)
                return;
        active = (mm == current->active_mm) ? 1 : 0;
        count = atomic_add_return(0x10000, &mm->context.attach_count);
        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                __ptep_ipte_local(address, ptep);
        else
                __ptep_ipte(address, ptep);
        atomic_sub(0x10000, &mm->context.attach_count);
}
static inline void ptep_flush_lazy(struct mm_struct *mm,
                                   unsigned long address, pte_t *ptep)
{
        int active, count;

        if (pte_val(*ptep) & _PAGE_INVALID)
                return;
        active = (mm == current->active_mm) ? 1 : 0;
        count = atomic_add_return(0x10000, &mm->context.attach_count);
        if ((count & 0xffff) <= active) {
                pte_val(*ptep) |= _PAGE_INVALID;
                mm->context.flush_mm = 1;
        } else
                __ptep_ipte(address, ptep);
        atomic_sub(0x10000, &mm->context.attach_count);
}
/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
                                                 unsigned long addr,
                                                 pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;
        int dirty;

        if (!mm_has_pgste(mm))
                return 0;
        pgste = pgste_get_lock(ptep);
        dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
        pgste_val(pgste) &= ~PGSTE_UC_BIT;
        pte = *ptep;
        if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
                pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
                __ptep_ipte(addr, ptep);
                if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
                        pte_val(pte) |= _PAGE_PROTECT;
                else
                        pte_val(pte) |= _PAGE_INVALID;
                *ptep = pte;
        }
        pgste_set_unlock(ptep, pgste);
        return dirty;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte, oldpte;
        int young;

        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
        }

        oldpte = pte = *ptep;
        ptep_flush_direct(vma->vm_mm, addr, ptep);
        young = pte_young(pte);
        pte = pte_mkold(pte);

        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
                pgste = pgste_set_pte(ptep, pgste, pte);
                pgste_set_unlock(ptep, pgste);
        } else
                *ptep = pte;

        return young;
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        return ptep_test_and_clear_young(vma, address, ptep);
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long address, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(mm, address, ptep, pgste);
        }

        pte = *ptep;
        ptep_flush_lazy(mm, address, ptep);
        pte_val(*ptep) = _PAGE_INVALID;

        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste, mm);
                pgste_set_unlock(ptep, pgste);
        }
        return pte;
}
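
/*
 * Illustrative sketch (not part of the original header) of the common-code
 * sequence described in the comment above, as e.g. change_pte_range()
 * performs it; the names below are hypothetical.
 */
static inline void example_change_prot(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep,
                                       pgprot_t newprot)
{
        pte_t pte;

        pte = ptep_get_and_clear(mm, addr, ptep); /* 1) flushes the TLB too */
        pte = pte_modify(pte, newprot);
        set_pte_at(mm, addr, ptep, pte);          /* 2) */
        /* 3) flush_tlb_range() is a nop on s390; the flush already happened */
}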
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
                                           unsigned long address,
                                           pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste_ipte_notify(mm, address, ptep, pgste);
        }

        pte = *ptep;
        ptep_flush_lazy(mm, address, ptep);

        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste, mm);
                pgste_set(ptep, pgste);
        }
        return pte;
}
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                                           unsigned long address,
                                           pte_t *ptep, pte_t pte)
{
        pgste_t pgste;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get(ptep);
                pgste_set_key(ptep, pgste, pte, mm);
                pgste = pgste_set_pte(ptep, pgste, pte);
                pgste_set_unlock(ptep, pgste);
        } else
                *ptep = pte;
}
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
                                     unsigned long address, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;

        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
        }

        pte = *ptep;
        ptep_flush_direct(vma->vm_mm, address, ptep);
        pte_val(*ptep) = _PAGE_INVALID;

        if (mm_has_pgste(vma->vm_mm)) {
                if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
                    _PGSTE_GPS_USAGE_UNUSED)
                        pte_val(pte) |= _PAGE_UNUSED;
                pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
                pgste_set_unlock(ptep, pgste);
        }
        return pte;
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long address,
                                            pte_t *ptep, int full)
{
        pgste_t pgste;
        pte_t pte;

        if (!full && mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(mm, address, ptep, pgste);
        }

        pte = *ptep;
        if (!full)
                ptep_flush_lazy(mm, address, ptep);
        pte_val(*ptep) = _PAGE_INVALID;

        if (!full && mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste, mm);
                pgste_set_unlock(ptep, pgste);
        }
        return pte;
}
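
/*
 * Illustrative sketch (not part of the original header): the batched unmap
 * path can pass full==1 because tlb_gather_mmu already guarantees that no
 * other CPU accesses the ptes, so the flush can be skipped entirely; the
 * names below are hypothetical.
 */
static inline pte_t example_batched_clear(struct mm_struct *mm,
                                          unsigned long addr, pte_t *ptep,
                                          int tlb_fullmm)
{
        /* with full==1 this degenerates to a simple pte clear, no flush */
        return ptep_get_and_clear_full(mm, addr, ptep, tlb_fullmm);
}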
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
                                       unsigned long address, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte = *ptep;

        if (pte_write(pte)) {
                if (mm_has_pgste(mm)) {
                        pgste = pgste_get_lock(ptep);
                        pgste = pgste_ipte_notify(mm, address, ptep, pgste);
                }

                ptep_flush_lazy(mm, address, ptep);
                pte = pte_wrprotect(pte);

                if (mm_has_pgste(mm)) {
                        pgste = pgste_set_pte(ptep, pgste, pte);
                        pgste_set_unlock(ptep, pgste);
                } else
                        *ptep = pte;
        }
        return pte;
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep,
                                        pte_t entry, int dirty)
{
        pgste_t pgste;

        if (pte_same(*ptep, entry))
                return 0;
        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
        }

        ptep_flush_direct(vma->vm_mm, address, ptep);

        if (mm_has_pgste(vma->vm_mm)) {
                pgste_set_key(ptep, pgste, entry, vma->vm_mm);
                pgste = pgste_set_pte(ptep, pgste, entry);
                pgste_set_unlock(ptep, pgste);
        } else
                *ptep = entry;
        return 1;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t __pte;

        pte_val(__pte) = physpage + pgprot_val(pgprot);
        return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        unsigned long physpage = page_to_phys(page);
        pte_t __pte = mk_pte_phys(physpage, pgprot);

        if (pte_write(__pte) && PageDirty(page))
                __pte = pte_mkdirty(__pte);
        return __pte;
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        pud_t *pud = (pud_t *) pgd;

        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                pud = (pud_t *) pgd_deref(*pgd);
        return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        pmd_t *pmd = (pmd_t *) pud;

        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pmd = (pmd_t *) pud_deref(*pud);
        return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
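
/*
 * Illustrative sketch (not part of the original header): a software walk
 * down to the pte of a user address with the macros above. Locking is
 * elided and the function name is hypothetical.
 */
static inline pte_t *example_walk_to_pte(struct mm_struct *mm,
                                         unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return NULL;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || pud_bad(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        return pte_offset_map(pmd, addr);
}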
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
        /*
         * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
         * Convert to segment table entry format.
         */
        if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
                return pgprot_val(SEGMENT_NONE);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
                return pgprot_val(SEGMENT_READ);
        return pgprot_val(SEGMENT_WRITE);
}
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
        if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
                return pmd;
        pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
        return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
                pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        }
        return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
                if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
                        pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
        }
        return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
                if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
                        pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
        }
        return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
                pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
        }
        return pmd;
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
                        _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
                        _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
                pmd_val(pmd) |= massage_pgprot_pmd(newprot);
                if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
                        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
                if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
                        pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
                return pmd;
        }
        pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
        pmd_val(pmd) |= massage_pgprot_pmd(newprot);
        return pmd;
}
static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
        pmd_t __pmd;

        pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
        return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
static inline void __pmdp_csp(pmd_t *pmdp)
{
        register unsigned long reg2 asm("2") = pmd_val(*pmdp);
        register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
                                               _SEGMENT_ENTRY_INVALID;
        register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

        asm volatile(
                "       csp %1,%3"
                : "=m" (*pmdp)
                : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}
static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
        unsigned long sto;

        sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
        asm volatile(
                "       .insn rrf,0xb98e0000,%2,%3,0,0"
                : "=m" (*pmdp)
                : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
                : "cc" );
}

static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
        unsigned long sto;

        sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
        asm volatile(
                "       .insn rrf,0xb98e0000,%2,%3,0,1"
                : "=m" (*pmdp)
                : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
                : "cc" );
}
static inline void pmdp_flush_direct(struct mm_struct *mm,
                                     unsigned long address, pmd_t *pmdp)
{
        int active, count;

        if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
                return;
        if (!MACHINE_HAS_IDTE) {
                __pmdp_csp(pmdp);
                return;
        }
        active = (mm == current->active_mm) ? 1 : 0;
        count = atomic_add_return(0x10000, &mm->context.attach_count);
        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                __pmdp_idte_local(address, pmdp);
        else
                __pmdp_idte(address, pmdp);
        atomic_sub(0x10000, &mm->context.attach_count);
}
static inline void pmdp_flush_lazy(struct mm_struct *mm,
                                   unsigned long address, pmd_t *pmdp)
{
        int active, count;

        if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
                return;
        active = (mm == current->active_mm) ? 1 : 0;
        count = atomic_add_return(0x10000, &mm->context.attach_count);
        if ((count & 0xffff) <= active) {
                pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
                mm->context.flush_mm = 1;
        } else if (MACHINE_HAS_IDTE)
                __pmdp_idte(address, pmdp);
        else
                __pmdp_csp(pmdp);
        atomic_sub(0x10000, &mm->context.attach_count);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

static inline int pmd_trans_splitting(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
                (pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t entry)
{
        *pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
        pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        return pmd;
}
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        pmdp_flush_direct(vma->vm_mm, address, pmdp);
        *pmdp = pmd_mkold(pmd);
        return pmd_young(pmd);
}
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
                                       unsigned long address, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        pmdp_flush_direct(mm, address, pmdp);
        pmd_clear(pmdp);
        return pmd;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long address,
                                            pmd_t *pmdp, int full)
{
        pmd_t pmd = *pmdp;

        if (!full)
                pmdp_flush_lazy(mm, address, pmdp);
        pmd_clear(pmdp);
        return pmd;
}
#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
                                     unsigned long address, pmd_t *pmdp)
{
        return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
                                   unsigned long address, pmd_t *pmdp)
{
        pmdp_flush_direct(vma->vm_mm, address, pmdp);
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        if (pmd_write(pmd)) {
                pmdp_flush_direct(mm, address, pmdp);
                set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
        }
}

#define pfn_pmd(pfn, pgprot)    mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)    pfn_pmd(page_to_pfn(page), (pgprot))
static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
        return MACHINE_HAS_HPAGE ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 21, 22, 30 and 31 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 53, 54, 62 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                       |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        pte_t pte;

        offset &= __SWP_OFFSET_MASK;
        pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
                ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
        return pte;
}
#define __swp_type(entry)       (((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)     (((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
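
/*
 * Illustrative sketch (not part of the original header): encoding a swap
 * entry into a pte and decoding it again with the macros above; the type
 * and offset values are made up and the function name is hypothetical.
 */
static inline int example_swap_roundtrip(void)
{
        swp_entry_t entry = __swp_entry(3, 0x1234); /* type 3, offset 0x1234 */
        pte_t pte = __swp_entry_to_pte(entry);

        return __swp_type(__pte_to_swp_entry(pte)) == 3 &&
               __swp_offset(__pte_to_swp_entry(pte)) == 0x1234;
}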
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */