/*
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry anyway).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
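
/*
 * Editor's sketch, not part of the original header: ZERO_PAGE() selects
 * one of several prezeroed, page-aligned pages by cache color. The low
 * bits of the faulting address, masked with zero_page_mask, become an
 * offset from empty_zero_page, so two addresses that agree in the color
 * bits map to the same zero page.
 */
#if 0
static struct page *example_pick_zero_page(unsigned long vaddr)
{
	/* With zero_page_mask == 0, every vaddr yields the single
	 * empty_zero_page; with colored zero pages the mask covers
	 * the color bits just above PAGE_SHIFT. */
	return ZERO_PAGE(vaddr);
}
#endif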

/* TODO: s390 cannot support io_remap_pfn_range... */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048
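
/*
 * Worked example (editorial note, follows from the constants above): on
 * 64 bit a page covers 4KB (12 bits) and a page table holds 256 entries
 * (8 bits), so one segment table entry maps 1MB (PMD_SHIFT = 12 + 8 = 20).
 * With 2048 entries (11 bits) at each of the pmd, pud and pgd levels the
 * upper levels contribute 33 more bits, for a 53 bit virtual address
 * space: 12 + 8 + 11 + 11 + 11 = 53.
 */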

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and
 * modules. On 64 bit kernels we have a 2GB area at the top of the vmalloc
 * area where modules will reside. That makes sure that inter module
 * branches always happen without trampolines and in addition the placement
 * within a 2GB frame is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif

/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length (PTL+1*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length (STL+1*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |                     PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
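
/*
 * Editor's sketch, not part of the original header: the pgste helpers
 * further down read and rewrite this 7-bit storage key through the arch
 * helpers page_get_storage_key()/page_set_storage_key().
 */
#if 0
static void example_clear_changed_bit(unsigned long address)
{
	unsigned char skey = page_get_storage_key(address);

	if (skey & _PAGE_CHANGED)	/* C bit set in the key? */
		page_set_storage_key(address, skey & ~_PAGE_CHANGED, 0);
}
#endif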

/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit */
#define _PAGE_SWR	0x008		/* SW pte referenced bit */
#define _PAGE_SWW	0x010		/* SW pte write bit */
#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
				 _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV  */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO   */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses ipte to invalidate a
 * given pte. ipte sets the hw invalid bit and clears all tlbs for the page.
 * The page table entry is set to _PAGE_TYPE_EMPTY afterwards. This change
 * is done while holding the lock, but the intermediate step of a
 * previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
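
/*
 * Editor's sketch, not part of the original header: the predicates below
 * implement exactly the table above. After an ipte on a _PAGE_TYPE_RW pte
 * the entry reads 1000 (_PAGE_INVALID set, type bits clear), which
 * pte_none() accepts and pte_present() rejects, so the intermediate state
 * is never confused with a swap or file pte.
 */
#if 0
static void example_ipte_intermediate_state(void)
{
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_RW;		/* 0000: valid, writable */
	pte_val(pte) |= _PAGE_INVALID;		/* 1000: after ipte */
	BUG_ON(!pte_none(pte));			/* reads as empty ... */
	BUG_ON(pte_present(pte));		/* ... and not present */
}
#endif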

#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_RO	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */
#define _REGION3_ENTRY_CO	0x100	/* change-recording override	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */
#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number	    */
#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* CONFIG_64BIT */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)

#define PAGE_KERNEL	PAGE_RWC
#define PAGE_SHARED	PAGE_KERNEL
#define PAGE_COPY	PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW
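
/*
 * Editor's sketch, not part of the original header: generic mm code
 * indexes these __Pxxx/__Sxxx entries with the rwx bits of vm_flags, so a
 * private PROT_READ|PROT_WRITE mapping (__P011) still gets PAGE_RO here
 * and the first write fault makes a private copy (copy-on-write). The
 * array below is a simplified stand-in for the real protection_map.
 */
#if 0
static pgprot_t example_vm_get_page_prot(unsigned long vm_flags)
{
	static const pgprot_t example_map[8] = {
		__P000, __P001, __P010, __P011,
		__P100, __P101, __P110, __P111
	};
	return example_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC)];
}
#endif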

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)

static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)    { return 0; }
static inline int pud_large(pud_t pud)   { return 0; }
static inline int pud_bad(pud_t pud)     { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
}

static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
#else
	return 0;
#endif
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return 0;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (!pte_present(*ptep))
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 0);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Transfer page changed & referenced bit to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Clear relevant host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer referenced bit to pte */
	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
#endif
	return pgste;
}

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (!pte_present(*ptep))
		return pgste;
	/* Get referenced bit from storage key */
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	if (young)
		pgste_val(pgste) |= RCP_GR_BIT;
	/* Get host referenced bit from pgste */
	if (pgste_val(pgste) & RCP_HR_BIT) {
		pgste_val(pgste) &= ~RCP_HR_BIT;
		young = 1;
	}
	/* Transfer referenced bit to kvm user bits and pte */
	if (young) {
		pgste_val(pgste) |= KVM_UR_BIT;
		pte_val(*ptep) |= _PAGE_SWR;
	}
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long okey, nkey;

	if (!pte_present(entry))
		return;
	address = pte_val(entry) & PAGE_MASK;
	okey = nkey = page_get_storage_key(address);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(address, nkey, 0);
#endif
}

static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
		/*
		 * Without enhanced suppression-on-protection force
		 * the dirty bit on for all writable ptes.
		 */
		pte_val(entry) |= _PAGE_SWC;
		pte_val(entry) &= ~_PAGE_RO;
	}
	*ptep = entry;
}

/**
 * struct gmap_struct - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @next: pointer to the next gmap_rmap structure in the list
 * @entry: pointer to a segment table entry
 */
struct gmap_rmap {
	struct list_head list;
	unsigned long *entry;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_key(ptep, pgste, entry);
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
			pte_val(entry) |= _PAGE_CO;
		*ptep = entry;
	}
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWW) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWC) != 0;
}

static inline int pte_young(pte_t pte)
{
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWW;
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWW;
	if (pte_val(pte) & _PAGE_SWC)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWC;
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWC;
	if (pte_val(pte) & _PAGE_SWW)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWR;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWR;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return dirty;
}
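
/*
 * Editor's sketch, not part of the original header: KVM's dirty page
 * logging can harvest the user dirty bit per pte. The loop bounds and
 * the bitmap parameter here are hypothetical stand-ins.
 */
#if 0
static void example_harvest_dirty(struct mm_struct *mm, pte_t *ptep,
				  unsigned long nr, unsigned long *bitmap)
{
	unsigned long i;

	for (i = 0; i < nr; i++)
		if (ptep_test_and_clear_user_dirty(mm, ptep + i))
			__set_bit(i, bitmap);	/* remember for migration */
}
#endif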

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB;
	 * On s390 reference bits are in storage key and never in TLB.
	 * With virtualization we handle the reference bit, without it
	 * we can simply return */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef CONFIG_64BIT
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
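
/*
 * Editor's sketch, not part of the original header: the common-code
 * sequence the comment above refers to, written out as a caller would
 * issue it.
 */
#if 0
static void example_change_prot(struct vm_area_struct *vma, unsigned long addr,
				pte_t *ptep, pgprot_t newprot)
{
	pte_t pte;

	pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);	/* flushes */
	pte = pte_modify(pte, newprot);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
	/* a later flush_tlb_range() is a nop on s390 */
}
#endif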

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	if (mm_has_pgste(mm)) {
		pgste_set_pte(ptep, pte);
		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
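
/*
 * Editor's sketch, not part of the original header: how a batched unmap
 * caller benefits from full==1; teardown of a whole mm can skip the
 * per-pte ipte because the mm gets one flush at the end.
 */
#if 0
static void example_batched_unmap(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++, addr += PAGE_SIZE)
		ptep_get_and_clear_full(mm, addr, ptep + i, 1 /* full */);
	/* one mm-wide flush afterwards, done by tlb_finish_mmu() */
}
#endif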

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			pgste = pgste_get_lock(ptep);

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste_set_pte(ptep, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	__ptep_ipte(address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
		pte_val(__pte) |= _PAGE_SWC;
		pte_val(__pte) &= ~_PAGE_RO;
	}
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
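
/*
 * Editor's sketch, not part of the original header: a full software walk
 * from a kernel virtual address to its pte using the offset helpers
 * above; no locking shown.
 */
#if 0
static pte_t *example_walk_kernel(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return NULL;	/* hole, or mapped by a 1MB segment */
	return pte_offset_kernel(pmd, address);
}
#endif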

static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
			: "cc"
		);
	}
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	return pgprot_val(SEGMENT_RW);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	/* Do not clobber _HPAGE_TYPE_NONE pages! */
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	/* No dirty bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
	long tmp, rc;
	int counter;

	rc = 0;
	if (MACHINE_HAS_RRBM) {
		counter = PTRS_PER_PTE >> 6;
		asm volatile(
			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
			"	ogr	%1,%0\n"
			"	la	%3,0(%4,%3)\n"
			"	brct	%2,0b\n"
			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
			  "+a" (pmd_addr)
			: "a" (64 * 4096UL) : "cc");
		rc = !!rc;
	} else {
		counter = PTRS_PER_PTE;
		asm volatile(
			"0:	rrbe	0(%2)\n"
			"	la	%2,0(%3,%2)\n"
			"	brc	12,1f\n"
			"	lhi	%0,1\n"
			"1:	brct	%1,0b\n"
			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
			: "a" (4096UL) : "cc");
	}
	return rc;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	__pmd_idte(address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	__pmd_idte(address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		__pmd_idte(address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PAGE_SHIFT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
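
/*
 * Worked example (editorial note): for type = 3, offset = 5 the encoding
 * puts bit 0 of the offset at pte bit position 7 and the remaining bits
 * from position 11 upwards, so pte_val = _PAGE_TYPE_SWAP | (3 << 2) |
 * (1 << 7) | (4 << 11); __swp_type() and __swp_offset() invert exactly
 * this.
 */
#if 0
static void example_swap_round_trip(void)
{
	swp_entry_t entry = __swp_entry(3, 5);

	BUG_ON(__swp_type(entry) != 3);
	BUG_ON(__swp_offset(entry) != 5);
}
#endif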

#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */