/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

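/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): with __HAVE_COLOR_ZERO_PAGE several zero pages exist and
 * zero_page_mask picks the copy whose cache color matches the faulting
 * virtual address, e.g.:
 *
 *	struct page *zp = ZERO_PAGE(0x12345000UL);
 */
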
/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048

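/*
 * Illustrative sketch (editorial addition): with the 64 bit shifts and
 * table sizes above, a virtual address splits into 11 bits of pgd index,
 * 11 bits of pud index, 11 bits of pmd index, 8 bits of pte index and
 * 12 bits of page offset:
 *
 *	unsigned long addr  = 0x0000002345678000UL;
 *	unsigned long pgd_i = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
 *	unsigned long pud_i = (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
 *	unsigned long pmd_i = (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 *	unsigned long pte_i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 */
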
#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif

/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length (PTL+1*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length (STL+1*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_TYPE	0x002		/* SW pte type bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_WRITE	0x010		/* SW pte write bit */
#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
				 _PAGE_DIRTY | _PAGE_YOUNG)

/*
 * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
 * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
 * is used to distinguish present from not-present ptes. It is changed only
 * with the page table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte:
 *
 *			842100000000
 *			000084210000
 *			000000008421
 *			.IR....wdytp
 * empty		.10....00000
 * swap			.10....xxx10
 * file			.11....xxxx0
 * prot-none, clean	.11....00x01
 * prot-none, dirty	.10....01x01
 * read-only, clean	.01....00x01
 * read-only, dirty	.01....01x01
 * read-write, clean	.01....10x01
 * read-write, dirty	.00....11x01
 *
 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
 * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
 * pte_file    is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
 * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
 */

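/*
 * Illustrative sketch (editorial addition): the patterns above become
 * cheap mask tests. For an empty pte (_PAGE_INVALID only, value 0x400):
 *
 *	pte_t pte = __pte(_PAGE_INVALID);
 *	int none = (pte_val(pte) & 0x603) == 0x400;	-> evaluates to 1
 */
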
#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf0000000UL
#define PGSTE_FP_BIT	0x08000000UL
#define PGSTE_PCL_BIT	0x00800000UL
#define PGSTE_HR_BIT	0x00400000UL
#define PGSTE_HC_BIT	0x00200000UL
#define PGSTE_GR_BIT	0x00040000UL
#define PGSTE_GC_BIT	0x00020000UL
#define PGSTE_UR_BIT	0x00008000UL
#define PGSTE_UC_BIT	0x00004000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x00002000UL	/* IPTE notify bit */

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */
#define _REGION3_ENTRY_CO	0x100	/* change-recording override	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */
#define _SEGMENT_ENTRY_SPLIT	0x001	/* THP splitting bit */

#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UR_BIT	0x0000800000000000UL
#define PGSTE_UC_BIT	0x0000400000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000200000000000UL	/* IPTE notify bit */

#endif /* CONFIG_64BIT */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT)
#define SEGMENT_WRITE	__pgprot(0)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_large(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
#else
	return 0;
#endif
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INVALID;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_file(pte_t pte)
{
	/* Bit pattern: (pte & 0x601) == 0x600 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_PROTECT);
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}

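/*
 * Illustrative sketch (editorial addition): the PCL bit acts as a
 * per-pte spinlock for the pgste; callers bracket their pte/pgste
 * updates with the pair above:
 *
 *	pgste_t pgste = pgste_get_lock(ptep);
 *	... modify *ptep and/or pgste ...
 *	pgste_set_unlock(ptep, pgste);
 */
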
static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 0);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (PGSTE_HR_BIT | PGSTE_HC_BIT)) >> 52;
	/* Transfer page changed & referenced bit to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* PGSTE_UR_BIT & PGSTE_UC_BIT */
	/* Clear relevant host bits in pgste. */
	pgste_val(pgste) &= ~(PGSTE_HR_BIT | PGSTE_HC_BIT);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer referenced bit to pte */
	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
#endif
	return pgste;
}

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	/* Get referenced bit from storage key */
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	if (young)
		pgste_val(pgste) |= PGSTE_GR_BIT;
	/* Get host referenced bit from pgste */
	if (pgste_val(pgste) & PGSTE_HR_BIT) {
		pgste_val(pgste) &= ~PGSTE_HR_BIT;
		young = 1;
	}
	/* Transfer referenced bit to kvm user bits and pte */
	if (young) {
		pgste_val(pgste) |= PGSTE_UR_BIT;
		pte_val(*ptep) |= _PAGE_YOUNG;
	}
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
		/*
		 * Without enhanced suppression-on-protection force
		 * the dirty bit on for all writable ptes.
		 */
		pte_val(entry) |= _PAGE_DIRTY;
		pte_val(entry) &= ~_PAGE_PROTECT;
	}
	*ptep = entry;
}

/**
 * struct gmap - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	void *private;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @gmap: pointer to the gmap_struct
 * @entry: pointer to a segment table entry
 * @vmaddr: virtual address in the guest address space
 */
struct gmap_rmap {
	struct list_head list;
	struct gmap *gmap;
	unsigned long *entry;
	unsigned long vmaddr;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long address);
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);

static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_key(ptep, pgste, entry);
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
			pte_val(entry) |= _PAGE_CO;
		*ptep = entry;
	}
}

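/*
 * Illustrative sketch (editorial addition): a typical caller builds the
 * pte first and installs it through the hook above:
 *
 *	pte_t pte = mk_pte(page, PAGE_READ);
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 */
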
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_YOUNG)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

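/*
 * Illustrative sketch (editorial addition): pte_modify() keeps the page
 * frame plus the sticky dirty/young/special bits (_PAGE_CHG_MASK) and
 * replaces only the protection, e.g. downgrading to read-only:
 *
 *	pte_t ro = pte_modify(pte, PAGE_READ);
 */
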
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_YOUNG;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
		pgste_val(pgste) &= ~PGSTE_UC_BIT;
		pgste_set_unlock(ptep, pgste);
		return dirty;
	}
	return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & PGSTE_UR_BIT);
		pgste_val(pgste) &= ~PGSTE_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * No need to flush the TLB: on s390 reference bits are kept in
	 * the storage key and never in the TLB. With virtualization we
	 * handle the reference bit, without we can simply return.
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef CONFIG_64BIT
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active = (mm == current->active_mm) ? 1 : 0;

	if (atomic_read(&mm->context.attach_count) > active)
		__ptep_ipte(address, ptep);
	else
		mm->context.flush_mm = 1;
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

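/*
 * Illustrative sketch (editorial addition): the common-code sequence the
 * comment above refers to looks roughly like this; on s390 step 1 has
 * already flushed, which is why step 3 can be a nop:
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);	// 1) clear + flush
 *	set_pte_at(mm, addr, ptep, newpte);		// 2) install new pte
 *	flush_tlb_range(vma, start, end);		// 3) nop on s390
 */
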
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) |= _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set(ptep, pgste);
	}
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte);
		pgste_set_pte(ptep, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		}

		ptep_flush_lazy(mm, address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste_set_pte(ptep, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	__ptep_ipte(address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

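/*
 * Illustrative sketch (editorial addition): mk_pte_phys() simply adds
 * the protection bits to the physical address, so a page at 0x2000
 * mapped PAGE_READ yields pte_val == 0x2000 | _PAGE_PRESENT |
 * _PAGE_PROTECT == 0x2201.
 */
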
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

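/*
 * Illustrative sketch (editorial addition): with dynamic page table
 * levels the upper levels may be folded, so a full walk degrades
 * gracefully to fewer dereferences:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);	// may just cast pgd
 *	pmd_t *pmd = pmd_offset(pud, addr);	// may just cast pud
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */
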
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
			: "cc"
		);
	}
}

static inline void __pmd_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	/* Do not clobber PROT_NONE pages! */
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INVALID))
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1365
1366#define __HAVE_ARCH_PGTABLE_DEPOSIT
6b0b50b0
AK
1367extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1368 pgtable_t pgtable);
106c992a
GS
1369
1370#define __HAVE_ARCH_PGTABLE_WITHDRAW
6b0b50b0 1371extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
106c992a
GS
1372
1373static inline int pmd_trans_splitting(pmd_t pmd)
1374{
1375 return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
1376}
1377
1378static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1379 pmd_t *pmdp, pmd_t entry)
1380{
e5098611 1381 if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
106c992a
GS
1382 pmd_val(entry) |= _SEGMENT_ENTRY_CO;
1383 *pmdp = entry;
1384}
1385
1386static inline pmd_t pmd_mkhuge(pmd_t pmd)
1387{
1388 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
1389 return pmd;
1390}
1ae1c1d0
GS
1391
1392static inline pmd_t pmd_wrprotect(pmd_t pmd)
1393{
e5098611 1394 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1ae1c1d0
GS
1395 return pmd;
1396}
1397
1398static inline pmd_t pmd_mkdirty(pmd_t pmd)
1399{
1400 /* No dirty bit in the segment table entry. */
1401 return pmd;
1402}
1403
1404static inline pmd_t pmd_mkold(pmd_t pmd)
1405{
1406 /* No referenced bit in the segment table entry. */
1407 return pmd;
1408}
1409
1410static inline pmd_t pmd_mkyoung(pmd_t pmd)
1411{
1412 /* No referenced bit in the segment table entry. */
1413 return pmd;
1414}
1415
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
	long tmp, rc;
	int counter;

	rc = 0;
	if (MACHINE_HAS_RRBM) {
		counter = PTRS_PER_PTE >> 6;
		asm volatile(
			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
			"	ogr	%1,%0\n"
			"	la	%3,0(%4,%3)\n"
			"	brct	%2,0b\n"
			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
			  "+a" (pmd_addr)
			: "a" (64 * 4096UL) : "cc");
		rc = !!rc;
	} else {
		counter = PTRS_PER_PTE;
		asm volatile(
			"0:	rrbe	0,%2\n"
			"	la	%2,0(%3,%2)\n"
			"	brc	12,1f\n"
			"	lhi	%0,1\n"
			"1:	brct	%1,0b\n"
			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
			: "a" (4096UL) : "cc");
	}
	return rc;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	__pmd_idte(address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	__pmd_idte(address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		__pmd_idte(address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PAGE_SHIFT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 21, 22, 30 and 31 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 53, 54, 62 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

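/*
 * Illustrative sketch (editorial addition): encoding and decoding a
 * swap entry round-trips through the bit layout described above:
 *
 *	swp_entry_t swp = __swp_entry(3, 0x1234);
 *	unsigned long type = __swp_type(swp);	// == 3
 *	unsigned long off  = __swp_offset(swp);	// == 0x1234
 */
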
#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_INVALID | _PAGE_PROTECT })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */