arch/s390/include/asm/pgtable.h
1 /*
2 * S390 version
3 * Copyright IBM Corp. 1999, 2000
4 * Author(s): Hartmut Penner (hp@de.ibm.com)
5 * Ulrich Weigand (weigand@de.ibm.com)
6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 *
8 * Derived from "include/asm-i386/pgtable.h"
9 */
10
11 #ifndef _ASM_S390_PGTABLE_H
12 #define _ASM_S390_PGTABLE_H
13
14 /*
15 * The Linux memory management assumes a three-level page table setup. For
16 * s390 31 bit we "fold" the mid level into the top-level page table, so
17 * that we physically have the same two-level page table as the s390 mmu
18 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
19 * the hardware provides (region first and region second tables are not
20 * used).
21 *
22 * The "pgd_xxx()" functions are trivial for a folded two-level
23 * setup: the pgd is never bad, and a pmd always exists (as it's folded
24 * into the pgd entry)
25 *
26 * This file contains the functions and defines necessary to modify and use
27 * the S390 page table tree.
28 */
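/*
 * Illustrative sketch, not part of the original header: resolving a user
 * address to its pte with the offset helpers defined further down in this
 * file. The function name and the NULL error returns are assumptions made
 * up for the example; the walk itself uses the real helpers.
 *
 *	static pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *		pud_t *pud;
 *		pmd_t *pmd;
 *
 *		if (pgd_none(*pgd) || pgd_bad(*pgd))
 *			return NULL;
 *		pud = pud_offset(pgd, addr);
 *		if (pud_none(*pud) || pud_bad(*pud))
 *			return NULL;
 *		pmd = pmd_offset(pud, addr);
 *		if (pmd_none(*pmd) || pmd_bad(*pmd))
 *			return NULL;
 *		return pte_offset_kernel(pmd, addr);
 *	}
 */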
29 #ifndef __ASSEMBLY__
30 #include <linux/sched.h>
31 #include <linux/mm_types.h>
32 #include <linux/page-flags.h>
33 #include <linux/radix-tree.h>
34 #include <asm/bug.h>
35 #include <asm/page.h>
36
37 extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
38 extern void paging_init(void);
39 extern void vmem_map_init(void);
40
41 /*
42 * The S390 doesn't have any external MMU info: the kernel page
43 * tables contain all the necessary information.
44 */
45 #define update_mmu_cache(vma, address, ptep) do { } while (0)
46 #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
47
48 /*
49 * ZERO_PAGE is a global shared page that is always zero; used
50 * for zero-mapped memory areas etc..
51 */
52
53 extern unsigned long empty_zero_page;
54 extern unsigned long zero_page_mask;
55
56 #define ZERO_PAGE(vaddr) \
57 (virt_to_page((void *)(empty_zero_page + \
58 (((unsigned long)(vaddr)) & zero_page_mask))))
59 #define __HAVE_COLOR_ZERO_PAGE
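/*
 * Worked example (an illustration, the mask value is hypothetical): with
 * zero_page_mask == 0x3000 the kernel keeps four cache-colored copies of
 * the zero page, and a read fault at user address 0x2000 maps
 * empty_zero_page + 0x2000, so the zero page shares the cache color of
 * the faulting address.
 */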
60
61 /* TODO: s390 cannot support io_remap_pfn_range... */
62 #endif /* !__ASSEMBLY__ */
63
64 /*
65 * PMD_SHIFT determines the size of the area a second-level page
66 * table can map
67 * PGDIR_SHIFT determines what a third-level page table entry can map
68 */
69 #ifndef CONFIG_64BIT
70 # define PMD_SHIFT 20
71 # define PUD_SHIFT 20
72 # define PGDIR_SHIFT 20
73 #else /* CONFIG_64BIT */
74 # define PMD_SHIFT 20
75 # define PUD_SHIFT 31
76 # define PGDIR_SHIFT 42
77 #endif /* CONFIG_64BIT */
78
79 #define PMD_SIZE (1UL << PMD_SHIFT)
80 #define PMD_MASK (~(PMD_SIZE-1))
81 #define PUD_SIZE (1UL << PUD_SHIFT)
82 #define PUD_MASK (~(PUD_SIZE-1))
83 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
84 #define PGDIR_MASK (~(PGDIR_SIZE-1))
85
86 /*
87 * entries per page directory level: the S390 is two-level, so
88 * we don't really have any PMD directory physically.
89 * for S390, segment-table entries are combined into one PGD,
90 * which leads to 1024 ptes per pgd
91 */
92 #define PTRS_PER_PTE 256
93 #ifndef CONFIG_64BIT
94 #define PTRS_PER_PMD 1
95 #define PTRS_PER_PUD 1
96 #else /* CONFIG_64BIT */
97 #define PTRS_PER_PMD 2048
98 #define PTRS_PER_PUD 2048
99 #endif /* CONFIG_64BIT */
100 #define PTRS_PER_PGD 2048
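/*
 * Worked sizes, derived from the defines above (a reading aid): on 64 bit,
 * with the 4KB base page and 8 byte entries, a page table holds 256 ptes
 * in 2KB while segment and region tables hold 2048 entries in 16KB; this
 * makes PMD_SIZE = 1MB, PUD_SIZE = 2GB and PGDIR_SIZE = 4TB per top-level
 * entry.
 */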
101
102 #define FIRST_USER_ADDRESS 0UL
103
104 #define pte_ERROR(e) \
105 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
106 #define pmd_ERROR(e) \
107 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
108 #define pud_ERROR(e) \
109 printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
110 #define pgd_ERROR(e) \
111 printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
112
113 #ifndef __ASSEMBLY__
114 /*
115 * The vmalloc and module area will always be on the topmost area of the kernel
116 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
117 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
118 * modules will reside. That makes sure that inter module branches always
119 * happen without trampolines and in addition the placement within a 2GB frame
120 * is branch prediction unit friendly.
121 */
122 extern unsigned long VMALLOC_START;
123 extern unsigned long VMALLOC_END;
124 extern struct page *vmemmap;
125
126 #define VMEM_MAX_PHYS ((unsigned long) vmemmap)
127
128 #ifdef CONFIG_64BIT
129 extern unsigned long MODULES_VADDR;
130 extern unsigned long MODULES_END;
131 #define MODULES_VADDR MODULES_VADDR
132 #define MODULES_END MODULES_END
133 #define MODULES_LEN (1UL << 31)
134 #endif
135
136 static inline int is_module_addr(void *addr)
137 {
138 #ifdef CONFIG_64BIT
139 BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
140 if (addr < (void *)MODULES_VADDR)
141 return 0;
142 if (addr > (void *)MODULES_END)
143 return 0;
144 #endif
145 return 1;
146 }
147
148 /*
149 * A 31 bit pagetable entry of S390 has the following format:
150 * | PFRA | | OS |
151 * 0 0IP0
152 * 00000000001111111111222222222233
153 * 01234567890123456789012345678901
154 *
155 * I Page-Invalid Bit: Page is not available for address-translation
156 * P Page-Protection Bit: Store access not possible for page
157 *
158 * A 31 bit segment-table entry of S390 has the following format:
159 * | P-table origin | |PTL
160 * 0 IC
161 * 00000000001111111111222222222233
162 * 01234567890123456789012345678901
163 *
164 * I Segment-Invalid Bit: Segment is not available for address-translation
165 * C Common-Segment Bit: Segment is not private (PoP 3-30)
166 * PTL Page-Table-Length: Page-table length ((PTL+1)*16 entries -> up to 256)
167 *
168 * The 31 bit segment-table origin of S390 has the following format:
169 *
170 * |S-table origin | | STL |
171 * X **GPS
172 * 00000000001111111111222222222233
173 * 01234567890123456789012345678901
174 *
175 * X Space-Switch event:
176 * G Segment-Invalid Bit: *
177 * P Private-Space Bit: Segment is not private (PoP 3-30)
178 * S Storage-Alteration:
179 * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048)
180 *
181 * A 64 bit pagetable entry of S390 has the following format:
182 * | PFRA |0IPC| OS |
183 * 0000000000111111111122222222223333333333444444444455555555556666
184 * 0123456789012345678901234567890123456789012345678901234567890123
185 *
186 * I Page-Invalid Bit: Page is not available for address-translation
187 * P Page-Protection Bit: Store access not possible for page
188 * C Change-bit override: HW is not required to set change bit
189 *
190 * A 64 bit segment-table entry of S390 has the following format:
191 * | P-table origin | TT
192 * 0000000000111111111122222222223333333333444444444455555555556666
193 * 0123456789012345678901234567890123456789012345678901234567890123
194 *
195 * I Segment-Invalid Bit: Segment is not available for address-translation
196 * C Common-Segment Bit: Segment is not private (PoP 3-30)
197 * P Page-Protection Bit: Store access not possible for page
198 * TT Type 00
199 *
200 * A 64 bit region table entry of S390 has the following format:
201 * | S-table origin | TF TTTL
202 * 0000000000111111111122222222223333333333444444444455555555556666
203 * 0123456789012345678901234567890123456789012345678901234567890123
204 *
205 * I Segment-Invalid Bit: Segment is not available for address-translation
206 * TT Type 01
207 * TF
208 * TL Table length
209 *
210 * The 64 bit region-table origin of S390 has the following format:
211 * | region table origin | DTTL
212 * 0000000000111111111122222222223333333333444444444455555555556666
213 * 0123456789012345678901234567890123456789012345678901234567890123
214 *
215 * X Space-Switch event:
216 * G Segment-Invalid Bit:
217 * P Private-Space Bit:
218 * S Storage-Alteration:
219 * R Real space
220 * TL Table-Length:
221 *
222 * A storage key has the following format:
223 * | ACC |F|R|C|0|
224 * 0 3 4 5 6 7
225 * ACC: access key
226 * F : fetch protection bit
227 * R : referenced bit
228 * C : changed bit
229 */
230
231 /* Hardware bits in the page table entry */
232 #define _PAGE_PROTECT 0x200 /* HW read-only bit */
233 #define _PAGE_INVALID 0x400 /* HW invalid bit */
234 #define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
235
236 /* Software bits in the page table entry */
237 #define _PAGE_PRESENT 0x001 /* SW pte present bit */
238 #define _PAGE_TYPE 0x002 /* SW pte type bit */
239 #define _PAGE_YOUNG 0x004 /* SW pte young bit */
240 #define _PAGE_DIRTY 0x008 /* SW pte dirty bit */
241 #define _PAGE_READ 0x010 /* SW pte read bit */
242 #define _PAGE_WRITE 0x020 /* SW pte write bit */
243 #define _PAGE_SPECIAL 0x040 /* SW associated with special page */
244 #define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */
245 #define __HAVE_ARCH_PTE_SPECIAL
246
247 /* Set of bits not changed in pte_modify */
248 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
249 _PAGE_YOUNG)
250
251 /*
252 * handle_pte_fault uses pte_present and pte_none to find out the pte type
253 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
254 * distinguish present from not-present ptes. It is changed only with the page
255 * table lock held.
256 *
257 * The following table gives the different possible bit combinations for
258 * the pte hardware and software bits in the last 12 bits of a pte:
259 *
260 * 842100000000
261 * 000084210000
262 * 000000008421
263 * .IR...wrdytp
264 * empty .10...000000
265 * swap .10...xxxx10
266 * file .11...xxxxx0
267 * prot-none, clean, old .11...000001
268 * prot-none, clean, young .11...000101
269 * prot-none, dirty, old .10...001001
270 * prot-none, dirty, young .10...001101
271 * read-only, clean, old .11...010001
272 * read-only, clean, young .01...010101
273 * read-only, dirty, old .11...011001
274 * read-only, dirty, young .01...011101
275 * read-write, clean, old .11...110001
276 * read-write, clean, young .01...110101
277 * read-write, dirty, old .10...111001
278 * read-write, dirty, young .00...111101
279 *
280 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
281 * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
282 * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
283 */
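/*
 * Worked example for the table above (a reading aid): a readable, young,
 * clean pte carries _PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG in software
 * and _PAGE_PROTECT in hardware, i.e. 0x001 | 0x010 | 0x004 | 0x200 =
 * 0x215, the "read-only, clean, young" row .01...010101. For that value
 * pte_present() is true while the pte_none() and pte_swap() patterns do
 * not match.
 */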
284
285 #ifndef CONFIG_64BIT
286
287 /* Bits in the segment table address-space-control-element */
288 #define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
289 #define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */
290 #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
291 #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
292 #define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
293
294 /* Bits in the segment table entry */
295 #define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */
296 #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
297 #define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
298 #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
299 #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
300 #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
301
302 #define _SEGMENT_ENTRY_DIRTY 0 /* No sw dirty bit for 31-bit */
303 #define _SEGMENT_ENTRY_YOUNG 0 /* No sw young bit for 31-bit */
304 #define _SEGMENT_ENTRY_READ 0 /* No sw read bit for 31-bit */
305 #define _SEGMENT_ENTRY_WRITE 0 /* No sw write bit for 31-bit */
306 #define _SEGMENT_ENTRY_LARGE 0 /* No large pages for 31-bit */
307 #define _SEGMENT_ENTRY_BITS_LARGE 0
308 #define _SEGMENT_ENTRY_ORIGIN_LARGE 0
309
310 #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
311 #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
312
313 /*
314 * Segment table entry encoding (I = invalid, R = read-only bit):
315 * ..R...I.....
316 * prot-none ..1...1.....
317 * read-only ..1...0.....
318 * read-write ..0...0.....
319 * empty ..0...1.....
320 */
321
322 /* Page status table bits for virtualization */
323 #define PGSTE_ACC_BITS 0xf0000000UL
324 #define PGSTE_FP_BIT 0x08000000UL
325 #define PGSTE_PCL_BIT 0x00800000UL
326 #define PGSTE_HR_BIT 0x00400000UL
327 #define PGSTE_HC_BIT 0x00200000UL
328 #define PGSTE_GR_BIT 0x00040000UL
329 #define PGSTE_GC_BIT 0x00020000UL
330 #define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */
331 #define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */
332
333 #else /* CONFIG_64BIT */
334
335 /* Bits in the segment/region table address-space-control-element */
336 #define _ASCE_ORIGIN ~0xfffUL /* segment table origin */
337 #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
338 #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
339 #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
340 #define _ASCE_REAL_SPACE 0x20 /* real space control */
341 #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
342 #define _ASCE_TYPE_REGION1 0x0c /* region first table type */
343 #define _ASCE_TYPE_REGION2 0x08 /* region second table type */
344 #define _ASCE_TYPE_REGION3 0x04 /* region third table type */
345 #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
346 #define _ASCE_TABLE_LENGTH 0x03 /* region table length */
347
348 /* Bits in the region table entry */
349 #define _REGION_ENTRY_ORIGIN ~0xfffUL /* region/segment table origin */
350 #define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */
351 #define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
352 #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
353 #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
354 #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
355 #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
356 #define _REGION_ENTRY_LENGTH 0x03 /* region third length */
357
358 #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
359 #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
360 #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
361 #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
362 #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
363 #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
364
365 #define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
366 #define _REGION3_ENTRY_RO 0x200 /* page protection bit */
367
368 /* Bits in the segment table entry */
369 #define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
370 #define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
371 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
372 #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL /* segment table origin */
373 #define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
374 #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
375
376 #define _SEGMENT_ENTRY (0)
377 #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
378
379 #define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */
380 #define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
381 #define _SEGMENT_ENTRY_SPLIT 0x0800 /* THP splitting bit */
382 #define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
383 #define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
384 #define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
385
386 /*
387 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
388 * dy..R...I...wr
389 * prot-none, clean, old 00..1...1...00
390 * prot-none, clean, young 01..1...1...00
391 * prot-none, dirty, old 10..1...1...00
392 * prot-none, dirty, young 11..1...1...00
393 * read-only, clean, old 00..1...1...01
394 * read-only, clean, young 01..1...0...01
395 * read-only, dirty, old 10..1...1...01
396 * read-only, dirty, young 11..1...0...01
397 * read-write, clean, old 00..1...1...11
398 * read-write, clean, young 01..1...0...11
399 * read-write, dirty, old 10..0...1...11
400 * read-write, dirty, young 11..0...0...11
401 * The segment table origin is used to distinguish empty (origin==0) from
402 * read-write, old segment table entries (origin!=0)
403 */
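/*
 * Worked example for the table above (a reading aid): a read-only, clean,
 * young segment entry sets _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_PROTECT |
 * _SEGMENT_ENTRY_READ, i.e. 0x1000 | 0x200 | 0x0002 = 0x1202 plus the
 * page table origin, which is the "read-only, clean, young" row
 * 01..1...0...01.
 */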
404
405 #define _SEGMENT_ENTRY_SPLIT_BIT 11 /* THP splitting bit number */
406
407 /* Page status table bits for virtualization */
408 #define PGSTE_ACC_BITS 0xf000000000000000UL
409 #define PGSTE_FP_BIT 0x0800000000000000UL
410 #define PGSTE_PCL_BIT 0x0080000000000000UL
411 #define PGSTE_HR_BIT 0x0040000000000000UL
412 #define PGSTE_HC_BIT 0x0020000000000000UL
413 #define PGSTE_GR_BIT 0x0004000000000000UL
414 #define PGSTE_GC_BIT 0x0002000000000000UL
415 #define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
416 #define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
417
418 #endif /* CONFIG_64BIT */
419
420 /* Guest Page State used for virtualization */
421 #define _PGSTE_GPS_ZERO 0x0000000080000000UL
422 #define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
423 #define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
424 #define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL
425
426 /*
427 * A user page table pointer has the space-switch-event bit, the
428 * private-space-control bit and the storage-alteration-event-control
429 * bit set. A kernel page table pointer doesn't need them.
430 */
431 #define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
432 _ASCE_ALT_EVENT)
433
434 /*
435 * Page protection definitions.
436 */
437 #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
438 #define PAGE_READ __pgprot(_PAGE_PRESENT | _PAGE_READ | \
439 _PAGE_INVALID | _PAGE_PROTECT)
440 #define PAGE_WRITE __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
441 _PAGE_INVALID | _PAGE_PROTECT)
442
443 #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
444 _PAGE_YOUNG | _PAGE_DIRTY)
445 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
446 _PAGE_YOUNG | _PAGE_DIRTY)
447 #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
448 _PAGE_PROTECT)
449
450 /*
451 * On s390 the page table entry has an invalid bit and a read-only bit.
452 * Read permission implies execute permission and write permission
453 * implies read permission.
454 */
455 /*xwr*/
456 #define __P000 PAGE_NONE
457 #define __P001 PAGE_READ
458 #define __P010 PAGE_READ
459 #define __P011 PAGE_READ
460 #define __P100 PAGE_READ
461 #define __P101 PAGE_READ
462 #define __P110 PAGE_READ
463 #define __P111 PAGE_READ
464
465 #define __S000 PAGE_NONE
466 #define __S001 PAGE_READ
467 #define __S010 PAGE_WRITE
468 #define __S011 PAGE_WRITE
469 #define __S100 PAGE_READ
470 #define __S101 PAGE_READ
471 #define __S110 PAGE_WRITE
472 #define __S111 PAGE_WRITE
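/*
 * Reading aid (an illustration, not new behaviour): the __Pxxx/__Sxxx
 * index is the xwr permission triple of a mapping. A private
 * PROT_READ|PROT_WRITE mapping selects __P011 == PAGE_READ, so the first
 * write faults and is resolved by copy-on-write, while the shared variant
 * __S011 == PAGE_WRITE maps the page writable right away.
 */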
473
474 /*
475 * Segment entry (large page) protection definitions.
476 */
477 #define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
478 _SEGMENT_ENTRY_PROTECT)
479 #define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_PROTECT | \
480 _SEGMENT_ENTRY_READ)
481 #define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_READ | \
482 _SEGMENT_ENTRY_WRITE)
483
484 static inline int mm_has_pgste(struct mm_struct *mm)
485 {
486 #ifdef CONFIG_PGSTE
487 if (unlikely(mm->context.has_pgste))
488 return 1;
489 #endif
490 return 0;
491 }
492
493 /*
494 * In the case that a guest uses storage keys,
495 * faults should no longer be backed by zero pages.
496 */
497 #define mm_forbids_zeropage mm_use_skey
498 static inline int mm_use_skey(struct mm_struct *mm)
499 {
500 #ifdef CONFIG_PGSTE
501 if (mm->context.use_skey)
502 return 1;
503 #endif
504 return 0;
505 }
506
507 /*
508 * pgd/pmd/pte query functions
509 */
510 #ifndef CONFIG_64BIT
511
512 static inline int pgd_present(pgd_t pgd) { return 1; }
513 static inline int pgd_none(pgd_t pgd) { return 0; }
514 static inline int pgd_bad(pgd_t pgd) { return 0; }
515
516 static inline int pud_present(pud_t pud) { return 1; }
517 static inline int pud_none(pud_t pud) { return 0; }
518 static inline int pud_large(pud_t pud) { return 0; }
519 static inline int pud_bad(pud_t pud) { return 0; }
520
521 #else /* CONFIG_64BIT */
522
523 static inline int pgd_present(pgd_t pgd)
524 {
525 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
526 return 1;
527 return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
528 }
529
530 static inline int pgd_none(pgd_t pgd)
531 {
532 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
533 return 0;
534 return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
535 }
536
537 static inline int pgd_bad(pgd_t pgd)
538 {
539 /*
540 * With dynamic page table levels the pgd can be a region table
541 * entry or a segment table entry. Check for the bits that are
542 * invalid for either table entry.
543 */
544 unsigned long mask =
545 ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
546 ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
547 return (pgd_val(pgd) & mask) != 0;
548 }
549
550 static inline int pud_present(pud_t pud)
551 {
552 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
553 return 1;
554 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
555 }
556
557 static inline int pud_none(pud_t pud)
558 {
559 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
560 return 0;
561 return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
562 }
563
564 static inline int pud_large(pud_t pud)
565 {
566 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
567 return 0;
568 return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
569 }
570
571 static inline int pud_bad(pud_t pud)
572 {
573 /*
574 * With dynamic page table levels the pud can be a region table
575 * entry or a segment table entry. Check for the bits that are
576 * invalid for either table entry.
577 */
578 unsigned long mask =
579 ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
580 ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
581 return (pud_val(pud) & mask) != 0;
582 }
583
584 #endif /* CONFIG_64BIT */
585
586 static inline int pmd_present(pmd_t pmd)
587 {
588 return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
589 }
590
591 static inline int pmd_none(pmd_t pmd)
592 {
593 return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
594 }
595
596 static inline int pmd_large(pmd_t pmd)
597 {
598 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
599 }
600
601 static inline unsigned long pmd_pfn(pmd_t pmd)
602 {
603 unsigned long origin_mask;
604
605 origin_mask = _SEGMENT_ENTRY_ORIGIN;
606 if (pmd_large(pmd))
607 origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
608 return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
609 }
610
611 static inline int pmd_bad(pmd_t pmd)
612 {
613 if (pmd_large(pmd))
614 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
615 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
616 }
617
618 #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
619 extern void pmdp_splitting_flush(struct vm_area_struct *vma,
620 unsigned long addr, pmd_t *pmdp);
621
622 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
623 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
624 unsigned long address, pmd_t *pmdp,
625 pmd_t entry, int dirty);
626
627 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
628 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
629 unsigned long address, pmd_t *pmdp);
630
631 #define __HAVE_ARCH_PMD_WRITE
632 static inline int pmd_write(pmd_t pmd)
633 {
634 return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
635 }
636
637 static inline int pmd_dirty(pmd_t pmd)
638 {
639 int dirty = 1;
640 if (pmd_large(pmd))
641 dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
642 return dirty;
643 }
644
645 static inline int pmd_young(pmd_t pmd)
646 {
647 int young = 1;
648 if (pmd_large(pmd))
649 young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
650 return young;
651 }
652
653 static inline int pte_present(pte_t pte)
654 {
655 /* Bit pattern: (pte & 0x001) == 0x001 */
656 return (pte_val(pte) & _PAGE_PRESENT) != 0;
657 }
658
659 static inline int pte_none(pte_t pte)
660 {
661 /* Bit pattern: pte == 0x400 */
662 return pte_val(pte) == _PAGE_INVALID;
663 }
664
665 static inline int pte_swap(pte_t pte)
666 {
667 /* Bit pattern: (pte & 0x603) == 0x402 */
668 return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
669 _PAGE_TYPE | _PAGE_PRESENT))
670 == (_PAGE_INVALID | _PAGE_TYPE);
671 }
672
673 static inline int pte_special(pte_t pte)
674 {
675 return (pte_val(pte) & _PAGE_SPECIAL);
676 }
677
678 #define __HAVE_ARCH_PTE_SAME
679 static inline int pte_same(pte_t a, pte_t b)
680 {
681 return pte_val(a) == pte_val(b);
682 }
683
684 static inline pgste_t pgste_get_lock(pte_t *ptep)
685 {
686 unsigned long new = 0;
687 #ifdef CONFIG_PGSTE
688 unsigned long old;
689
690 preempt_disable();
691 asm(
692 " lg %0,%2\n"
693 "0: lgr %1,%0\n"
694 " nihh %0,0xff7f\n" /* clear PCL bit in old */
695 " oihh %1,0x0080\n" /* set PCL bit in new */
696 " csg %0,%1,%2\n"
697 " jl 0b\n"
698 : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
699 : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
700 #endif
701 return __pgste(new);
702 }
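/*
 * Rough C equivalent of the csg loop above (a sketch for readers, the
 * inline assembly stays authoritative): the pgste sits PTRS_PER_PTE
 * entries after its pte, and the loop is a compare-and-swap spin on the
 * PCL bit, entered with preemption already disabled.
 *
 *	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
 *	unsigned long old, new;
 *
 *	do {
 *		old = *pgste & ~PGSTE_PCL_BIT;	// expect the lock bit clear
 *		new = old | PGSTE_PCL_BIT;	// take the lock bit
 *	} while (cmpxchg(pgste, old, new) != old);
 */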
703
704 static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
705 {
706 #ifdef CONFIG_PGSTE
707 asm(
708 " nihh %1,0xff7f\n" /* clear PCL bit */
709 " stg %1,%0\n"
710 : "=Q" (ptep[PTRS_PER_PTE])
711 : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
712 : "cc", "memory");
713 preempt_enable();
714 #endif
715 }
716
717 static inline pgste_t pgste_get(pte_t *ptep)
718 {
719 unsigned long pgste = 0;
720 #ifdef CONFIG_PGSTE
721 pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
722 #endif
723 return __pgste(pgste);
724 }
725
726 static inline void pgste_set(pte_t *ptep, pgste_t pgste)
727 {
728 #ifdef CONFIG_PGSTE
729 *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
730 #endif
731 }
732
733 static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
734 struct mm_struct *mm)
735 {
736 #ifdef CONFIG_PGSTE
737 unsigned long address, bits, skey;
738
739 if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
740 return pgste;
741 address = pte_val(*ptep) & PAGE_MASK;
742 skey = (unsigned long) page_get_storage_key(address);
743 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
744 /* Transfer page changed & referenced bit to guest bits in pgste */
745 pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */
746 /* Copy page access key and fetch protection bit to pgste */
747 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
748 pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
749 #endif
750 return pgste;
751
752 }
753
754 static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
755 struct mm_struct *mm)
756 {
757 #ifdef CONFIG_PGSTE
758 unsigned long address;
759 unsigned long nkey;
760
761 if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
762 return;
763 VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
764 address = pte_val(entry) & PAGE_MASK;
765 /*
766 * Set page access key and fetch protection bit from pgste.
767 * The guest C/R information is still in the PGSTE, set real
768 * key C/R to 0.
769 */
770 nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
771 nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
772 page_set_storage_key(address, nkey, 0);
773 #endif
774 }
775
776 static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
777 {
778 if ((pte_val(entry) & _PAGE_PRESENT) &&
779 (pte_val(entry) & _PAGE_WRITE) &&
780 !(pte_val(entry) & _PAGE_INVALID)) {
781 if (!MACHINE_HAS_ESOP) {
782 /*
783 * Without enhanced suppression-on-protection force
784 * the dirty bit on for all writable ptes.
785 */
786 pte_val(entry) |= _PAGE_DIRTY;
787 pte_val(entry) &= ~_PAGE_PROTECT;
788 }
789 if (!(pte_val(entry) & _PAGE_PROTECT))
790 /* This pte allows write access, set user-dirty */
791 pgste_val(pgste) |= PGSTE_UC_BIT;
792 }
793 *ptep = entry;
794 return pgste;
795 }
796
797 /**
798 * struct gmap - guest address space
799 * @crst_list: list of all crst tables used in the guest address space
800 * @mm: pointer to the parent mm_struct
801 * @guest_to_host: radix tree with guest to host address translation
802 * @host_to_guest: radix tree with pointer to segment table entries
803 * @guest_table_lock: spinlock to protect all entries in the guest page table
804 * @table: pointer to the page directory
805 * @asce: address space control element for gmap page table
806 * @pfault_enabled: defines if pfaults are applicable for the guest
807 */
808 struct gmap {
809 struct list_head list;
810 struct list_head crst_list;
811 struct mm_struct *mm;
812 struct radix_tree_root guest_to_host;
813 struct radix_tree_root host_to_guest;
814 spinlock_t guest_table_lock;
815 unsigned long *table;
816 unsigned long asce;
817 unsigned long asce_end;
818 void *private;
819 bool pfault_enabled;
820 };
821
822 /**
823 * struct gmap_notifier - notify function block for page invalidation
824 * @notifier_call: address of callback function
825 */
826 struct gmap_notifier {
827 struct list_head list;
828 void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
829 };
830
831 struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
832 void gmap_free(struct gmap *gmap);
833 void gmap_enable(struct gmap *gmap);
834 void gmap_disable(struct gmap *gmap);
835 int gmap_map_segment(struct gmap *gmap, unsigned long from,
836 unsigned long to, unsigned long len);
837 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
838 unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
839 unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
840 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
841 int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
842 void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
843 void __gmap_zap(struct gmap *, unsigned long gaddr);
844 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
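/*
 * Typical gmap lifecycle (a hedged sketch; hva_start, gpa_start and the
 * sizes are placeholders, the calls are the ones declared above):
 *
 *	struct gmap *gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
 *
 *	if (!gmap)
 *		return -ENOMEM;
 *	if (!gmap_map_segment(gmap, hva_start, gpa_start, 1UL << 20))
 *		gmap_enable(gmap);	// switch to the guest asce
 *	...
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */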
845
846
847 void gmap_register_ipte_notifier(struct gmap_notifier *);
848 void gmap_unregister_ipte_notifier(struct gmap_notifier *);
849 int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
850 void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
851
852 static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
853 unsigned long addr,
854 pte_t *ptep, pgste_t pgste)
855 {
856 #ifdef CONFIG_PGSTE
857 if (pgste_val(pgste) & PGSTE_IN_BIT) {
858 pgste_val(pgste) &= ~PGSTE_IN_BIT;
859 gmap_do_ipte_notify(mm, addr, ptep);
860 }
861 #endif
862 return pgste;
863 }
864
865 /*
866 * Certain architectures need to do special things when PTEs
867 * within a page table are directly modified. Thus, the following
868 * hook is made available.
869 */
870 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
871 pte_t *ptep, pte_t entry)
872 {
873 pgste_t pgste;
874
875 if (mm_has_pgste(mm)) {
876 pgste = pgste_get_lock(ptep);
877 pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
878 pgste_set_key(ptep, pgste, entry, mm);
879 pgste = pgste_set_pte(ptep, pgste, entry);
880 pgste_set_unlock(ptep, pgste);
881 } else {
882 *ptep = entry;
883 }
884 }
885
886 /*
887 * query functions pte_write/pte_dirty/pte_young only work if
888 * pte_present() is true. Undefined behaviour if not.
889 */
890 static inline int pte_write(pte_t pte)
891 {
892 return (pte_val(pte) & _PAGE_WRITE) != 0;
893 }
894
895 static inline int pte_dirty(pte_t pte)
896 {
897 return (pte_val(pte) & _PAGE_DIRTY) != 0;
898 }
899
900 static inline int pte_young(pte_t pte)
901 {
902 return (pte_val(pte) & _PAGE_YOUNG) != 0;
903 }
904
905 #define __HAVE_ARCH_PTE_UNUSED
906 static inline int pte_unused(pte_t pte)
907 {
908 return pte_val(pte) & _PAGE_UNUSED;
909 }
910
911 /*
912 * pgd/pmd/pte modification functions
913 */
914
915 static inline void pgd_clear(pgd_t *pgd)
916 {
917 #ifdef CONFIG_64BIT
918 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
919 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
920 #endif
921 }
922
923 static inline void pud_clear(pud_t *pud)
924 {
925 #ifdef CONFIG_64BIT
926 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
927 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
928 #endif
929 }
930
931 static inline void pmd_clear(pmd_t *pmdp)
932 {
933 pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
934 }
935
936 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
937 {
938 pte_val(*ptep) = _PAGE_INVALID;
939 }
940
941 /*
942 * The following pte modification functions only work if
943 * pte_present() is true. Undefined behaviour if not.
944 */
945 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
946 {
947 pte_val(pte) &= _PAGE_CHG_MASK;
948 pte_val(pte) |= pgprot_val(newprot);
949 /*
950 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
951 * invalid bit set, clear it again for readable, young pages
952 */
953 if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
954 pte_val(pte) &= ~_PAGE_INVALID;
955 /*
956 * newprot for PAGE_READ and PAGE_WRITE has the page protection
957 * bit set, clear it again for writable, dirty pages
958 */
959 if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
960 pte_val(pte) &= ~_PAGE_PROTECT;
961 return pte;
962 }
963
964 static inline pte_t pte_wrprotect(pte_t pte)
965 {
966 pte_val(pte) &= ~_PAGE_WRITE;
967 pte_val(pte) |= _PAGE_PROTECT;
968 return pte;
969 }
970
971 static inline pte_t pte_mkwrite(pte_t pte)
972 {
973 pte_val(pte) |= _PAGE_WRITE;
974 if (pte_val(pte) & _PAGE_DIRTY)
975 pte_val(pte) &= ~_PAGE_PROTECT;
976 return pte;
977 }
978
979 static inline pte_t pte_mkclean(pte_t pte)
980 {
981 pte_val(pte) &= ~_PAGE_DIRTY;
982 pte_val(pte) |= _PAGE_PROTECT;
983 return pte;
984 }
985
986 static inline pte_t pte_mkdirty(pte_t pte)
987 {
988 pte_val(pte) |= _PAGE_DIRTY;
989 if (pte_val(pte) & _PAGE_WRITE)
990 pte_val(pte) &= ~_PAGE_PROTECT;
991 return pte;
992 }
993
994 static inline pte_t pte_mkold(pte_t pte)
995 {
996 pte_val(pte) &= ~_PAGE_YOUNG;
997 pte_val(pte) |= _PAGE_INVALID;
998 return pte;
999 }
1000
1001 static inline pte_t pte_mkyoung(pte_t pte)
1002 {
1003 pte_val(pte) |= _PAGE_YOUNG;
1004 if (pte_val(pte) & _PAGE_READ)
1005 pte_val(pte) &= ~_PAGE_INVALID;
1006 return pte;
1007 }
1008
1009 static inline pte_t pte_mkspecial(pte_t pte)
1010 {
1011 pte_val(pte) |= _PAGE_SPECIAL;
1012 return pte;
1013 }
1014
1015 #ifdef CONFIG_HUGETLB_PAGE
1016 static inline pte_t pte_mkhuge(pte_t pte)
1017 {
1018 pte_val(pte) |= _PAGE_LARGE;
1019 return pte;
1020 }
1021 #endif
1022
1023 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
1024 {
1025 unsigned long pto = (unsigned long) ptep;
1026
1027 #ifndef CONFIG_64BIT
1028 /* pto in ESA mode must point to the start of the segment table */
1029 pto &= 0x7ffffc00;
1030 #endif
1031 /* Invalidation + global TLB flush for the pte */
1032 asm volatile(
1033 " ipte %2,%3"
1034 : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
1035 }
1036
1037 static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
1038 {
1039 unsigned long pto = (unsigned long) ptep;
1040
1041 #ifndef CONFIG_64BIT
1042 /* pto in ESA mode must point to the start of the segment table */
1043 pto &= 0x7ffffc00;
1044 #endif
1045 /* Invalidation + local TLB flush for the pte */
1046 asm volatile(
1047 " .insn rrf,0xb2210000,%2,%3,0,1"
1048 : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
1049 }
1050
1051 static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
1052 {
1053 unsigned long pto = (unsigned long) ptep;
1054
1055 #ifndef CONFIG_64BIT
1056 /* pto in ESA mode must point to the start of the segment table */
1057 pto &= 0x7ffffc00;
1058 #endif
1059 /* Invalidate a range of ptes + global TLB flush of the ptes */
1060 do {
1061 asm volatile(
1062 " .insn rrf,0xb2210000,%2,%0,%1,0"
1063 : "+a" (address), "+a" (nr) : "a" (pto) : "memory");
1064 } while (nr != 255);
1065 }
1066
1067 static inline void ptep_flush_direct(struct mm_struct *mm,
1068 unsigned long address, pte_t *ptep)
1069 {
1070 int active, count;
1071
1072 if (pte_val(*ptep) & _PAGE_INVALID)
1073 return;
1074 active = (mm == current->active_mm) ? 1 : 0;
1075 count = atomic_add_return(0x10000, &mm->context.attach_count);
1076 if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
1077 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
1078 __ptep_ipte_local(address, ptep);
1079 else
1080 __ptep_ipte(address, ptep);
1081 atomic_sub(0x10000, &mm->context.attach_count);
1082 }
1083
1084 static inline void ptep_flush_lazy(struct mm_struct *mm,
1085 unsigned long address, pte_t *ptep)
1086 {
1087 int active, count;
1088
1089 if (pte_val(*ptep) & _PAGE_INVALID)
1090 return;
1091 active = (mm == current->active_mm) ? 1 : 0;
1092 count = atomic_add_return(0x10000, &mm->context.attach_count);
1093 if ((count & 0xffff) <= active) {
1094 pte_val(*ptep) |= _PAGE_INVALID;
1095 mm->context.flush_mm = 1;
1096 } else
1097 __ptep_ipte(address, ptep);
1098 atomic_sub(0x10000, &mm->context.attach_count);
1099 }
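/*
 * Reading aid, an interpretation of the two flush helpers above: the low
 * 16 bits of mm->context.attach_count hold the number of CPUs the mm is
 * attached to, and every flusher in flight adds 0x10000. Thus
 * "(count & 0xffff) <= active" means no other CPU can be using the pte,
 * so the flush may be done lazily by just marking the pte invalid.
 */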
1100
1101 /*
1102 * Get (and clear) the user dirty bit for a pte.
1103 */
1104 static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
1105 unsigned long addr,
1106 pte_t *ptep)
1107 {
1108 pgste_t pgste;
1109 pte_t pte;
1110 int dirty;
1111
1112 if (!mm_has_pgste(mm))
1113 return 0;
1114 pgste = pgste_get_lock(ptep);
1115 dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
1116 pgste_val(pgste) &= ~PGSTE_UC_BIT;
1117 pte = *ptep;
1118 if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
1119 pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
1120 __ptep_ipte(addr, ptep);
1121 if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
1122 pte_val(pte) |= _PAGE_PROTECT;
1123 else
1124 pte_val(pte) |= _PAGE_INVALID;
1125 *ptep = pte;
1126 }
1127 pgste_set_unlock(ptep, pgste);
1128 return dirty;
1129 }
1130
1131 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1132 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
1133 unsigned long addr, pte_t *ptep)
1134 {
1135 pgste_t pgste;
1136 pte_t pte, oldpte;
1137 int young;
1138
1139 if (mm_has_pgste(vma->vm_mm)) {
1140 pgste = pgste_get_lock(ptep);
1141 pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
1142 }
1143
1144 oldpte = pte = *ptep;
1145 ptep_flush_direct(vma->vm_mm, addr, ptep);
1146 young = pte_young(pte);
1147 pte = pte_mkold(pte);
1148
1149 if (mm_has_pgste(vma->vm_mm)) {
1150 pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
1151 pgste = pgste_set_pte(ptep, pgste, pte);
1152 pgste_set_unlock(ptep, pgste);
1153 } else
1154 *ptep = pte;
1155
1156 return young;
1157 }
1158
1159 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1160 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
1161 unsigned long address, pte_t *ptep)
1162 {
1163 return ptep_test_and_clear_young(vma, address, ptep);
1164 }
1165
1166 /*
1167 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
1168 * both clear the TLB for the unmapped pte. The reason is that
1169 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
1170 * to modify an active pte. The sequence is
1171 * 1) ptep_get_and_clear
1172 * 2) set_pte_at
1173 * 3) flush_tlb_range
1174 * On s390 the tlb needs to get flushed with the modification of the pte
1175 * if the pte is active. The only way this can be implemented is to
1176 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
1177 * is a nop.
1178 */
1179 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1180 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1181 unsigned long address, pte_t *ptep)
1182 {
1183 pgste_t pgste;
1184 pte_t pte;
1185
1186 if (mm_has_pgste(mm)) {
1187 pgste = pgste_get_lock(ptep);
1188 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
1189 }
1190
1191 pte = *ptep;
1192 ptep_flush_lazy(mm, address, ptep);
1193 pte_val(*ptep) = _PAGE_INVALID;
1194
1195 if (mm_has_pgste(mm)) {
1196 pgste = pgste_update_all(&pte, pgste, mm);
1197 pgste_set_unlock(ptep, pgste);
1198 }
1199 return pte;
1200 }
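/*
 * Sketch of the common-code sequence the comment above refers to (caller
 * and locking simplified; change_pte_range() is the real user):
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);	// flushes on s390
 *	pte_t new = pte_modify(old, newprot);
 *
 *	set_pte_at(mm, addr, ptep, new);
 *	flush_tlb_range(vma, start, end);	// nop on s390
 */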
1201
1202 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1203 static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
1204 unsigned long address,
1205 pte_t *ptep)
1206 {
1207 pgste_t pgste;
1208 pte_t pte;
1209
1210 if (mm_has_pgste(mm)) {
1211 pgste = pgste_get_lock(ptep);
1212 pgste_ipte_notify(mm, address, ptep, pgste);
1213 }
1214
1215 pte = *ptep;
1216 ptep_flush_lazy(mm, address, ptep);
1217
1218 if (mm_has_pgste(mm)) {
1219 pgste = pgste_update_all(&pte, pgste, mm);
1220 pgste_set(ptep, pgste);
1221 }
1222 return pte;
1223 }
1224
1225 static inline void ptep_modify_prot_commit(struct mm_struct *mm,
1226 unsigned long address,
1227 pte_t *ptep, pte_t pte)
1228 {
1229 pgste_t pgste;
1230
1231 if (mm_has_pgste(mm)) {
1232 pgste = pgste_get(ptep);
1233 pgste_set_key(ptep, pgste, pte, mm);
1234 pgste = pgste_set_pte(ptep, pgste, pte);
1235 pgste_set_unlock(ptep, pgste);
1236 } else
1237 *ptep = pte;
1238 }
1239
1240 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
1241 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1242 unsigned long address, pte_t *ptep)
1243 {
1244 pgste_t pgste;
1245 pte_t pte;
1246
1247 if (mm_has_pgste(vma->vm_mm)) {
1248 pgste = pgste_get_lock(ptep);
1249 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
1250 }
1251
1252 pte = *ptep;
1253 ptep_flush_direct(vma->vm_mm, address, ptep);
1254 pte_val(*ptep) = _PAGE_INVALID;
1255
1256 if (mm_has_pgste(vma->vm_mm)) {
1257 if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
1258 _PGSTE_GPS_USAGE_UNUSED)
1259 pte_val(pte) |= _PAGE_UNUSED;
1260 pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
1261 pgste_set_unlock(ptep, pgste);
1262 }
1263 return pte;
1264 }
1265
1266 /*
1267 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
1268 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
1269 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
1270 * cannot be accessed while the batched unmap is running. In this case
1271 * full==1 and a simple pte_clear is enough. See tlb.h.
1272 */
1273 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1274 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1275 unsigned long address,
1276 pte_t *ptep, int full)
1277 {
1278 pgste_t pgste;
1279 pte_t pte;
1280
1281 if (!full && mm_has_pgste(mm)) {
1282 pgste = pgste_get_lock(ptep);
1283 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
1284 }
1285
1286 pte = *ptep;
1287 if (!full)
1288 ptep_flush_lazy(mm, address, ptep);
1289 pte_val(*ptep) = _PAGE_INVALID;
1290
1291 if (!full && mm_has_pgste(mm)) {
1292 pgste = pgste_update_all(&pte, pgste, mm);
1293 pgste_set_unlock(ptep, pgste);
1294 }
1295 return pte;
1296 }
1297
1298 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
1299 static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
1300 unsigned long address, pte_t *ptep)
1301 {
1302 pgste_t pgste;
1303 pte_t pte = *ptep;
1304
1305 if (pte_write(pte)) {
1306 if (mm_has_pgste(mm)) {
1307 pgste = pgste_get_lock(ptep);
1308 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
1309 }
1310
1311 ptep_flush_lazy(mm, address, ptep);
1312 pte = pte_wrprotect(pte);
1313
1314 if (mm_has_pgste(mm)) {
1315 pgste = pgste_set_pte(ptep, pgste, pte);
1316 pgste_set_unlock(ptep, pgste);
1317 } else
1318 *ptep = pte;
1319 }
1320 return pte;
1321 }
1322
1323 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1324 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1325 unsigned long address, pte_t *ptep,
1326 pte_t entry, int dirty)
1327 {
1328 pgste_t pgste;
1329
1330 if (pte_same(*ptep, entry))
1331 return 0;
1332 if (mm_has_pgste(vma->vm_mm)) {
1333 pgste = pgste_get_lock(ptep);
1334 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
1335 }
1336
1337 ptep_flush_direct(vma->vm_mm, address, ptep);
1338
1339 if (mm_has_pgste(vma->vm_mm)) {
1340 pgste_set_key(ptep, pgste, entry, vma->vm_mm);
1341 pgste = pgste_set_pte(ptep, pgste, entry);
1342 pgste_set_unlock(ptep, pgste);
1343 } else
1344 *ptep = entry;
1345 return 1;
1346 }
1347
1348 /*
1349 * Conversion functions: convert a page and protection to a page entry,
1350 * and a page entry and page directory to the page they refer to.
1351 */
1352 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1353 {
1354 pte_t __pte;
1355 pte_val(__pte) = physpage + pgprot_val(pgprot);
1356 return pte_mkyoung(__pte);
1357 }
1358
1359 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1360 {
1361 unsigned long physpage = page_to_phys(page);
1362 pte_t __pte = mk_pte_phys(physpage, pgprot);
1363
1364 if (pte_write(__pte) && PageDirty(page))
1365 __pte = pte_mkdirty(__pte);
1366 return __pte;
1367 }
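/*
 * Usage sketch (a simplified illustration, real fault handlers do more):
 * build a pte for a freshly allocated page and install it.
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *
 *	if (vma->vm_flags & VM_WRITE)
 *		pte = pte_mkwrite(pte_mkdirty(pte));
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 */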
1368
1369 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1370 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
1371 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1372 #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
1373
1374 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
1375 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
1376
1377 #ifndef CONFIG_64BIT
1378
1379 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1380 #define pud_deref(pud) ({ BUG(); 0UL; })
1381 #define pgd_deref(pgd) ({ BUG(); 0UL; })
1382
1383 #define pud_offset(pgd, address) ((pud_t *) pgd)
1384 #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
1385
1386 #else /* CONFIG_64BIT */
1387
1388 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1389 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
1390 #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
1391
1392 static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
1393 {
1394 pud_t *pud = (pud_t *) pgd;
1395 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
1396 pud = (pud_t *) pgd_deref(*pgd);
1397 return pud + pud_index(address);
1398 }
1399
1400 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1401 {
1402 pmd_t *pmd = (pmd_t *) pud;
1403 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
1404 pmd = (pmd_t *) pud_deref(*pud);
1405 return pmd + pmd_index(address);
1406 }
1407
1408 #endif /* CONFIG_64BIT */
1409
1410 #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
1411 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1412 #define pte_page(x) pfn_to_page(pte_pfn(x))
1413
1414 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1415
1416 /* Find an entry in the lowest level page table. */
1417 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
1418 #define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
1419 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
1420 #define pte_unmap(pte) do { } while (0)
1421
1422 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1423 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1424 {
1425 /*
1426 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
1427 * Convert to segment table entry format.
1428 */
1429 if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1430 return pgprot_val(SEGMENT_NONE);
1431 if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
1432 return pgprot_val(SEGMENT_READ);
1433 return pgprot_val(SEGMENT_WRITE);
1434 }
1435
1436 static inline pmd_t pmd_wrprotect(pmd_t pmd)
1437 {
1438 pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
1439 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1440 return pmd;
1441 }
1442
1443 static inline pmd_t pmd_mkwrite(pmd_t pmd)
1444 {
1445 pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
1446 if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1447 return pmd;
1448 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1449 return pmd;
1450 }
1451
1452 static inline pmd_t pmd_mkclean(pmd_t pmd)
1453 {
1454 if (pmd_large(pmd)) {
1455 pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
1456 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1457 }
1458 return pmd;
1459 }
1460
1461 static inline pmd_t pmd_mkdirty(pmd_t pmd)
1462 {
1463 if (pmd_large(pmd)) {
1464 pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
1465 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1466 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1467 }
1468 return pmd;
1469 }
1470
1471 static inline pmd_t pmd_mkyoung(pmd_t pmd)
1472 {
1473 if (pmd_large(pmd)) {
1474 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1475 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1476 pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
1477 }
1478 return pmd;
1479 }
1480
1481 static inline pmd_t pmd_mkold(pmd_t pmd)
1482 {
1483 if (pmd_large(pmd)) {
1484 pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
1485 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1486 }
1487 return pmd;
1488 }
1489
1490 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1491 {
1492 if (pmd_large(pmd)) {
1493 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
1494 _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
1495 _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
1496 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1497 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1498 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1499 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1500 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1501 return pmd;
1502 }
1503 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
1504 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1505 return pmd;
1506 }
1507
1508 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1509 {
1510 pmd_t __pmd;
1511 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1512 return __pmd;
1513 }
1514
1515 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1516
1517 static inline void __pmdp_csp(pmd_t *pmdp)
1518 {
1519 register unsigned long reg2 asm("2") = pmd_val(*pmdp);
1520 register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
1521 _SEGMENT_ENTRY_INVALID;
1522 register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
1523
1524 asm volatile(
1525 " csp %1,%3"
1526 : "=m" (*pmdp)
1527 : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
1528 }
1529
1530 static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
1531 {
1532 unsigned long sto;
1533
1534 sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
1535 asm volatile(
1536 " .insn rrf,0xb98e0000,%2,%3,0,0"
1537 : "=m" (*pmdp)
1538 : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
1539 : "cc" );
1540 }
1541
1542 static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
1543 {
1544 unsigned long sto;
1545
1546 sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
1547 asm volatile(
1548 " .insn rrf,0xb98e0000,%2,%3,0,1"
1549 : "=m" (*pmdp)
1550 : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
1551 : "cc" );
1552 }
1553
1554 static inline void pmdp_flush_direct(struct mm_struct *mm,
1555 unsigned long address, pmd_t *pmdp)
1556 {
1557 int active, count;
1558
1559 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
1560 return;
1561 if (!MACHINE_HAS_IDTE) {
1562 __pmdp_csp(pmdp);
1563 return;
1564 }
1565 active = (mm == current->active_mm) ? 1 : 0;
1566 count = atomic_add_return(0x10000, &mm->context.attach_count);
1567 if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
1568 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
1569 __pmdp_idte_local(address, pmdp);
1570 else
1571 __pmdp_idte(address, pmdp);
1572 atomic_sub(0x10000, &mm->context.attach_count);
1573 }
1574
1575 static inline void pmdp_flush_lazy(struct mm_struct *mm,
1576 unsigned long address, pmd_t *pmdp)
1577 {
1578 int active, count;
1579
1580 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
1581 return;
1582 active = (mm == current->active_mm) ? 1 : 0;
1583 count = atomic_add_return(0x10000, &mm->context.attach_count);
1584 if ((count & 0xffff) <= active) {
1585 pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
1586 mm->context.flush_mm = 1;
1587 } else if (MACHINE_HAS_IDTE)
1588 __pmdp_idte(address, pmdp);
1589 else
1590 __pmdp_csp(pmdp);
1591 atomic_sub(0x10000, &mm->context.attach_count);
1592 }
1593
1594 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1595
1596 #define __HAVE_ARCH_PGTABLE_DEPOSIT
1597 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1598 pgtable_t pgtable);
1599
1600 #define __HAVE_ARCH_PGTABLE_WITHDRAW
1601 extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1602
1603 static inline int pmd_trans_splitting(pmd_t pmd)
1604 {
1605 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
1606 (pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
1607 }
1608
1609 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1610 pmd_t *pmdp, pmd_t entry)
1611 {
1612 *pmdp = entry;
1613 }
1614
1615 static inline pmd_t pmd_mkhuge(pmd_t pmd)
1616 {
1617 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
1618 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1619 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1620 return pmd;
1621 }
1622
1623 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1624 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1625 unsigned long address, pmd_t *pmdp)
1626 {
1627 pmd_t pmd;
1628
1629 pmd = *pmdp;
1630 pmdp_flush_direct(vma->vm_mm, address, pmdp);
1631 *pmdp = pmd_mkold(pmd);
1632 return pmd_young(pmd);
1633 }
1634
1635 #define __HAVE_ARCH_PMDP_GET_AND_CLEAR
1636 static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
1637 unsigned long address, pmd_t *pmdp)
1638 {
1639 pmd_t pmd = *pmdp;
1640
1641 pmdp_flush_direct(mm, address, pmdp);
1642 pmd_clear(pmdp);
1643 return pmd;
1644 }
1645
1646 #define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
1647 static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
1648 unsigned long address,
1649 pmd_t *pmdp, int full)
1650 {
1651 pmd_t pmd = *pmdp;
1652
1653 if (!full)
1654 pmdp_flush_lazy(mm, address, pmdp);
1655 pmd_clear(pmdp);
1656 return pmd;
1657 }
1658
1659 #define __HAVE_ARCH_PMDP_CLEAR_FLUSH
1660 static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
1661 unsigned long address, pmd_t *pmdp)
1662 {
1663 return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
1664 }
1665
1666 #define __HAVE_ARCH_PMDP_INVALIDATE
1667 static inline void pmdp_invalidate(struct vm_area_struct *vma,
1668 unsigned long address, pmd_t *pmdp)
1669 {
1670 pmdp_flush_direct(vma->vm_mm, address, pmdp);
1671 }
1672
1673 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
1674 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1675 unsigned long address, pmd_t *pmdp)
1676 {
1677 pmd_t pmd = *pmdp;
1678
1679 if (pmd_write(pmd)) {
1680 pmdp_flush_direct(mm, address, pmdp);
1681 set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
1682 }
1683 }
1684
1685 #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
1686 #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
1687
1688 static inline int pmd_trans_huge(pmd_t pmd)
1689 {
1690 return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
1691 }
1692
1693 static inline int has_transparent_hugepage(void)
1694 {
1695 return MACHINE_HAS_HPAGE ? 1 : 0;
1696 }
1697 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1698
1699 /*
1700 * 31 bit swap entry format:
1701 * A page-table entry has some bits we have to treat in a special way.
1702 * Bits 0, 20 and 23 have to be zero, otherwise a specification
1703 * exception will occur instead of a page translation exception. The
1704 * specification exception has the bad habit of not storing the necessary
1705 * information in the lowcore.
1706 * Bits 21, 22, 30 and 31 are used to indicate the page type.
1707 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
1708 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
1709 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
1710 * plus 24 for the offset.
1711 * 0| offset |0110|o|type |00|
1712 * 0 0000000001111111111 2222 2 22222 33
1713 * 0 1234567890123456789 0123 4 56789 01
1714 *
1715 * 64 bit swap entry format:
1716 * A page-table entry has some bits we have to treat in a special way.
1717 * Bit 52 and bit 55 have to be zero, otherwise a specification
1718 * exception will occur instead of a page translation exception. The
1719 * specification exception has the bad habit of not storing the necessary
1720 * information in the lowcore.
1721 * Bits 53, 54, 62 and 63 are used to indicate the page type.
1722 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
1723 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
1724 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
1725 * plus 56 for the offset.
1726 * | offset |0110|o|type |00|
1727 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
1728 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
1729 */
1730 #ifndef CONFIG_64BIT
1731 #define __SWP_OFFSET_MASK (~0UL >> 12)
1732 #else
1733 #define __SWP_OFFSET_MASK (~0UL >> 11)
1734 #endif
1735 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1736 {
1737 pte_t pte;
1738 offset &= __SWP_OFFSET_MASK;
1739 pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
1740 ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
1741 return pte;
1742 }
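/*
 * Worked 64 bit example (numbers chosen for illustration): for type 3 and
 * offset 0x1234, mk_swap_pte() yields
 *	0x400 | 0x002 | (3 << 2) | (0x1234 << 11) = 0x91a40e,
 * and the macros below decode __swp_type() == 3 and
 * __swp_offset() == 0x1234 from it.
 */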
1743
1744 #define __swp_type(entry) (((entry).val >> 2) & 0x1f)
1745 #define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1))
1746 #define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
1747
1748 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
1749 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
1750
1751 #endif /* !__ASSEMBLY__ */
1752
1753 #define kern_addr_valid(addr) (1)
1754
1755 extern int vmem_add_mapping(unsigned long start, unsigned long size);
1756 extern int vmem_remove_mapping(unsigned long start, unsigned long size);
1757 extern int s390_enable_sie(void);
1758 extern int s390_enable_skey(void);
1759 extern void s390_reset_cmma(struct mm_struct *mm);
1760
1761 /* s390 has a private copy of get unmapped area to deal with cache synonyms */
1762 #define HAVE_ARCH_UNMAPPED_AREA
1763 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1764
1765 /*
1766 * No page table caches to initialise
1767 */
1768 static inline void pgtable_cache_init(void) { }
1769 static inline void check_pgt_cache(void) { }
1770
1771 #include <asm-generic/pgtable.h>
1772
1773 #endif /* _ASM_S390_PGTABLE_H */