/* arch/powerpc/include/asm/book3s/64/hash.h */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in hash-4k.h and hash-64k.h.
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS
#define H_PAGE_F_GIX_SHIFT	56
#define H_PAGE_BUSY		_RPAGE_RSV1	/* software: PTE & hash are busy */
#define H_PAGE_F_SECOND		_RPAGE_RSV2	/* HPTE is in 2ndary HPTEG */
#define H_PAGE_F_GIX		(_RPAGE_RSV3 | _RPAGE_RSV4 | _RPAGE_RPN44)
#define H_PAGE_HASHPTE		_RPAGE_RPN43	/* PTE has associated HPTE */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)

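/*
 * Worked example (illustrative values only; the real index sizes come
 * from hash-4k.h / hash-64k.h): with 64K pages (PAGE_SHIFT = 16) and
 * assumed index sizes H_PTE_INDEX_SIZE = 8, H_PMD_INDEX_SIZE = 5,
 * H_PUD_INDEX_SIZE = 5, H_PGD_INDEX_SIZE = 12, this gives:
 *
 *	H_PGTABLE_EADDR_SIZE = 8 + 5 + 5 + 12 + 16 = 46
 *	H_PGTABLE_RANGE      = 1UL << 46	(64TB of EA space)
 */
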
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES)
/*
 * Only with 64K hash pages do we need to use the second half of the
 * PMD page table to store the pointer to the deposited pgtable_t.
 */
#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
#else
#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
#endif
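
/*
 * In the THP + 64K case the extra bit doubles the PMD table
 * allocation from 2^H_PMD_INDEX_SIZE to 2^(H_PMD_INDEX_SIZE + 1)
 * entries; the second half is where the deposited pgtable_t pointers
 * live for THP deposit/withdraw.
 */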
/*
 * Define the address range of the kernel non-linear virtual area
 */
#define H_KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region and
 * occupies the first half of it; the second half of the region is
 * used for the kernel IO mappings (ioremap and friends).
 */
#define H_VMALLOC_START	H_KERN_VIRT_START
#define H_VMALLOC_SIZE	(H_KERN_VIRT_SIZE >> 1)
#define H_VMALLOC_END	(H_VMALLOC_START + H_VMALLOC_SIZE)

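/*
 * With the constants above these work out to:
 *
 *	H_VMALLOC_START = 0xD000000000000000
 *	H_VMALLOC_SIZE  = 0x0000080000000000	(half of H_KERN_VIRT_SIZE)
 *	H_VMALLOC_END   = 0xD000080000000000
 */
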
/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)

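/*
 * REGION_ID() just extracts the top nibble of an effective address.
 * With PAGE_OFFSET at 0xC000000000000000 this gives:
 *
 *	USER_REGION_ID    = 0x0
 *	KERNEL_REGION_ID  = 0xC
 *	VMALLOC_REGION_ID = 0xD
 *	VMEMMAP_REGION_ID = 0xF
 */
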
/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
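/* That is, H_VMEMMAP_BASE = 0xfUL << 60 = 0xF000000000000000. */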

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

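/*
 * A valid PMD/PUD entry carries a pointer to the next-level table,
 * which is aligned to that table's size; any of the low
 * (TABLE_SIZE - 1) bits being set therefore marks the entry as bad.
 */
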
#ifndef __ASSEMBLY__
#define hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					 unsigned long addr,
					 pte_t *ptep, unsigned long clr,
					 unsigned long set,
					 int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	/*
	 * Spin while H_PAGE_BUSY is set (the hash miss/flush code owns
	 * the PTE), then atomically clear the 'clr' bits and set the
	 * 'set' bits under a ldarx/stdcx. reservation.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
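
/*
 * Typical caller pattern, sketched: reference tracking clears the
 * accessed bit and inspects the returned old value, e.g.
 *
 *	old = hash__pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 *	young = (old & _PAGE_ACCESSED) != 0;
 *
 * Any required flush of an associated HPTE is handled inside
 * hash__pte_update() itself via hpte_need_flush().
 */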

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE;
 * this function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	/*
	 * As in hash__pte_update(): wait for H_PAGE_BUSY to clear, then
	 * OR in the new bits. Bits are only ever added here, never
	 * removed, which is why no hash flush is needed.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}
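
/*
 * Sketch of a typical use (illustrative): upgrading a PTE after a
 * write fault on a clean, already-present page:
 *
 *	pte_t entry = pte_mkdirty(pte_mkyoung(*ptep));
 *	hash__ptep_set_access_flags(ptep, entry);
 *
 * Only the bits in 'mask' above are ORed in, so access rights can
 * never be removed here.
 */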

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

/*
 * This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors; it's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * On hash we can simply store the PTE; concurrent updates are
	 * serialised via H_PAGE_BUSY in hash__pte_update().
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
				 unsigned long flags);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */