arch/powerpc/mm/slb.c
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>
#include <asm/udbg.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

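/*
 * The low-level SLB entry insertion is implemented in assembly
 * (arch/powerpc/mm/slb_low.S); these are its entry points.
 */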
36 extern void slb_allocate_realmode(unsigned long ea);
37 extern void slb_allocate_user(unsigned long ea);
38
39 static void slb_allocate(unsigned long ea)
40 {
41 /* Currently, we do real mode for all SLBs including user, but
42 * that will change if we bring back dynamic VSIDs
43 */
44 slb_allocate_realmode(ea);
45 }
46
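/*
 * An SLB entry is written with slbmte RS,RB: RB carries the ESID, the
 * valid bit and the entry index (built by mk_esid_data()), while RS
 * carries the VSID, the segment-size field and the protection/LLP
 * flags (built by mk_vsid_data()).
 */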
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
                                         unsigned long slot)
{
        unsigned long mask;

        mask = (ssize == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T;
        return (ea & mask) | SLB_ESID_V | slot;
}

#define slb_vsid_shift(ssize)   \
        ((ssize) == MMU_SEGSIZE_256M ? SLB_VSID_SHIFT : SLB_VSID_SHIFT_1T)

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
                                         unsigned long flags)
{
        return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
                ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

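/*
 * The SLB shadow buffer mirrors this CPU's bolted SLB entries for the
 * hypervisor: if PHYP has to re-establish our SLB it reloads the bolted
 * entries from here, so the shadow must always be updated before the
 * hardware SLB itself (see create_shadowed_slbe() below).
 */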
static inline void slb_shadow_update(unsigned long ea, int ssize,
                                     unsigned long flags,
                                     unsigned long entry)
{
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it. No write barriers are needed here, provided
         * we only update the current CPU's SLB shadow buffer.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
        get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
        get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
}

static inline void slb_shadow_clear(unsigned long entry)
{
        get_slb_shadow()->save_area[entry].esid = 0;
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
                                        unsigned long flags,
                                        unsigned long entry)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(ea, ssize, flags, entry);

        asm volatile("slbmte %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, entry))
                     : "memory" );
}

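/*
 * Flush the SLB and re-install the bolted kernel entries. slbia leaves
 * entry 0 (the kernel linear mapping) intact, so only the vmalloc entry
 * (slot 1) and the kernel stack entry (slot 2) need to be re-created.
 */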
void slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data, ksp_vsid_data;

        WARN_ON(!irqs_disabled());

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
                ksp_esid_data &= ~SLB_ESID_V;
                ksp_vsid_data = 0;
                slb_shadow_clear(2);
        } else {
                /* Update stack entry; others don't change */
                slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
                ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
        }

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
                        "r"(ksp_vsid_data),
                        "r"(ksp_esid_data)
                     : "memory");
}

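/*
 * Re-bolt the first vmalloc segment after mmu_vmalloc_psize has changed,
 * so the bolted entry (slot 1) carries the new page-size (LLP) flags.
 */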
void slb_vmalloc_update(void)
{
        unsigned long vflags;

        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
        slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
        slb_flush_and_rebolt();
}

/* Helper function to compare esids. There are four cases to handle.
 * 1. The system is not 1T segment size capable. Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T. This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
        int esid_1t_count;

        /* System is not 1T segment size capable. */
        if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
                return (GET_ESID(addr1) == GET_ESID(addr2));

        esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
                         ((addr2 >> SID_SHIFT_1T) != 0));

        /* both addresses are < 1T */
        if (esid_1t_count == 0)
                return (GET_ESID(addr1) == GET_ESID(addr2));

        /* One address < 1T, the other > 1T. Not a match */
        if (esid_1t_count == 1)
                return 0;

        /* Both addresses are > 1T. */
        return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

/* Flush all user entries from the segment table of the current processor. */
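/*
 * The PACA slb_cache[] records which user ESIDs were loaded since the last
 * switch. If it hasn't overflowed (slb_cache_ptr <= SLB_CACHE_ENTRIES) and
 * the CPU doesn't have the NO_SLBIE_B quirk, we invalidate just those
 * entries with slbie; otherwise we fall back to a full slb_flush_and_rebolt().
 */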
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset = get_paca()->slb_cache_ptr;
        unsigned long slbie_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

        if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
            offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        slbie_data = (unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT; /* EA */
                        slbie_data |= user_segment_size(slbie_data)
                                << SLBIE_SSIZE_SHIFT;
                        slbie_data |= SLBIE_C; /* C set for user addresses */
                        asm volatile("slbie %0" : : "r" (slbie_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (slbie_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * preload some userspace segments into the SLB.
         */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

        if (is_kernel_addr(pc))
                return;
        slb_allocate(pc);

        if (esids_match(pc, stack))
                return;

        if (is_kernel_addr(stack))
                return;
        slb_allocate(stack);

        if (esids_match(pc, unmapped_base) || esids_match(stack, unmapped_base))
                return;

        if (is_kernel_addr(unmapped_base))
                return;
        slb_allocate(unmapped_base);
}

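/*
 * The SLB miss handler loads its VSID flags and the SLB size with
 * load-immediate instructions whose values depend on the configured
 * page sizes, so slb_initialize() patches the immediates in at boot.
 */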
static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /* Assume the instruction had a "0" immediate value, just
         * "or" in the new value
         */
        *insn_addr |= immed;
        flush_icache_range((unsigned long)insn_addr, 4+
                           (unsigned long)insn_addr);
}

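/*
 * Set up the bolted SLB entries for this CPU:
 *   slot 0 - kernel linear mapping (PAGE_OFFSET)
 *   slot 1 - first vmalloc segment (VMALLOC_START)
 *   slot 2 - kernel stack (bolted later by slb_flush_and_rebolt()/_switch())
 * and patch the SLB miss handler for the chosen page sizes.
 */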
void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        unsigned long lflags, vflags;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
        extern unsigned int *slb_compare_rr_to_size;

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;

        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);
                patch_slb_encoding(slb_compare_rr_to_size,
                                   mmu_slb_size);

                DBG("SLB: linear LLP = %04lx\n", linear_llp);
                DBG("SLB: io LLP = %04lx\n", io_llp);
        }

        get_paca()->stab_rr = SLB_NUM_BOLTED;

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);

        create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

        slb_shadow_clear(2);

        /* We don't bolt the stack for the time being - we're in boot,
         * so the stack is in the bolted segment. By the time it goes
         * elsewhere, we'll call _switch() which will bolt in the new
         * one. */
        asm volatile("isync":::"memory");
}