/* arch/mips/math-emu/dsemul.c - branch delay slot 'emulation' frames */
1 #include <linux/err.h>
2 #include <linux/slab.h>
3 #include <linux/mm_types.h>
4 #include <linux/sched/task.h>
5
6 #include <asm/branch.h>
7 #include <asm/cacheflush.h>
8 #include <asm/fpu_emulator.h>
9 #include <asm/inst.h>
10 #include <asm/mipsregs.h>
11 #include <linux/uaccess.h>
12
13 /**
14 * struct emuframe - The 'emulation' frame structure
15 * @emul: The instruction to 'emulate'.
16 * @badinst: A break instruction to cause a return to the kernel.
17 *
18 * This structure defines the frames placed within the delay slot emulation
19 * page in response to a call to mips_dsemul(). Each thread may be allocated
20 * only one frame at any given time. The kernel stores within it the
21 * instruction to be 'emulated' followed by a break instruction, then
22 * executes the frame in user mode. The break causes a trap to the kernel
23 * which leads to do_dsemulret() being called unless the instruction in
24 * @emul causes a trap itself, is a branch, or a signal is delivered to
25 * the thread. In these cases the allocated frame will either be reused by
26 * a subsequent delay slot 'emulation', or be freed during signal delivery or
27 * upon thread exit.
28 *
29 * This approach is used because:
30 *
31 * - Actually emulating all instructions isn't feasible. We would need to
32 * be able to handle instructions from all revisions of the MIPS ISA,
33 * all ASEs & all vendor instruction set extensions. This would be a
34 * whole lot of work & continual maintenance burden as new instructions
35 * are introduced, and in the case of some vendor extensions may not
36 * even be possible. Thus we need to take the approach of actually
37 * executing the instruction.
38 *
39 * - We must execute the instruction within user context. If we were to
40 * execute the instruction in kernel mode then it would have access to
41 * kernel resources without very careful checks, leaving us with a
42 * high potential for security or stability issues to arise.
43 *
44 * - We used to place the frame on the users stack, but this requires
45 * that the stack be executable. This is bad for security so the
46 * per-process page is now used instead.
47 *
48 * - The instruction in @emul may be something entirely invalid for a
49 * delay slot. The user may (intentionally or otherwise) place a branch
50 * in a delay slot, or a kernel mode instruction, or something else
51 * which generates an exception. Thus we can't rely upon the break in
52 * @badinst always being hit. For this reason we track the index of the
53 * frame allocated to each thread, allowing us to clean it up at later
54 * points such as signal delivery or thread exit.
55 *
56 * - The user may generate a fake struct emuframe if they wish, invoking
57 * the BRK_MEMU break instruction themselves. We must therefore not
58 * trust that BRK_MEMU means there's actually a valid frame allocated
59 * to the thread, and must not allow the user to do anything they
60 * couldn't already.
61 */
struct emuframe {
	mips_instruction emul;		/* instruction to execute from the delay slot */
	mips_instruction badinst;	/* break insn; traps back to do_dsemulret() */
};
66
/* Number of emuframe slots that fit within the per-mm emulation page */
static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe);
68
69 static inline __user struct emuframe *dsemul_page(void)
70 {
71 return (__user struct emuframe *)STACK_TOP;
72 }
73
/*
 * alloc_emuframe() - Allocate an emulation frame to the current thread.
 *
 * Lazily creates the per-mm allocation bitmap on first use, then claims a
 * free frame index within the emulation page. If the page is full, the
 * thread sleeps killably until another thread frees a frame, then retries.
 *
 * Returns the allocated frame index, or BD_EMUFRAME_NONE if the bitmap
 * could not be allocated or a fatal signal arrived while waiting.
 */
static int alloc_emuframe(void)
{
	mm_context_t *mm_ctx = &current->mm->context;
	int idx;

retry:
	spin_lock(&mm_ctx->bd_emupage_lock);

	/* Ensure we have an allocation bitmap */
	if (!mm_ctx->bd_emupage_allocmap) {
		/* GFP_ATOMIC: we hold the spinlock, so we must not sleep */
		mm_ctx->bd_emupage_allocmap =
			kcalloc(BITS_TO_LONGS(emupage_frame_count),
				sizeof(unsigned long),
				GFP_ATOMIC);

		if (!mm_ctx->bd_emupage_allocmap) {
			idx = BD_EMUFRAME_NONE;
			goto out_unlock;
		}
	}

	/* Attempt to allocate a single bit/frame */
	idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap,
				      emupage_frame_count, 0);
	if (idx < 0) {
		/*
		 * Failed to allocate a frame. We'll wait until one becomes
		 * available. We unlock the page so that other threads actually
		 * get the opportunity to free their frames, which means
		 * technically the result of bitmap_full may be incorrect.
		 * However the worst case is that we repeat all this and end up
		 * back here again.
		 */
		spin_unlock(&mm_ctx->bd_emupage_lock);
		/*
		 * Reading the allocmap without the lock is safe here: it is
		 * only ever freed at mm teardown (dsemul_mm_cleanup), by
		 * which point no thread can be waiting on it.
		 */
		if (!wait_event_killable(mm_ctx->bd_emupage_queue,
					 !bitmap_full(mm_ctx->bd_emupage_allocmap,
						      emupage_frame_count)))
			goto retry;

		/* Received a fatal signal - just give in */
		return BD_EMUFRAME_NONE;
	}

	/* Success! */
	pr_debug("allocate emuframe %d to %d\n", idx, current->pid);
out_unlock:
	spin_unlock(&mm_ctx->bd_emupage_lock);
	return idx;
}
123
124 static void free_emuframe(int idx, struct mm_struct *mm)
125 {
126 mm_context_t *mm_ctx = &mm->context;
127
128 spin_lock(&mm_ctx->bd_emupage_lock);
129
130 pr_debug("free emuframe %d from %d\n", idx, current->pid);
131 bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1);
132
133 /* If some thread is waiting for a frame, now's its chance */
134 wake_up(&mm_ctx->bd_emupage_queue);
135
136 spin_unlock(&mm_ctx->bd_emupage_lock);
137 }
138
139 static bool within_emuframe(struct pt_regs *regs)
140 {
141 unsigned long base = (unsigned long)dsemul_page();
142
143 if (regs->cp0_epc < base)
144 return false;
145 if (regs->cp0_epc >= (base + PAGE_SIZE))
146 return false;
147
148 return true;
149 }
150
151 bool dsemul_thread_cleanup(struct task_struct *tsk)
152 {
153 int fr_idx;
154
155 /* Clear any allocated frame, retrieving its index */
156 fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);
157
158 /* If no frame was allocated, we're done */
159 if (fr_idx == BD_EMUFRAME_NONE)
160 return false;
161
162 task_lock(tsk);
163
164 /* Free the frame that this thread had allocated */
165 if (tsk->mm)
166 free_emuframe(fr_idx, tsk->mm);
167
168 task_unlock(tsk);
169 return true;
170 }
171
/*
 * dsemul_thread_rollback() - Rewind a thread out of its emulation frame.
 *
 * Called (e.g. during signal delivery) when the thread may be mid-way
 * through executing an emuframe. Adjusts the PC in @regs so the thread
 * resumes at a sensible point outside the emupage, then frees the frame.
 *
 * Returns true if the thread was executing within its frame and was
 * rolled back, false otherwise.
 */
bool dsemul_thread_rollback(struct pt_regs *regs)
{
	struct emuframe __user *fr;
	int fr_idx;

	/* Do nothing if we're not executing from a frame */
	if (!within_emuframe(regs))
		return false;

	/* Find the frame being executed */
	fr_idx = atomic_read(&current->thread.bd_emu_frame);
	if (fr_idx == BD_EMUFRAME_NONE)
		return false;
	fr = &dsemul_page()[fr_idx];

	/*
	 * If the PC is at the emul instruction, roll back to the branch. If
	 * PC is at the badinst (break) instruction, we've already emulated the
	 * instruction so progress to the continue PC. If it's anything else
	 * then something is amiss & the user has branched into some other area
	 * of the emupage - we'll free the allocated frame anyway.
	 */
	/* msk_isa16_mode strips the ISA bit so microMIPS PCs compare equal */
	if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul)
		regs->cp0_epc = current->thread.bd_emu_branch_pc;
	else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst)
		regs->cp0_epc = current->thread.bd_emu_cont_pc;

	/* Disassociate the frame from the thread before freeing it */
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
	free_emuframe(fr_idx, current->mm);
	return true;
}
203
204 void dsemul_mm_cleanup(struct mm_struct *mm)
205 {
206 mm_context_t *mm_ctx = &mm->context;
207
208 kfree(mm_ctx->bd_emupage_allocmap);
209 }
210
/**
 * mips_dsemul() - 'Emulate' an instruction from a branch delay slot.
 * @regs: User register context.
 * @ir: The branch delay slot instruction.
 * @branch_pc: The PC of the branch whose delay slot @ir occupies.
 * @cont_pc: The PC to continue from once @ir has been executed.
 *
 * Handles NOPs and microMIPS ADDIUPC immediately; otherwise writes @ir
 * plus a trailing break instruction into the thread's emuframe and points
 * the user PC at it for execution in user mode (see struct emuframe above).
 *
 * Returns -1 if @ir was handled in-kernel (nothing more to do), 0 if an
 * emuframe was set up, or SIGBUS if a frame could not be allocated or
 * written.
 */
int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
		unsigned long branch_pc, unsigned long cont_pc)
{
	int isa16 = get_isa16_mode(regs->cp0_epc);
	mips_instruction break_math;
	struct emuframe __user *fr;
	int err, fr_idx;

	/* NOP is easy */
	if (ir == 0)
		return -1;

	/* microMIPS instructions */
	if (isa16) {
		union mips_instruction insn = { .word = ir };

		/* NOP16 aka MOVE16 $0, $0 */
		if ((ir >> 16) == MM_NOP16)
			return -1;

		/* ADDIUPC - PC-relative, so it can't run from the emupage */
		if (insn.mm_a_format.opcode == mm_addiupc_op) {
			unsigned int rs;
			s32 v;

			/* Decode the destination register number */
			rs = (((insn.mm_a_format.rs + 0xe) & 0xf) + 2);
			v = regs->cp0_epc & ~3;
			v += insn.mm_a_format.simmediate << 2;
			regs->regs[rs] = (long)v;
			return -1;
		}
	}

	pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc);

	/* Allocate a frame if we don't already have one */
	fr_idx = atomic_read(&current->thread.bd_emu_frame);
	if (fr_idx == BD_EMUFRAME_NONE)
		fr_idx = alloc_emuframe();
	if (fr_idx == BD_EMUFRAME_NONE)
		return SIGBUS;
	fr = &dsemul_page()[fr_idx];

	/* Retrieve the appropriately encoded break instruction */
	break_math = BREAK_MATH(isa16);

	/* Write the instructions to the frame */
	if (isa16) {
		/* microMIPS: store each 32b instruction as two halfwords,
		 * most-significant half first */
		err = __put_user(ir >> 16,
				 (u16 __user *)(&fr->emul));
		err |= __put_user(ir & 0xffff,
				  (u16 __user *)((long)(&fr->emul) + 2));
		err |= __put_user(break_math >> 16,
				  (u16 __user *)(&fr->badinst));
		err |= __put_user(break_math & 0xffff,
				  (u16 __user *)((long)(&fr->badinst) + 2));
	} else {
		err = __put_user(ir, &fr->emul);
		err |= __put_user(break_math, &fr->badinst);
	}

	if (unlikely(err)) {
		MIPS_FPU_EMU_INC_STATS(errors);
		free_emuframe(fr_idx, current->mm);
		return SIGBUS;
	}

	/* Record the PC of the branch, PC to continue from & frame index */
	current->thread.bd_emu_branch_pc = branch_pc;
	current->thread.bd_emu_cont_pc = cont_pc;
	atomic_set(&current->thread.bd_emu_frame, fr_idx);

	/* Change user register context to execute the frame */
	/* The low bit of EPC selects microMIPS (ISA16) execution mode */
	regs->cp0_epc = (unsigned long)&fr->emul | isa16;

	/* Ensure the icache observes our newly written frame */
	flush_cache_sigtramp((unsigned long)&fr->emul);

	return 0;
}
291
292 bool do_dsemulret(struct pt_regs *xcp)
293 {
294 /* Cleanup the allocated frame, returning if there wasn't one */
295 if (!dsemul_thread_cleanup(current)) {
296 MIPS_FPU_EMU_INC_STATS(errors);
297 return false;
298 }
299
300 /* Set EPC to return to post-branch instruction */
301 xcp->cp0_epc = current->thread.bd_emu_cont_pc;
302 pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
303 MIPS_FPU_EMU_INC_STATS(ds_emul);
304 return true;
305 }