/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	/* What the heck is this check doing? */
	if (!cpu_isset(smp_processor_id(), cpu_callin_map))
		play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

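/*
 * Set up the register state with which a thread enters user mode after
 * exec: drop kernel privileges, reset the FPU/DSP/MSA state and point
 * the thread at its entry point and initial user stack.
 */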
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	clear_used_math();
	clear_fpu_owner();
	init_dsp();
	clear_thread_flag(TIF_USEDMSA);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	disable_msa();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

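/* MIPS keeps no extra per-thread state that needs tearing down on exit or exec. */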
void exit_thread(void)
{
}

void flush_thread(void)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

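/*
 * Set up the architecture-specific thread state for a new task. For a
 * kernel thread, usp and arg carry the function to call and its argument
 * rather than a user stack pointer.
 */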
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* Set up the new task's kernel stack frame. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}
	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];	/* TLS pointer arrives in $a3 */

	return 0;
}

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
/* Canary value for GCC's -fstack-protector, checked on function return. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void *func;
	unsigned long func_size;	/* size of func in bytes, 0 if unknown */
	int frame_size;			/* stack frame size in bytes */
	int pc_offset;			/* offset of the saved $ra in the frame, in longs; -1 if none found */
};

/* Absolute target of a j/jal: 26-bit index scaled by 4, within the pc's 256MB segment. */
#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	union mips_instruction mmi;

	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		mmi.word = (ip->halfword[0] << 16);
		return (mmi.mm16_r5_format.opcode == mm_swsp16_op &&
			mmi.mm16_r5_format.rt == 31) ||
		       (mmi.mm16_m_format.opcode == mm_pool16c_op &&
			mmi.mm16_m_format.func == mm_swm16_op);
	} else {
		mmi.halfword[0] = ip->halfword[1];
		mmi.halfword[1] = ip->halfword[0];
		return (mmi.mm_m_format.opcode == mm_pool32b_op &&
			mmi.mm_m_format.rd > 9 &&
			mmi.mm_m_format.base == 29 &&
			mmi.mm_m_format.func == mm_swm32_func) ||
		       (mmi.i_format.opcode == mm_sw32_op &&
			mmi.i_format.rs == 29 &&
			mmi.i_format.rt == 31);
	}
#else
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31;
#endif
}

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	union mips_instruction mmi;

	mmi.word = (ip->halfword[0] << 16);

	if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
	    (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
	    ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		union mips_instruction mmi;

		mmi.word = (ip->halfword[0] << 16);
		/* ADDIUSP is distinguished from ADDIUS5 by the function bit. */
		return (mmi.mm16_r3_format.opcode == mm_pool16d_op &&
			(mmi.mm16_r3_format.simmediate & mm_addiusp_func)) ||
		       (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
			mmi.mm16_r5_format.rt == 29);
	}
	return ip->mm_i_format.opcode == mm_addiu32_op &&
	       ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
#endif
	return 0;
}

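/*
 * Analyze a function's prologue to determine its stack frame size and
 * where it saves $ra. Returns 0 for a frame-allocating (nested) function,
 * 1 for a leaf function, and -1 if the prologue cannot be analyzed.
 */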
static int get_frame_info(struct mips_frame_info *info)
{
#ifdef CONFIG_CPU_MICROMIPS
	/* microMIPS function pointers have the ISA bit set; strip it off. */
	union mips_instruction *ip = (void *) (((char *) info->func) - 1);
#else
	union mips_instruction *ip = info->func;
#endif
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;

	info->pc_offset = -1;
	info->frame_size = 0;

	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

	for (i = 0; i < max_insns; i++, ip++) {

		if (is_jump_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip)) {
#ifdef CONFIG_CPU_MICROMIPS
				if (mm_insn_16bit(ip->halfword[0])) {
					unsigned short tmp;

					if (ip->halfword[0] & mm_addiusp_func) {
						/* addiusp: 9-bit immediate, scaled by 4 */
						tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
						info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
					} else {
						/* addius5: 4-bit immediate */
						tmp = (ip->halfword[0] >> 1);
						info->frame_size = -(signed short)(tmp & 0xf);
					}
					/* 16-bit insn: net advance of one halfword after the loop's ip++ */
					ip = (void *) &ip->halfword[1];
					ip--;
				} else
#endif
					info->frame_size = - ip->i_format.simmediate;
			}
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

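/* Frame info for __schedule(), initialised at boot by frame_info_init(). */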
static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	/* Without kallsyms, locate __schedule() via the jump at the start of schedule(). */
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}

#ifdef CONFIG_KALLSYMS
/*
 * Generic stack unwinding function: returns the caller's pc and updates
 * *sp and *ra to step past the frame that pc lies in.
 */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (__kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In that
		 * case, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 *
 * Return the address at which a blocked task is sleeping, unwinding
 * out of the scheduler functions it slept through.
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and a 16-byte boundary for the 64-bit
 * ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}

static void arch_dump_stack(void *info)
{
	struct pt_regs *regs;

	regs = get_irq_regs();

	if (regs)
		show_regs(regs);

	dump_stack();
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	/* Note: smp_call_function() does not include the calling CPU. */
	smp_call_function(arch_dump_stack, NULL, 1);
}

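/*
 * PR_GET_FP_MODE/PR_SET_FP_MODE prctl() handlers: query and switch the
 * floating point register mode (FR, optionally with FRE) for every
 * thread of the process. From userspace, e.g.:
 *	prctl(PR_SET_FP_MODE, PR_FP_MODE_FR | PR_FP_MODE_FRE);
 */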
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	unsigned long switch_count;
	struct task_struct *t;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Save FP & vector context, then disable FPU & MSA */
	if (task->signal == current->signal)
		lose_fpu(1);

	/* Prevent any threads from obtaining live FP context */
	atomic_set(&task->mm->context.fp_mode_switching, 1);
	smp_mb__after_atomic();

	/*
	 * If there are multiple online CPUs then wait until all threads whose
	 * FP mode is about to change have been context switched. This approach
	 * allows us to only worry about whether an FP mode switch is in
	 * progress when FP is first used in a task's time slice. Pretty much
	 * all of the mode switch overhead can thus be confined to cases where
	 * mode switches are actually occurring. That is, to here. However for
	 * the thread performing the mode switch it may take a while...
	 */
	if (num_online_cpus() > 1) {
		spin_lock_irq(&task->sighand->siglock);

		for_each_thread(task, t) {
			if (t == current)
				continue;

			switch_count = t->nvcsw + t->nivcsw;

			do {
				spin_unlock_irq(&task->sighand->siglock);
				cond_resched();
				spin_lock_irq(&task->sighand->siglock);
			} while ((t->nvcsw + t->nivcsw) == switch_count);
		}

		spin_unlock_irq(&task->sighand->siglock);
	}

	/*
	 * There are now no threads of the process with live FP context, so it
	 * is safe to proceed with the FP mode switch.
	 */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/* Allow threads to use FP again */
	atomic_set(&task->mm->context.fp_mode_switching, 0);

	return 0;
}