/*
 * Copyright (C) 2005,2006,2007,2008,2009,2010,2011 Imagination Technologies
 *
 * This file contains the architecture-dependent parts of process handling.
 *
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/elfcore.h>
#include <linux/fs.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/pm.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/smp.h>

#include <asm/core_reg.h>
#include <asm/user_gateway.h>
#include <asm/tcm.h>
#include <asm/traps.h>
#include <asm/switch_to.h>

/*
 * Wait for the next interrupt and enable local interrupts
 */
void arch_cpu_idle(void)
{
        int tmp;

        /*
         * Quickly jump straight into the interrupt entry point without actually
         * triggering an interrupt. When TXSTATI gets read the processor will
         * block until an interrupt is triggered.
         */
        asm volatile (/* Switch into ISTAT mode */
                      "RTH\n\t"
                      /* Enable local interrupts */
                      "MOV TXMASKI, %1\n\t"
                      /*
                       * We can't directly "SWAP PC, PCX", so we swap via a
                       * temporary. Essentially we do:
                       *  PCX_new = 1f (the place to continue execution)
                       *  PC = PCX_old
                       */
                      "ADD %0, CPC0, #(1f-.)\n\t"
                      "SWAP PCX, %0\n\t"
                      "MOV PC, %0\n"
                      /* Continue execution here with interrupts enabled */
                      "1:"
                      : "=a" (tmp)
                      : "r" (get_trigger_mask()));
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

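/*
 * Optional SoC-level restart/halt hooks, presumably filled in by platform
 * code when board-specific shutdown is needed. machine_restart() and
 * machine_halt() below call them if present before stopping the processor
 * with hard_processor_halt().
 */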
void (*soc_restart)(char *cmd);
void (*soc_halt)(void);

void machine_restart(char *cmd)
{
        if (soc_restart)
                soc_restart(cmd);
        hard_processor_halt(HALT_OK);
}

void machine_halt(void)
{
        if (soc_halt)
                soc_halt();
        smp_send_stop();
        hard_processor_halt(HALT_OK);
}

void machine_power_off(void)
{
        if (pm_power_off)
                pm_power_off();
        smp_send_stop();
        hard_processor_halt(HALT_OK);
}

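/* Condition flag bits within regs->ctx.Flags, decoded by show_regs() below. */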
#define FLAG_Z 0x8
#define FLAG_N 0x4
#define FLAG_O 0x2
#define FLAG_C 0x1

void show_regs(struct pt_regs *regs)
{
        int i;
        const char *AX0_names[] = {"A0StP", "A0FrP"};
        const char *AX1_names[] = {"A1GbP", "A1LbP"};

        const char *DX0_names[] = {
                "D0Re0",
                "D0Ar6",
                "D0Ar4",
                "D0Ar2",
                "D0FrT",
                "D0.5 ",
                "D0.6 ",
                "D0.7 "
        };

        const char *DX1_names[] = {
                "D1Re0",
                "D1Ar5",
                "D1Ar3",
                "D1Ar1",
                "D1RtP",
                "D1.5 ",
                "D1.6 ",
                "D1.7 "
        };

        show_regs_print_info(KERN_INFO);

        pr_info(" pt_regs @ %p\n", regs);
        pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
        pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
                regs->ctx.Flags & FLAG_Z ? 'Z' : 'z',
                regs->ctx.Flags & FLAG_N ? 'N' : 'n',
                regs->ctx.Flags & FLAG_O ? 'O' : 'o',
                regs->ctx.Flags & FLAG_C ? 'C' : 'c');
        pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT);
        pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC);

        /* AX regs */
        for (i = 0; i < 2; i++) {
                pr_info(" %s = 0x%08x ",
                        AX0_names[i],
                        regs->ctx.AX[i].U0);
                printk(" %s = 0x%08x\n",
                       AX1_names[i],
                       regs->ctx.AX[i].U1);
        }

        if (regs->ctx.SaveMask & TBICTX_XEXT_BIT)
                pr_warn(" Extended state present - AX2.[01] will be WRONG\n");

        /* Special place with AXx.2 */
        pr_info(" A0.2 = 0x%08x ",
                regs->ctx.Ext.AX2.U0);
        printk(" A1.2 = 0x%08x\n",
               regs->ctx.Ext.AX2.U1);

        /* 'extended' AX regs (nominally, just AXx.3) */
        for (i = 0; i < (TBICTX_AX_REGS - 3); i++) {
                pr_info(" A0.%d = 0x%08x ", i + 3, regs->ctx.AX3[i].U0);
                printk(" A1.%d = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1);
        }

        for (i = 0; i < 8; i++) {
                pr_info(" %s = 0x%08x ", DX0_names[i], regs->ctx.DX[i].U0);
                printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1);
        }

        show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs);
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long kthread_arg, struct task_struct *tsk)
{
        struct pt_regs *childregs = task_pt_regs(tsk);
        void *kernel_context = ((void *) childregs +
                                sizeof(struct pt_regs));
        unsigned long global_base;

        BUG_ON(((unsigned long)childregs) & 0x7);
        BUG_ON(((unsigned long)kernel_context) & 0x7);

        memset(&tsk->thread.kernel_context, 0,
               sizeof(tsk->thread.kernel_context));

        tsk->thread.kernel_context = __TBISwitchInit(kernel_context,
                                                     ret_from_fork,
                                                     0, 0);

        if (unlikely(tsk->flags & PF_KTHREAD)) {
                /*
                 * Make sure we don't leak any kernel data to child's regs
                 * if kernel thread becomes a userspace thread in the future
                 */
                memset(childregs, 0, sizeof(struct pt_regs));

                global_base = __core_reg_get(A1GbP);
                childregs->ctx.AX[0].U1 = (unsigned long) global_base;
                childregs->ctx.AX[0].U0 = (unsigned long) kernel_context;
                /* Set D1Ar1=kthread_arg and D1RtP=usp (fn) */
                childregs->ctx.DX[4].U1 = usp;
                childregs->ctx.DX[3].U1 = kthread_arg;
                tsk->thread.int_depth = 2;
                return 0;
        }

        /*
         * Get a pointer to where the new child's register block should have
         * been pushed.
         * The Meta's stack grows upwards, and the context is the first
         * thing to be pushed by TBX (phew)
         */
        *childregs = *current_pt_regs();
        /* Set the correct stack for the clone mode */
        if (usp)
                childregs->ctx.AX[0].U0 = ALIGN(usp, 8);
        tsk->thread.int_depth = 1;

        /* set return value for child process */
        childregs->ctx.DX[0].U0 = 0;

        /* The TLS pointer is passed as an argument to sys_clone. */
        if (clone_flags & CLONE_SETTLS)
                tsk->thread.tls_ptr =
                        (__force void __user *)childregs->ctx.DX[1].U1;

#ifdef CONFIG_METAG_FPU
        if (tsk->thread.fpu_context) {
                struct meta_fpu_context *ctx;

                ctx = kmemdup(tsk->thread.fpu_context,
                              sizeof(struct meta_fpu_context), GFP_ATOMIC);
                tsk->thread.fpu_context = ctx;
        }
#endif

#ifdef CONFIG_METAG_DSP
        if (tsk->thread.dsp_context) {
                struct meta_ext_context *ctx;
                int i;

                ctx = kmemdup(tsk->thread.dsp_context,
                              sizeof(struct meta_ext_context), GFP_ATOMIC);
                for (i = 0; i < 2; i++)
                        ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i],
                                              GFP_ATOMIC);
                tsk->thread.dsp_context = ctx;
        }
#endif

        return 0;
}

#ifdef CONFIG_METAG_FPU
static void alloc_fpu_context(struct thread_struct *thread)
{
        thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context),
                                      GFP_ATOMIC);
}

static void clear_fpu(struct thread_struct *thread)
{
        thread->user_flags &= ~TBICTX_FPAC_BIT;
        kfree(thread->fpu_context);
        thread->fpu_context = NULL;
}
#else
static void clear_fpu(struct thread_struct *thread)
{
}
#endif

#ifdef CONFIG_METAG_DSP
static void clear_dsp(struct thread_struct *thread)
{
        if (thread->dsp_context) {
                kfree(thread->dsp_context->ram[0]);
                kfree(thread->dsp_context->ram[1]);

                kfree(thread->dsp_context);

                thread->dsp_context = NULL;
        }

        __core_reg_set(D0.8, 0);
}
#else
static void clear_dsp(struct thread_struct *thread)
{
}
#endif

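/*
 * Switch the kernel context from @prev to @next via TBX's __TBISwitch().
 * If @prev has touched the FPU (TBICTX_FPAC_BIT set in its user flags),
 * its FPU state is saved into prev->thread.fpu_context first and marked
 * for restore the next time it is scheduled. @prev is threaded through
 * the pPara field so the task we switched away from can be handed back
 * to the generic scheduler code.
 */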
struct task_struct *__sched __switch_to(struct task_struct *prev,
                                        struct task_struct *next)
{
        TBIRES to, from;

        to.Switch.pCtx = next->thread.kernel_context;
        to.Switch.pPara = prev;

#ifdef CONFIG_METAG_FPU
        if (prev->thread.user_flags & TBICTX_FPAC_BIT) {
                struct pt_regs *regs = task_pt_regs(prev);
                TBIRES state;

                state.Sig.SaveMask = prev->thread.user_flags;
                state.Sig.pCtx = &regs->ctx;

                if (!prev->thread.fpu_context)
                        alloc_fpu_context(&prev->thread);
                if (prev->thread.fpu_context)
                        __TBICtxFPUSave(state, prev->thread.fpu_context);
        }
        /*
         * Force a restore of the FPU context next time this process is
         * scheduled.
         */
        if (prev->thread.fpu_context)
                prev->thread.fpu_context->needs_restore = true;
#endif

        from = __TBISwitch(to, &prev->thread.kernel_context);

        /* Restore TLS pointer for this process. */
        set_gateway_tls(current->thread.tls_ptr);

        return (struct task_struct *) from.Switch.pPara;
}

void flush_thread(void)
{
        clear_fpu(&current->thread);
        clear_dsp(&current->thread);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
        clear_fpu(&tsk->thread);
        clear_dsp(&tsk->thread);
}

/* TODO: figure out how to unwind the kernel stack here to figure out
 * where we went to sleep. */
unsigned long get_wchan(struct task_struct *p)
{
        return 0;
}

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
        /* Returning 0 indicates that the FPU state was not stored (as it was
         * not in use) */
        return 0;
}

#ifdef CONFIG_METAG_USER_TCM

#define ELF_MIN_ALIGN PAGE_SIZE

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

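/*
 * Map an ELF segment, with special handling for segments whose target
 * address lies in a TCM region. Roughly: such segments cannot be
 * mmap()ed in place, so MAP_FIXED is dropped and the file contents are
 * mapped wherever the kernel chooses, a TCM block is allocated at the
 * requested address via tcm_alloc(), and the data is then copied from
 * the mapping into that block.
 */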
unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
                              struct elf_phdr *eppnt, int prot, int type,
                              unsigned long total_size)
{
        unsigned long map_addr, size;
        unsigned long page_off = ELF_PAGEOFFSET(eppnt->p_vaddr);
        unsigned long raw_size = eppnt->p_filesz + page_off;
        unsigned long off = eppnt->p_offset - page_off;
        unsigned int tcm_tag;
        addr = ELF_PAGESTART(addr);
        size = ELF_PAGEALIGN(raw_size);

        /* mmap() will return -EINVAL if given a zero size, but a
         * segment with zero filesize is perfectly valid */
        if (!size)
                return addr;

        tcm_tag = tcm_lookup_tag(addr);

        if (tcm_tag != TCM_INVALID_TAG)
                type &= ~MAP_FIXED;

        /*
         * total_size is the size of the ELF (interpreter) image.
         * The _first_ mmap needs to know the full size, otherwise
         * randomization might put this image into an overlapping
         * position with the ELF binary image. (since size < total_size)
         * So we first map the 'big' image - and unmap the remainder at
         * the end. (which unmap is needed for ELF images with holes.)
         */
        if (total_size) {
                total_size = ELF_PAGEALIGN(total_size);
                map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
                if (!BAD_ADDR(map_addr))
                        vm_munmap(map_addr+size, total_size-size);
        } else
                map_addr = vm_mmap(filep, addr, size, prot, type, off);

        if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) {
                struct tcm_allocation *tcm;
                unsigned long tcm_addr;

                tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
                if (!tcm)
                        return -ENOMEM;

                tcm_addr = tcm_alloc(tcm_tag, raw_size);
                if (tcm_addr != addr) {
                        kfree(tcm);
                        return -ENOMEM;
                }

                tcm->tag = tcm_tag;
                tcm->addr = tcm_addr;
                tcm->size = raw_size;

                list_add(&tcm->list, &current->mm->context.tcm);

                eppnt->p_vaddr = map_addr;
                if (copy_from_user((void *) addr, (void __user *) map_addr,
                                   raw_size))
                        return -EFAULT;
        }

        return map_addr;
}
#endif