/*
 * Blackfin architecture-dependent process handling
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>
#include <asm/mem_map.h>

asmlinkage void ret_from_fork(void);

/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;

/* The number of tasks currently using a L1 stack area.  The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for process stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void) = NULL;
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

/*
 * The idle loop on BFIN
 */
#ifdef CONFIG_IDLE_L1
static void default_idle(void) __attribute__((l1_text));
void cpu_idle(void) __attribute__((l1_text));
#endif

/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
static void default_idle(void)
{
#ifdef CONFIG_IPIPE
	ipipe_suspend_domain();
#endif
	hard_local_irq_disable();
	if (!need_resched())
		idle_with_irq_disabled();

	hard_local_irq_enable();
}

/*
 * The idle thread.  We try to conserve power, while trying to keep
 * overall latency low.  The architecture specific idle is passed
 * a value to indicate the level of "idleness" of the system.
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
		if (cpu_is_offline(smp_processor_id()))
			cpu_die();
#endif
		if (!idle)
			idle = default_idle;
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched())
			idle();
		rcu_idle_exit();
		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

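/*
 * pm_idle is the hook a platform power-management driver can set to replace
 * default_idle; cpu_idle() re-reads it on every pass of the idle loop, so
 * an update takes effect on the next iteration.  A minimal sketch, assuming
 * a hypothetical board routine my_deep_idle():
 *
 *	extern void (*pm_idle)(void);
 *	pm_idle = my_deep_idle;
 */
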
/*
 * Do necessary setup to start up a newly executed thread.
 *
 * Pass the data segment into user programs if it exists;
 * it can't hurt anything as far as I can tell.
 */
void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	regs->pc = new_ip;
	if (current->mm)
		regs->p5 = current->mm->start_data;
#ifndef CONFIG_SMP
	task_thread_info(current)->l1_task_info.stack_start =
		(void *)current->mm->context.stack_start;
	task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
	memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info,
	       sizeof(*L1_SCRATCH_TASK_INFO));
#endif
	wrusp(new_sp);
}
EXPORT_SYMBOL_GPL(start_thread);

void flush_thread(void)
{
}

asmlinkage int bfin_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL,
		       NULL);
}

asmlinkage int bfin_clone(struct pt_regs *regs)
{
	unsigned long clone_flags;
	unsigned long newsp;

#ifdef __ARCH_SYNC_CORE_DCACHE
	if (current->nr_cpus_allowed == num_possible_cpus())
		set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
#endif

	/* syscall2 puts clone_flags in r0 and usp in r1 */
	clone_flags = regs->r0;
	newsp = regs->r1;
	if (!newsp)
		newsp = rdusp();
	else
		newsp -= 12;
	return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}

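/*
 * A new user stack pointer of 0 means "share the parent's stack", hence the
 * rdusp() fallback; the 12-byte drop for a real child stack is assumed to
 * match the argument space the userspace clone() wrapper pre-pushes.  The
 * __ARCH_SYNC_CORE_DCACHE pinning keeps the task on the current core when
 * data caches are coherent only through software synchronization.
 */
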
int
copy_thread(unsigned long clone_flags,
	    unsigned long usp, unsigned long topstk,
	    struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long *v;

	childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
	v = ((unsigned long *)childregs) - 2;
	if (unlikely(!regs)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		v[0] = usp;
		v[1] = topstk;
		childregs->orig_p0 = -1;
		childregs->ipend = 0x8000;
		__asm__ __volatile__("%0 = syscfg;":"=da"(childregs->syscfg):);
		p->thread.usp = 0;
	} else {
		*childregs = *regs;
		childregs->r0 = 0;
		p->thread.usp = usp;
		v[0] = v[1] = 0;
	}

	p->thread.ksp = (unsigned long)v;
	p->thread.pc = (unsigned long)ret_from_fork;

	return 0;
}

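/*
 * Resulting child kernel stack (sketch):
 *
 *	task_stack_page(p) + THREAD_SIZE:  top of stack
 *	childregs:                         bottom-most struct pt_regs
 *	v[0], v[1]:                        usp/topstk for kernel threads
 *	                                   (regs == NULL), zero otherwise
 *
 * p->thread.ksp points at v[] and p->thread.pc at ret_from_fork, so the
 * first switch to the child resumes in ret_from_fork with this frame.
 */
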
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)p;
	fp = p->thread.usp;
	do {
		if (fp < stack_page + sizeof(struct thread_info) ||
		    fp >= 8184 + stack_page)
			return 0;
		pc = ((unsigned long *)fp)[1];
		if (!in_sched_functions(pc))
			return pc;
		fp = *(unsigned long *)fp;
	} while (count++ < 16);
	return 0;
}

void finish_atomic_sections (struct pt_regs *regs)
{
	int __user *up0 = (int __user *)regs->p0;

	switch (regs->pc) {
	default:
		/* not in middle of an atomic step, so resume like normal */
		return;

	case ATOMIC_XCHG32 + 2:
		put_user(regs->r1, up0);
		break;

	case ATOMIC_CAS32 + 2:
	case ATOMIC_CAS32 + 4:
		if (regs->r0 == regs->r1)
	case ATOMIC_CAS32 + 6:
			put_user(regs->r2, up0);
		break;

	case ATOMIC_ADD32 + 2:
		regs->r0 = regs->r1 + regs->r0;
		/* fall through */
	case ATOMIC_ADD32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_SUB32 + 2:
		regs->r0 = regs->r1 - regs->r0;
		/* fall through */
	case ATOMIC_SUB32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_IOR32 + 2:
		regs->r0 = regs->r1 | regs->r0;
		/* fall through */
	case ATOMIC_IOR32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_AND32 + 2:
		regs->r0 = regs->r1 & regs->r0;
		/* fall through */
	case ATOMIC_AND32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_XOR32 + 2:
		regs->r0 = regs->r1 ^ regs->r0;
		/* fall through */
	case ATOMIC_XOR32 + 4:
		put_user(regs->r0, up0);
		break;
	}

	/*
	 * We've finished the atomic section, and the only thing left for
	 * userspace is to do a RTS, so we might as well handle that too
	 * since we need to update the PC anyways.
	 */
	regs->pc = regs->rets;
}

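/*
 * The fixed-code atomic sequences are short runs of 2-byte instructions at
 * well-known user addresses, entered with P0 = target and R1/R2 as operands.
 * Illustrative shape (not the exact fixed-code instructions):
 *
 *	ATOMIC_ADD32 + 0:	R0 = [P0];
 *	ATOMIC_ADD32 + 2:	R0 = R0 + R1;
 *	ATOMIC_ADD32 + 4:	[P0] = R0;
 *	ATOMIC_ADD32 + 6:	RTS;
 *
 * When an interrupt lands mid-sequence, the PC tells us which steps remain,
 * and each case above completes just those steps; the CAS+6 label nested
 * inside the compare lets entry at the store step skip the comparison.
 */
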
static inline
int in_mem(unsigned long addr, unsigned long size,
	   unsigned long start, unsigned long end)
{
	return addr >= start && addr + size <= end;
}
static inline
int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
		     unsigned long const_addr, unsigned long const_size)
{
	return const_size &&
	       in_mem(addr, size, const_addr + off, const_addr + const_size);
}
static inline
int in_mem_const(unsigned long addr, unsigned long size,
		 unsigned long const_addr, unsigned long const_size)
{
	return in_mem_const_off(addr, size, 0, const_addr, const_size);
}
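/*
 * in_mem_const_off() validates against a constant region while skipping its
 * first `off` bytes, and a const_size of 0 (region absent on this part)
 * short-circuits the whole check to false.  For example, in _access_ok()
 * below,
 *
 *	in_mem_const_off(addr, size, _etext_l1 - _stext_l1,
 *			 L1_CODE_START, L1_CODE_LENGTH)
 *
 * accepts only the L1 code SRAM that lies past the kernel's own L1 text.
 */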
#ifdef CONFIG_BF60x
#define ASYNC_ENABLED(bnum, bctlnum)	1
#else
#define ASYNC_ENABLED(bnum, bctlnum) \
({ \
	(bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
	bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
	1; \
})
#endif
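/*
 * ASYNC_ENABLED(bnum, bctlnum) evaluates to 1 only when EBIU_AMGCTL enables
 * at least bnum + 1 async banks and bank bnum's ARDY handshake (B<bnum>RDYEN
 * in EBIU_AMBCTL<bctlnum>) is off; a read from a bank stalled on ARDY with
 * nothing driving the pin would hang the core.
 */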
/*
 * We can't read EBIU banks that aren't enabled or we end up hanging
 * on the access to the async space.  Make sure we validate accesses
 * that cross async banks too.
 *	0 - found, but unusable
 *	1 - found & usable
 *	2 - not found
 */
static
int in_async(unsigned long addr, unsigned long size)
{
	if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
		if (!ASYNC_ENABLED(0, 0))
			return 0;
		if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
			return 1;
		size -= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE - addr;
		addr = ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE;
	}
	if (addr >= ASYNC_BANK1_BASE && addr < ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE) {
		if (!ASYNC_ENABLED(1, 0))
			return 0;
		if (addr + size <= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)
			return 1;
		size -= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE - addr;
		addr = ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE;
	}
	if (addr >= ASYNC_BANK2_BASE && addr < ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE) {
		if (!ASYNC_ENABLED(2, 1))
			return 0;
		if (addr + size <= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE)
			return 1;
		size -= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE - addr;
		addr = ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE;
	}
	if (addr >= ASYNC_BANK3_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
		if (!ASYNC_ENABLED(3, 1))
			return 0;
		if (addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
			return 1;
		return 0;
	}

	/* not within async bounds */
	return 2;
}

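/*
 * Note the spill-over handling above: a range that starts in one enabled
 * bank and crosses into the next is advanced to the bank boundary and the
 * remainder re-tested, so a multi-bank range is usable only if every bank
 * it touches is enabled.
 */
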
int bfin_mem_access_type(unsigned long addr, unsigned long size)
{
	int cpu = raw_smp_processor_id();

	/* Check that things do not wrap around */
	if (addr > ULONG_MAX - size)
		return -EFAULT;

	if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
		return BFIN_MEM_ACCESS_CORE;

	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
	if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#ifdef COREB_L1_CODE_START
	if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#endif
	if (in_mem_const(addr, size, L2_START, L2_LENGTH))
		return BFIN_MEM_ACCESS_CORE;

	if (addr >= SYSMMR_BASE)
		return BFIN_MEM_ACCESS_CORE_ONLY;

	switch (in_async(addr, size)) {
	case 0: return -EFAULT;
	case 1: return BFIN_MEM_ACCESS_CORE;
	case 2: /* fall through */;
	}

	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
		return BFIN_MEM_ACCESS_CORE;
	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
		return BFIN_MEM_ACCESS_DMA;

	return -EFAULT;
}

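/*
 * Callers use the return value to choose an access mechanism; a
 * hypothetical user could look like:
 *
 *	switch (bfin_mem_access_type(addr, len)) {
 *	case BFIN_MEM_ACCESS_CORE:
 *	case BFIN_MEM_ACCESS_CORE_ONLY:
 *		memcpy(buf, (void *)addr, len);
 *		break;
 *	case BFIN_MEM_ACCESS_DMA:
 *	case BFIN_MEM_ACCESS_IDMA:
 *		do_dma_read(buf, addr, len);
 *		break;
 *	case BFIN_MEM_ACCESS_ITEST:
 *		do_itest_read(buf, addr, len);
 *		break;
 *	default:
 *		return -EFAULT;
 *	}
 *
 * where do_dma_read()/do_itest_read() stand in for whatever DMA/ITEST
 * helpers the caller has available.
 */
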
#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
#endif
/* Return 1 if access to memory range is OK, 0 otherwise */
int _access_ok(unsigned long addr, unsigned long size)
{
	int aret;

	if (size == 0)
		return 1;
	/* Check that things do not wrap around */
	if (addr > ULONG_MAX - size)
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return 1;
#ifdef CONFIG_MTD_UCLINUX
	if (1)
#else
	if (0)
#endif
	{
		if (in_mem(addr, size, memory_start, memory_end))
			return 1;
		if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
			return 1;
# ifndef CONFIG_ROMFS_ON_MTD
		if (0)
# endif
			/* For XIP, allow user space to use pointers within the ROMFS. */
			if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
				return 1;
	} else {
		if (in_mem(addr, size, memory_start, physical_mem_end))
			return 1;
	}

	if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
		return 1;

	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
		return 1;
#ifdef COREB_L1_CODE_START
	if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
		return 1;
#endif

#ifndef CONFIG_EXCEPTION_L1_SCRATCH
	if (in_mem_const(addr, size, (unsigned long)l1_stack_base, l1_stack_len))
		return 1;
#endif

	aret = in_async(addr, size);
	if (aret < 2)
		return aret;

	if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
		return 1;

	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
		return 1;
	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
		return 1;

	return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif /* CONFIG_ACCESS_CHECK */
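/*
 * _access_ok() is the whitelist behind the access_ok() check on this noMMU
 * port: a user range passes only if it sits entirely inside the regions
 * probed above (external RAM, the free parts of L1/L2, enabled async banks,
 * or on-chip ROM), so a range straddling the end of SDRAM into an unmapped
 * hole is rejected even though both endpoints may individually look valid.
 */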