/* system.h: FR-V CPU control definitions
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/linkage.h>
#include <linux/kernel.h>

struct thread_struct;

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.
 * The `mb' is to tell GCC not to cache `current' across this call.
 */
extern asmlinkage
struct task_struct *__switch_to(struct thread_struct *prev_thread,
				struct thread_struct *next_thread,
				struct task_struct *prev);

#define switch_to(prev, next, last)					\
do {									\
	(prev)->thread.sched_lr =					\
		(unsigned long) __builtin_return_address(0);		\
	(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev));	\
	mb();								\
} while(0)
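
/*
 * Illustrative sketch (hypothetical, not part of the FR-V port): roughly how
 * an architecture-independent scheduler core would be expected to invoke
 * switch_to().  The function and variable names below are made up for
 * illustration only, so the snippet is kept compiled out.
 */
#if 0
static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next)
{
	struct task_struct *last;

	/* saves the return address in prev->thread.sched_lr, switches
	 * register state to `next', and leaves the task we came from in
	 * `last' once this task is eventually scheduled back in */
	switch_to(prev, next, last);
}
#endif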

/*
 * Force strict CPU ordering.
 */
#define nop()			asm volatile ("nop"::)
#define mb()			asm volatile ("membar" : : :"memory")
#define rmb()			asm volatile ("membar" : : :"memory")
#define wmb()			asm volatile ("membar" : : :"memory")
#define read_barrier_depends()	do { } while (0)

#define smp_mb()			barrier()
#define smp_rmb()			barrier()
#define smp_wmb()			barrier()
#define smp_read_barrier_depends()	do {} while(0)
#define set_mb(var, value) \
	do { var = (value); barrier(); } while (0)
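
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * typical producer-side pattern in which wmb() orders the data store before
 * the flag store, and set_mb() publishes the flag with a compiler barrier.
 * All identifiers below are made up, so the snippet is kept compiled out.
 */
#if 0
static int example_data;
static int example_ready;

static void example_publish(int value)
{
	example_data = value;
	wmb();				/* data must be visible before the flag */
	set_mb(example_ready, 1);	/* flag store plus compiler barrier */
}
#endif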

extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2)));
extern void free_initmem(void);

#define arch_align_stack(x) (x)

/*****************************************************************************/
/*
 * compare and conditionally exchange value with memory
 * - if (*ptr == test) then orig = *ptr; *ptr = new;
 * - if (*ptr != test) then orig = *ptr;
 */
extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v);

#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define cmpxchg(ptr, test, new)						\
({									\
	__typeof__(ptr) __xg_ptr = (ptr);				\
	__typeof__(*(ptr)) __xg_orig, __xg_tmp;				\
	__typeof__(*(ptr)) __xg_test = (test);				\
	__typeof__(*(ptr)) __xg_new = (new);				\
									\
	switch (sizeof(__xg_orig)) {					\
	case 4:								\
		asm volatile(						\
			"0:						\n"	\
			"	orcc		gr0,gr0,gr0,icc3	\n"	\
			"	ckeq		icc3,cc7		\n"	\
			"	ld.p		%M0,%1			\n"	\
			"	orcr		cc7,cc7,cc3		\n"	\
			"	sub%I4cc	%1,%4,%2,icc0		\n"	\
			"	bne		icc0,#0,1f		\n"	\
			"	cst.p		%3,%M0		,cc3,#1	\n"	\
			"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
			"	beq		icc3,#0,0b		\n"	\
			"1:						\n"	\
			: "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp)	\
			: "r"(__xg_new), "NPr"(__xg_test)		\
			: "memory", "cc7", "cc3", "icc3", "icc0"	\
			);						\
		break;							\
									\
	default:							\
		__xg_orig = (__typeof__(__xg_orig))0;			\
		asm volatile("break");					\
		break;							\
	}								\
									\
	__xg_orig;							\
})

#else

extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);

#define cmpxchg(ptr, test, new)						\
({									\
	__typeof__(ptr) __xg_ptr = (ptr);				\
	__typeof__(*(ptr)) __xg_orig;					\
	__typeof__(*(ptr)) __xg_test = (test);				\
	__typeof__(*(ptr)) __xg_new = (new);				\
									\
	switch (sizeof(__xg_orig)) {					\
	case 4: __xg_orig = (__force __typeof__(*ptr))			\
			__cmpxchg_32((__force uint32_t *)__xg_ptr,	\
				     (__force uint32_t)__xg_test,	\
				     (__force uint32_t)__xg_new); break;	\
	default:							\
		__xg_orig = (__typeof__(__xg_orig))0;			\
		asm volatile("break");					\
		break;							\
	}								\
									\
	__xg_orig;							\
})

#endif
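
/*
 * Illustrative sketch (hypothetical, not part of the original file): either
 * cmpxchg() variant above can be used to build a simple lock-free increment
 * on a 32-bit word.  The identifiers below are made up, so the snippet is
 * kept compiled out.
 */
#if 0
static unsigned long example_counter;

static void example_atomic_inc(void)
{
	unsigned long old, seen;

	do {
		old = example_counter;
		/* returns the value actually found in memory; the store
		 * only happens if it still equalled `old' */
		seen = cmpxchg(&example_counter, old, old + 1);
	} while (seen != old);
}
#endif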

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
		return cmpxchg((unsigned long *)ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
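
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * cmpxchg_local() takes the same arguments as cmpxchg() but is only
 * guaranteed to be atomic with respect to the current CPU, which is enough
 * for per-CPU data.  The identifiers below are made up, so the snippet is
 * kept compiled out.
 */
#if 0
static unsigned long example_percpu_slot;

static int example_claim_slot(unsigned long token)
{
	/* claim the slot only if it is currently free (zero) */
	return cmpxchg_local(&example_percpu_slot, 0UL, token) == 0UL;
}
#endif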

#endif /* _ASM_SYSTEM_H */