/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);

/*
 * Save the floating point registers.  Registers 0, 2, 4 and 6 exist on
 * all machines; the remaining registers and the floating point control
 * register are only available when the machine has the IEEE facility.
 */
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	std	0,%O0+8(%R0)\n"
		"	std	2,%O0+24(%R0)\n"
		"	std	4,%O0+40(%R0)\n"
		"	std	6,%O0+56(%R0)"
		: "=Q" (*fpregs) : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	stfpc	%0\n"
		"	std	1,%O0+16(%R0)\n"
		"	std	3,%O0+32(%R0)\n"
		"	std	5,%O0+48(%R0)\n"
		"	std	7,%O0+64(%R0)\n"
		"	std	8,%O0+72(%R0)\n"
		"	std	9,%O0+80(%R0)\n"
		"	std	10,%O0+88(%R0)\n"
		"	std	11,%O0+96(%R0)\n"
		"	std	12,%O0+104(%R0)\n"
		"	std	13,%O0+112(%R0)\n"
		"	std	14,%O0+120(%R0)\n"
		"	std	15,%O0+128(%R0)\n"
		: "=Q" (*fpregs) : "Q" (*fpregs));
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	ld	0,%O0+8(%R0)\n"
		"	ld	2,%O0+24(%R0)\n"
		"	ld	4,%O0+40(%R0)\n"
		"	ld	6,%O0+56(%R0)"
		: : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	lfpc	%0\n"
		"	ld	1,%O0+16(%R0)\n"
		"	ld	3,%O0+32(%R0)\n"
		"	ld	5,%O0+48(%R0)\n"
		"	ld	7,%O0+64(%R0)\n"
		"	ld	8,%O0+72(%R0)\n"
		"	ld	9,%O0+80(%R0)\n"
		"	ld	10,%O0+88(%R0)\n"
		"	ld	11,%O0+96(%R0)\n"
		"	ld	12,%O0+104(%R0)\n"
		"	ld	13,%O0+112(%R0)\n"
		"	ld	14,%O0+120(%R0)\n"
		"	ld	15,%O0+128(%R0)\n"
		: : "Q" (*fpregs));
}

static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}

#define switch_to(prev,next,last) do {					\
	if (prev == next)						\
		break;							\
	save_fp_regs(&prev->thread.fp_regs);				\
	restore_fp_regs(&next->thread.fp_regs);				\
	save_access_regs(&prev->thread.acrs[0]);			\
	restore_access_regs(&next->thread.acrs[0]);			\
	prev = __switch_to(prev,next);					\
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);

#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_irq_init()	do { } while (0)
#define pfault_init()		({-1;})
#define pfault_fini()		do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);

#define finish_arch_switch(prev) do {					\
	set_fs(current->thread.mm_segment);				\
	account_vtime(prev, current);					\
} while (0)

#define nop() asm volatile("nop")

#define xchg(ptr,x)							  \
({									  \
	__typeof__(*(ptr)) __ret;					  \
	__ret = (__typeof__(*(ptr)))					  \
		__xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
	__ret;								  \
})
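
/*
 * Usage sketch (illustrative, not part of the original header): xchg()
 * atomically replaces *ptr with a new value and returns the old one.
 * The names below are hypothetical.
 */
#if 0	/* example only */
static int example_lock;

static inline int example_trylock(void)
{
	/* Nonzero iff we moved the lock word from 0 to 1. */
	return xchg(&example_lock, 1) == 0;
}
#endif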

extern void __xchg_called_with_bad_pointer(void);

/*
 * For 1 and 2 byte operands the containing aligned word is updated with
 * compare-and-swap; register 0 serves as scratch, hence the "0" clobber.
 */
static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
{
	unsigned long addr, old;
	int shift;

	switch (size) {
	case 1:
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,%4\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,%4\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) addr)
			: "d" (x << shift), "d" (~(255 << shift)),
			  "Q" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,%4\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,%4\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) addr)
			: "d" (x << shift), "d" (~(65535 << shift)),
			  "Q" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 4:
		asm volatile(
			"	l	%0,%3\n"
			"0:	cs	%0,%2,%3\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) ptr)
			: "d" (x), "Q" (*(int *) ptr)
			: "memory", "cc");
		return old;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	lg	%0,%3\n"
			"0:	csg	%0,%2,%3\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(long *) ptr)
			: "d" (x), "Q" (*(long *) ptr)
			: "memory", "cc");
		return old;
#endif /* __s390x__ */
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG	1

#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n), sizeof(*(ptr))))
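
/*
 * Usage sketch (illustrative, not part of the original header): the
 * canonical cmpxchg() retry loop.  Success is detected by comparing the
 * returned value with the expected old value.
 */
#if 0	/* example only */
static unsigned int example_counter;

static inline unsigned int example_add(unsigned int delta)
{
	unsigned int old, new;

	do {
		old = example_counter;
		new = old + delta;
	} while (cmpxchg(&example_counter, old, new) != old);
	return new;
}
#endif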

extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;

	switch (size) {
	case 1:
		/* Compare-and-swap the aligned word containing the byte. */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,%2\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%3\n"
			"	or	%1,%4\n"
			"	cs	%0,%1,%2\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) addr)
			: "d" (old << shift), "d" (new << shift),
			  "d" (~(255 << shift)), "Q" (*(int *) addr)
			: "memory", "cc");
		return prev >> shift;
	case 2:
		/* Compare-and-swap the aligned word containing the halfword. */
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,%2\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%3\n"
			"	or	%1,%4\n"
			"	cs	%0,%1,%2\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) addr)
			: "d" (old << shift), "d" (new << shift),
			  "d" (~(65535 << shift)), "Q" (*(int *) addr)
			: "memory", "cc");
		return prev >> shift;
	case 4:
		asm volatile(
			"	cs	%0,%3,%1\n"
			: "=&d" (prev), "=Q" (*(int *) ptr)
			: "0" (old), "d" (new), "Q" (*(int *) ptr)
			: "memory", "cc");
		return prev;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	csg	%0,%3,%1\n"
			: "=&d" (prev), "=Q" (*(long *) ptr)
			: "0" (old), "d" (new), "Q" (*(long *) ptr)
			: "memory", "cc");
		return prev;
#endif /* __s390x__ */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that all memory
 * ops have completed with respect to other CPUs (see 7-15 POP DJB).
 */

#define eieio()	asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)	eieio()
#define mb()	eieio()
#define rmb()	eieio()
#define wmb()	eieio()
#define read_barrier_depends()	do { } while(0)
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

#define set_mb(var, value) do { var = value; mb(); } while (0)
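
/*
 * Usage sketch (illustrative, not part of the original header): the classic
 * producer/consumer pairing of smp_wmb()/smp_rmb().  The variables are
 * hypothetical.
 */
#if 0	/* example only */
static int example_data;
static int example_ready;

static void example_producer(void)
{
	example_data = 42;
	smp_wmb();		/* publish the data before the ready flag */
	example_ready = 1;
}

static int example_consumer(void)
{
	if (!example_ready)
		return -1;
	smp_rmb();		/* read the flag before reading the data */
	return example_data;
}
#endif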

#ifdef __s390x__

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctlg	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctg	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
})

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctl	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctl	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
})

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({			\
	unsigned long __dummy;				\
	__ctl_store(__dummy, cr, cr);			\
	__dummy |= 1UL << (bit);			\
	__ctl_load(__dummy, cr, cr);			\
})

#define __ctl_clear_bit(cr, bit) ({			\
	unsigned long __dummy;				\
	__ctl_store(__dummy, cr, cr);			\
	__dummy &= ~(1UL << (bit));			\
	__ctl_load(__dummy, cr, cr);			\
})
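
/*
 * Usage sketch (illustrative, not part of the original header): read,
 * modify and write back a single control register bit.  The register
 * and bit number below are arbitrary, not a real facility assignment.
 */
#if 0	/* example only */
static inline void example_enable_facility(void)
{
	__ctl_set_bit(0, 17);	/* set bit 17 of control register 0 */
}
#endif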

#include <linux/irqflags.h>

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 1:
	case 2:
	case 4:
#ifdef __s390x__
	case 8:
#endif
		return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#ifdef __s390x__
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#else
#define cmpxchg64_local(ptr, o, n)	__cmpxchg64_local_generic((ptr), (o), (n))
#endif
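
/*
 * Usage sketch (illustrative, not part of the original header):
 * cmpxchg_local() is only guaranteed atomic against code running on the
 * same CPU, so it suits per-CPU state.  The names are hypothetical.
 */
#if 0	/* example only */
static unsigned long example_percpu_stat;

static inline void example_stat_inc(void)
{
	unsigned long old;

	do {
		old = example_percpu_stat;
	} while (cmpxchg_local(&example_percpu_stat, old, old + 1) != old);
}
#endif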

/*
 * Used to set the PSW mask, except for the first byte, which is
 * left unchanged by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	__load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
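
/*
 * Usage sketch (illustrative, not part of the original header): bracket a
 * section that must not be interrupted by machine checks.
 */
#if 0	/* example only */
static void example_critical(void)
{
	local_mcck_disable();
	/* ... code that must not see a machine check interruption ... */
	local_mcck_enable();
}
#endif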

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

static inline unsigned int stfl(void)
{
	asm volatile(
		"	.insn	s,0xb2b10000,0(0)\n" /* stfl */
		"0:\n"
		EX_TABLE(0b,0b));
	return S390_lowcore.stfl_fac_list;
}

static inline int __stfle(unsigned long long *list, int doublewords)
{
	typedef struct { unsigned long long _[doublewords]; } addrtype;
	register unsigned long __nr asm("0") = doublewords - 1;

	asm volatile(".insn s,0xb2b00000,%0" /* stfle */
		     : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
	return __nr + 1;
}

static inline int stfle(unsigned long long *list, int doublewords)
{
	/* Facility bit 7 (1UL << 24 in the stfl word) indicates stfle. */
	if (!(stfl() & (1UL << 24)))
		return -EOPNOTSUPP;
	return __stfle(list, doublewords);
}
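
/*
 * Usage sketch (illustrative, not part of the original header): query the
 * extended facility list.  stfle() returns the number of doublewords the
 * machine wants to store, or -EOPNOTSUPP when only basic stfl exists.
 */
#if 0	/* example only */
static int example_query_facilities(void)
{
	unsigned long long fac[4];

	return stfle(fac, 4);
}
#endif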

static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

#ifdef CONFIG_TRACE_IRQFLAGS
extern psw_t sysc_restore_trace_psw;
extern psw_t io_restore_trace_psw;
#endif

static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}
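
/*
 * Usage sketch (illustrative, not part of the original header): tprot()
 * returns the TEST PROTECTION condition code (0 = fetch and store
 * permitted, 1 = fetch only, 2 = no access, 3 = translation not
 * available) or -EFAULT if the instruction raises an exception.
 */
#if 0	/* example only */
static inline int example_page_readable(unsigned long addr)
{
	int cc = tprot(addr);

	return cc == 0 || cc == 1;
}
#endif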

#endif /* __KERNEL__ */

#endif /* __ASM_SYSTEM_H */