/*
 * Provenance: git.proxmox.com — mirror_ubuntu-bionic-kernel.git, blame view of
 * arch/parisc/include/asm/ldcw.h ("License cleanup: add SPDX GPL-2.0 license
 * identifier to files with no license").
 */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H

/*
 * Helpers for PA-RISC's LDCW (load-and-clear-word) instruction, the
 * architecture's only atomic read-modify-write primitive.  The required
 * alignment of the lock word differs between PA 1.x and PA 2.0, so the
 * alignment constant, the lock-word selector and the instruction mnemonic
 * are all chosen here based on CONFIG_PA20.
 */

#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration.  So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore. */

#define __PA_LDCW_ALIGNMENT	16
/*
 * Return a pointer to the 16-byte-aligned word inside (a)->lock[].  The
 * array is four ints (16 bytes), so rounding the base address up to the
 * next 16-byte boundary always lands within the array.
 */
#define __ldcw_align(a) ({					\
	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
		& ~(__PA_LDCW_ALIGNMENT - 1);			\
	(volatile unsigned int *) __ret;			\
})
#define __LDCW	"ldcw"

#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
   I've attached a summary of the change, but basically, for PA 2.0, as
   long as the ",CO" (coherent operation) completer is specified, then the
   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
   they only require "natural" alignment (4-byte for ldcw, 8-byte for
   ldcd). */

#define __PA_LDCW_ALIGNMENT	4
/* Natural alignment suffices on PA 2.0, so use the lock word directly. */
#define __ldcw_align(a) (&(a)->slock)
#define __LDCW	"ldcw,co"

#endif /*!CONFIG_PA20*/

/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
   We don't explicitly expose that "*a" may be written as reload
   fails to find a register in class R1_REGS when "a" needs to be
   reloaded when generating 64-bit PIC code.  Instead, we clobber
   memory to indicate to the compiler that the assembly code reads
   or writes to items other than those listed in the input and output
   operands.  This may pessimize the code somewhat but __ldcw is
   usually used within code blocks surrounded by memory barriers. */
#define __ldcw(a) ({						\
	unsigned __ret;						\
	__asm__ __volatile__(__LDCW " 0(%1),%0"			\
		: "=r" (__ret) : "r" (a) : "memory");		\
	__ret;							\
})

#ifdef CONFIG_SMP
/* Place locks in their own section so they can be cache-line aligned. */
# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
#endif

#endif /* __PARISC_LDCW_H */