/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_SPINLOCK_H
#define _ASM_RISCV_SPINLOCK_H

#include <linux/kernel.h>
#include <asm/current.h>

/*
 * Simple spin lock operations.  These provide no fairness guarantees.
 */

/* FIXME: Replace this with a ticket lock, like MIPS. */

#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)

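/*
 * Release the lock by atomically swapping 0 into the lock word.  The
 * .rl suffix gives the AMO release ordering, and using x0 as the
 * destination register discards the previous value.
 */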
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__ (
		"amoswap.w.rl x0, x0, %0"
		: "=A" (lock->lock)
		:: "memory");
}

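/*
 * Try to take the lock with a single atomic swap: write 1 into the lock
 * word with acquire ordering and read back the previous value.  A prior
 * value of 0 means the lock was free and is now ours.
 */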
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = 1, busy;

	__asm__ __volatile__ (
		"amoswap.w.aq %0, %2, %1"
		: "=r" (busy), "+A" (lock->lock)
		: "r" (tmp)
		: "memory");

	return !busy;
}

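/*
 * Test-and-test-and-set: spin with plain loads while the lock looks
 * taken, and only attempt the atomic swap once it appears free, so the
 * line stays shared in waiters' caches instead of bouncing between
 * CPUs on every iteration.
 */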
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (arch_spin_is_locked(lock))
			continue;

		if (arch_spin_trylock(lock))
			break;
	}
}

/***********************************************************/

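/*
 * The rwlock word counts active readers (>= 0) and holds -1 while a
 * writer owns the lock.  Take a read lock with an LR/SC loop: retry
 * while the word is negative, otherwise increment the reader count.
 * sc.w writes 0 to its destination on success, so a nonzero result
 * restarts the loop.
 */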
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1b\n"
		"	addi	%1, %1, 1\n"
		"	sc.w.aq	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

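/*
 * Take the write lock: wait until there is no reader or writer
 * (word == 0), then store -1 with the LR/SC pair, retrying on SC
 * failure.
 */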
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1b\n"
		"	li	%1, -1\n"
		"	sc.w.aq	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

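/*
 * One-shot read lock.  Both "1" labels are distinct GNU as local
 * labels: "bltz %1, 1f" bails forward past the SC when a writer holds
 * the lock, while "bnez %1, 1b" retries from the LR only on SC
 * failure.  busy ends up 0 exactly when the reader count was
 * successfully incremented.
 */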
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1f\n"
		"	addi	%1, %1, 1\n"
		"	sc.w.aq	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

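/*
 * One-shot write lock: give up immediately if the word is nonzero
 * (any reader or writer present), otherwise attempt to store -1,
 * retrying only on spurious SC failure.
 */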
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1f\n"
		"	li	%1, -1\n"
		"	sc.w.aq	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

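/*
 * Drop a read lock by atomically adding -1 to the reader count, with
 * release ordering from the .rl suffix.
 */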
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"amoadd.w.rl x0, %1, %0"
		: "+A" (lock->lock)
		: "r" (-1)
		: "memory");
}

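/*
 * Drop the write lock by swapping 0 back into the lock word with
 * release ordering, the same way arch_spin_unlock() releases a
 * spinlock.
 */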
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__ (
		"amoswap.w.rl x0, x0, %0"
		: "=A" (lock->lock)
		:: "memory");
}

#endif /* _ASM_RISCV_SPINLOCK_H */