arch/riscv/include/asm/spinlock.h

/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_SPINLOCK_H
#define _ASM_RISCV_SPINLOCK_H

#include <linux/kernel.h>
#include <asm/current.h>

/*
 * Simple spin lock operations.  These provide no fairness guarantees.
 */

/* FIXME: Replace this with a ticket lock, like MIPS. */

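/* The lock word is nonzero while the lock is held. */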
#define arch_spin_is_locked(x)	((x)->lock != 0)

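/*
 * Release the lock by atomically swapping zero into the lock word; the
 * .rl (release) ordering keeps earlier accesses from being reordered
 * past the store.
 */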
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__ (
		"amoswap.w.rl x0, x0, %0"
		: "=A" (lock->lock)
		:: "memory");
}

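/*
 * Atomically swap 1 into the lock word with acquire ordering.  The old
 * value comes back in "busy": nonzero means the lock was already held
 * and the attempt failed.
 */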
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = 1, busy;

	__asm__ __volatile__ (
		"amoswap.w.aq %0, %2, %1"
		: "=r" (busy), "+A" (lock->lock)
		: "r" (tmp)
		: "memory");

	return !busy;
}

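/*
 * Test-and-test-and-set: spin on ordinary loads while the lock looks
 * busy and only retry the atomic swap once it appears free, so waiters
 * do not hammer the cache line with AMOs.
 */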
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (arch_spin_is_locked(lock))
			continue;

		if (arch_spin_trylock(lock))
			break;
	}
}

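/*
 * Wait until any current holder has released the lock; the barriers
 * around the polling loop order it against the caller's subsequent
 * accesses.
 */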
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_rmb();
	do {
		cpu_relax();
	} while (arch_spin_is_locked(lock));
	smp_acquire__after_ctrl_dep();
}

/***********************************************************/

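/*
 * The rwlock word counts readers: zero means unlocked, a positive value
 * is the number of active readers, and -1 marks a writer.  Readers take
 * the lock with an LR/SC loop, retrying while a writer holds it or the
 * store-conditional fails.
 */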
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1b\n"
		"	addi	%1, %1, 1\n"
		"	sc.w.aq	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

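/*
 * A writer spins until the count drops to zero (no readers and no
 * writer), then stores -1 to claim exclusive ownership.
 */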
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1b\n"
		"	li	%1, -1\n"
		"	sc.w.aq	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

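/*
 * As arch_read_lock(), but give up (branch forward to 1f) instead of
 * spinning when a writer holds the lock; returns nonzero on success.
 */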
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1f\n"
		"	addi	%1, %1, 1\n"
		"	sc.w.aq	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

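/*
 * As arch_write_lock(), but give up immediately if the lock is not
 * free; returns nonzero on success.
 */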
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1f\n"
		"	li	%1, -1\n"
		"	sc.w.aq	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

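/* Drop one reader: atomically add -1 to the count with release ordering. */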
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"amoadd.w.rl x0, %1, %0"
		: "+A" (lock->lock)
		: "r" (-1)
		: "memory");
}

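/* Release the writer by swapping zero back into the lock word. */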
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__ (
		"amoswap.w.rl x0, x0, %0"
		: "=A" (lock->lock)
		:: "memory");
}

#endif /* _ASM_RISCV_SPINLOCK_H */