#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {                                        \
        struct lockref old;                                                     \
        BUILD_BUG_ON(sizeof(old) != 8);                                         \
        old.lock_count = READ_ONCE(lockref->lock_count);                        \
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old, prev = old;                           \
                CODE                                                            \
                old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,        \
                                                   old.lock_count,              \
                                                   new.lock_count);             \
                if (likely(old.lock_count == prev.lock_count)) {                \
                        SUCCESS;                                                \
                }                                                               \
                cpu_relax();                                                    \
        }                                                                       \
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
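
/*
 * On architectures that define USE_CMPXCHG_LOCKREF the spinlock and the
 * reference count share one 64-bit word (enforced by the BUILD_BUG_ON
 * above).  CMPXCHG_LOOP snapshots that word and, as long as the snapshot
 * shows the spinlock unlocked, applies CODE to a local copy and tries to
 * commit it with a single cmpxchg64().  A successful compare-and-exchange
 * runs SUCCESS; a failed one leaves the freshly reloaded value in "old"
 * and the loop retries.  Once the lock is observed held, the loop ends
 * and the caller falls back to the spinlocked slow path.
 */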

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
        ,
                return;
        );

        spin_lock(&lockref->lock);
        lockref->count++;
        spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
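
/*
 * A minimal usage sketch (hypothetical caller, not taken from the kernel):
 * the unconditional lockref_get() is only safe when the caller already
 * holds a reference, e.g. when handing a second reference to another user.
 */
#if 0	/* illustrative sketch, kept out of the build */
static void example_share_object(struct lockref *ref)
{
	/* We already own one reference, so the count cannot be zero here. */
	lockref_get(ref);
}
#endif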

/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if (old.count <= 0)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count > 0) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
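
/*
 * Usage sketch (hypothetical caller): lockref_get_not_zero() fits lookup
 * paths that may race with the final put; a zero or dead count means the
 * object is going away and the lookup should be treated as a miss.
 */
#if 0	/* illustrative sketch, kept out of the build */
static bool example_lookup_get(struct lockref *ref)
{
	/* Takes a reference only while at least one other reference exists. */
	return lockref_get_not_zero(ref) != 0;
}
#endif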

/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
                if (old.count <= 0)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (lockref->count <= 0)
                return 0;
        lockref->count++;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
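
/*
 * Usage sketch (hypothetical caller): when lockref_get_or_lock() returns 0
 * the count was zero or dead and the caller now holds @ref->lock, so it can
 * inspect or revive the object under the lock but must unlock it itself.
 */
#if 0	/* illustrative sketch, kept out of the build */
static bool example_get_or_inspect(struct lockref *ref)
{
	if (lockref_get_or_lock(ref))
		return true;		/* lockless fast path took a reference */

	/* Slow path: the count was <= 0 and we now hold the spinlock. */
	spin_unlock(&ref->lock);
	return false;
}
#endif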

/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count--;
                if (old.count <= 0)
                        return -1;
        ,
                return new.count;
        );
        return -1;
}
EXPORT_SYMBOL(lockref_put_return);
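
/*
 * Usage sketch (hypothetical caller): lockref_put_return() can fail even
 * though references remain, e.g. when the lock is held or the cmpxchg fast
 * path is unavailable, so callers need a locked fallback.  The sketch
 * assumes the lockref is never marked dead in the caller's scheme.
 */
#if 0	/* illustrative sketch, kept out of the build */
static int example_put(struct lockref *ref)
{
	int count = lockref_put_return(ref);

	if (count < 0) {
		spin_lock(&ref->lock);
		count = --ref->count;
		spin_unlock(&ref->lock);
	}
	return count;		/* new reference count */
}
#endif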

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count--;
                if (old.count <= 1)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (lockref->count <= 1)
                return 0;
        lockref->count--;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
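
/*
 * Usage sketch (hypothetical caller): the common "put" pattern, similar in
 * spirit to dput().  A return of 0 means nothing was decremented, the count
 * was <= 1 and the caller holds the lock, so it can mark the lockref dead
 * and tear the object down.
 */
#if 0	/* illustrative sketch, kept out of the build */
static void example_put_and_maybe_free(struct lockref *ref)
{
	if (lockref_put_or_lock(ref))
		return;			/* other references remain */

	/* Ours was the last reference: we hold the lock, count is still 1. */
	lockref_mark_dead(ref);
	spin_unlock(&ref->lock);
	/* The containing object would be freed here. */
}
#endif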

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
        assert_spin_locked(&lockref->lock);
        lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if (old.count < 0)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count >= 0) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
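
/*
 * Usage sketch (hypothetical caller): lockref_get_not_dead() suits lockless
 * lookups (e.g. under RCU) that may find an object another CPU has already
 * marked dead with lockref_mark_dead(); such objects must not be given new
 * references, and the lookup should simply fail.
 */
#if 0	/* illustrative sketch, kept out of the build */
static bool example_rcu_lookup_get(struct lockref *ref)
{
	if (lockref_get_not_dead(ref))
		return true;		/* object is live, reference taken */

	/* Object is being torn down; treat the lookup as a miss. */
	return false;
}
#endif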