/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/refcount.h>
#include <sys/rrwlock.h>

/*
 * This file contains the implementation of a re-entrant read
 * reader/writer lock (aka "rrwlock").
 *
 * This is a normal reader/writer lock with the additional feature
 * of allowing threads that have already obtained a read lock to
 * re-enter another read lock (re-entrant read) - even if there are
 * waiting writers.
 *
 * Callers who have not obtained a read lock give waiting writers priority.
 *
 * The rrwlock_t lock does not allow re-entrant writers, nor does it
 * allow a re-entrant mix of reads and writes (that is, it does not
 * allow a caller who has already obtained a read lock to then grab
 * a write lock without first dropping all read locks, and vice versa).
 *
 * The rrwlock_t uses tsd (thread specific data) to keep a list of
 * nodes (rrw_node_t), where each node keeps track of which specific
 * lock (rrw_node_t::rn_rrl) the thread has grabbed. Since re-entering
 * should be rare, a thread that grabs multiple reads on the same rrwlock_t
 * will store multiple rrw_node_ts of the same 'rn_rrl'. Nodes on the
 * tsd list can represent a different rrwlock_t. This allows a thread
 * to enter multiple and unique rrwlock_ts for read locks at the same time.
 *
 * Since using tsd exposes some overhead, the rrwlock_t only needs to
 * keep tsd data when writers are waiting. If no writers are waiting, then
 * a reader just bumps the anonymous read count (rr_anon_rcount) - no tsd
 * is needed. Once a writer attempts to grab the lock, readers then
 * keep tsd data and bump the linked readers count (rr_linked_rcount).
 *
 * If there are waiting writers and there are anonymous readers, then a
 * reader doesn't know if its lock is re-entrant. But since it may be one,
 * we allow the read to proceed (otherwise it could deadlock). Since once
 * waiting writers are active, readers no longer bump the anonymous count,
 * the anonymous readers will eventually flush themselves out. At this
 * point, readers will be able to tell whether their lock is re-entrant
 * (they have a rrw_node_t entry for the lock) or not. If it is re-entrant,
 * then we must let them proceed. If it is not, then the reader blocks for
 * the waiting writers. Hence, we do not starve writers.
 */

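/*
 * Example: a minimal usage sketch (FTAG, the conventional ZFS holder
 * tag, is assumed). A thread that already holds the lock as reader
 * may take it again, even while a writer is waiting:
 *
 *	rrw_enter_read(&rrl, FTAG);
 *	rrw_enter_read(&rrl, FTAG);	(re-entrant read; will not block)
 *	rrw_exit(&rrl, FTAG);
 *	rrw_exit(&rrl, FTAG);
 */
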
/* global key for TSD */
uint_t rrw_tsd_key;

typedef struct rrw_node {
	struct rrw_node *rn_next;
	rrwlock_t *rn_rrl;
	void *rn_tag;
} rrw_node_t;

static rrw_node_t *
rrn_find(rrwlock_t *rrl)
{
	rrw_node_t *rn;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (NULL);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl)
			return (rn);
	}
	return (NULL);
}

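/*
 * For illustration (hypothetical state, not code): a thread holding
 * read locks on locks A, B, and A again (re-entrant) would carry a
 * tsd list like:
 *
 *	tsd_get(rrw_tsd_key) -> [rn_rrl = A] -> [rn_rrl = B] -> [rn_rrl = A]
 *
 * rrn_find() returns the first node matching a given lock, which is
 * enough to answer "has this thread already entered this lock?".
 */
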
/*
 * Add a node to the head of the singly linked list.
 */
static void
rrn_add(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;

	rn = kmem_alloc(sizeof (*rn), KM_SLEEP);
	rn->rn_rrl = rrl;
	rn->rn_next = tsd_get(rrw_tsd_key);
	rn->rn_tag = tag;
	VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
}

/*
 * If a node is found for 'rrl', then remove the node from this
 * thread's list and return TRUE; otherwise return FALSE.
 */
static boolean_t
rrn_find_and_remove(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;
	rrw_node_t *prev = NULL;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (B_FALSE);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl && rn->rn_tag == tag) {
			if (prev)
				prev->rn_next = rn->rn_next;
			else
				VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
			kmem_free(rn, sizeof (*rn));
			return (B_TRUE);
		}
		prev = rn;
	}
	return (B_FALSE);
}

void
rrw_init(rrwlock_t *rrl, boolean_t track_all)
{
	mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
	rrl->rr_writer = NULL;
	refcount_create(&rrl->rr_anon_rcount);
	refcount_create(&rrl->rr_linked_rcount);
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_track_all = track_all;
}

void
rrw_destroy(rrwlock_t *rrl)
{
	mutex_destroy(&rrl->rr_lock);
	cv_destroy(&rrl->rr_cv);
	ASSERT(rrl->rr_writer == NULL);
	refcount_destroy(&rrl->rr_anon_rcount);
	refcount_destroy(&rrl->rr_linked_rcount);
}

static void
rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
	    !rrl->rr_track_all) {
		rrl->rr_anon_rcount.rc_count++;
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
	ASSERT(rrl->rr_writer != curthread);
	ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);

	while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
	    refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
	    rrn_find(rrl) == NULL))
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);

	if (rrl->rr_writer_wanted || rrl->rr_track_all) {
		/* may or may not be a re-entrant enter */
		rrn_add(rrl, tag);
		(void) zfs_refcount_add(&rrl->rr_linked_rcount, tag);
	} else {
		(void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
	}
	ASSERT(rrl->rr_writer == NULL);
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter_read(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_FALSE, tag);
}

/*
 * Take a read lock even if there are pending write lock requests. If we
 * want to take a lock re-entrantly, but from different threads (that have
 * a relationship to each other), the normal detection mechanism to overrule
 * the pending writer does not work, so we have to give an explicit hint here.
 */
void
rrw_enter_read_prio(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_TRUE, tag);
}

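/*
 * Example: a hypothetical sketch of the case above. A parent thread
 * takes the read lock and hands work to a helper thread; the helper's
 * read enter is logically re-entrant, but it cannot be detected through
 * the parent's tsd list, so the helper uses the prio variant to get
 * ahead of any waiting writer (FTAG again assumed as the holder tag):
 *
 *	parent:	rrw_enter_read(&rrl, FTAG);
 *		(dispatch task to helper thread, then wait for it)
 *	helper:	rrw_enter_read_prio(&rrl, FTAG);
 *		(do work)
 *		rrw_exit(&rrl, FTAG);
 */
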
void
rrw_enter_write(rrwlock_t *rrl)
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer != curthread);

	while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
	    refcount_count(&rrl->rr_linked_rcount) > 0 ||
	    rrl->rr_writer != NULL) {
		rrl->rr_writer_wanted = B_TRUE;
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
	}
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_writer = curthread;
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrw_enter_read(rrl, tag);
	else
		rrw_enter_write(rrl);
}

void
rrw_exit(rrwlock_t *rrl, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
		rrl->rr_anon_rcount.rc_count--;
		if (rrl->rr_anon_rcount.rc_count == 0)
			cv_broadcast(&rrl->rr_cv);
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
#endif
	ASSERT(!refcount_is_zero(&rrl->rr_anon_rcount) ||
	    !refcount_is_zero(&rrl->rr_linked_rcount) ||
	    rrl->rr_writer != NULL);

	if (rrl->rr_writer == NULL) {
		int64_t count;
		if (rrn_find_and_remove(rrl, tag)) {
			count = refcount_remove(&rrl->rr_linked_rcount, tag);
		} else {
			ASSERT(!rrl->rr_track_all);
			count = refcount_remove(&rrl->rr_anon_rcount, tag);
		}
		if (count == 0)
			cv_broadcast(&rrl->rr_cv);
	} else {
		ASSERT(rrl->rr_writer == curthread);
		ASSERT(refcount_is_zero(&rrl->rr_anon_rcount) &&
		    refcount_is_zero(&rrl->rr_linked_rcount));
		rrl->rr_writer = NULL;
		cv_broadcast(&rrl->rr_cv);
	}
	mutex_exit(&rrl->rr_lock);
}

/*
 * If the lock was created with track_all, rrw_held(RW_READER) will return
 * B_TRUE iff the current thread has the lock for reader. Otherwise it may
 * return B_TRUE if any thread has the lock for reader.
 */
boolean_t
rrw_held(rrwlock_t *rrl, krw_t rw)
{
	boolean_t held;

	mutex_enter(&rrl->rr_lock);
	if (rw == RW_WRITER) {
		held = (rrl->rr_writer == curthread);
	} else {
		held = (!refcount_is_zero(&rrl->rr_anon_rcount) ||
		    rrn_find(rrl) != NULL);
	}
	mutex_exit(&rrl->rr_lock);

	return (held);
}
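
/*
 * Example: a minimal sketch of why the distinction matters, assuming a
 * lock initialized with rrw_init(&rrl, B_TRUE). With track_all set,
 * every reader keeps a tsd node, so the check is per-thread:
 *
 *	rrw_enter_read(&rrl, FTAG);
 *	ASSERT(rrw_held(&rrl, RW_READER));	(this thread holds it)
 *	rrw_exit(&rrl, FTAG);
 *
 * Without track_all, the same check could also return B_TRUE merely
 * because some other thread holds an anonymous read lock.
 */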

void
rrw_tsd_destroy(void *arg)
{
	rrw_node_t *rn = arg;
	if (rn != NULL) {
		panic("thread %p terminating with rrw lock %p held",
		    (void *)curthread, (void *)rn->rn_rrl);
	}
}

/*
 * A reader-mostly lock implementation, tuning the above reader-writer locks
 * for highly parallel read acquisitions, while pessimizing writes.
 *
 * The idea is to split a single busy lock into an array of locks, so that
 * each reader can lock only one of them for read, depending on the result
 * of a simple hash function. That proportionally reduces lock congestion.
 * A writer at the same time has to sequentially acquire write on all the
 * locks. That makes write acquisition proportionally slower, but in places
 * where it is used (filesystem unmount) performance is not critical.
 *
 * All the functions below are direct wrappers around the functions above.
 */
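
/*
 * Example: a rough sketch of the intended effect, assuming two readers
 * whose thread pointers hash to different slots of the lock array
 * (RRM_NUM_LOCKS is defined in sys/rrwlock.h):
 *
 *	reader A:	rrm_enter_read(&rrm, FTAG);	(locks locks[hash(A)])
 *	reader B:	rrm_enter_read(&rrm, FTAG);	(locks locks[hash(B)])
 *	writer:		rrm_enter_write(&rrm);		(locks all of them)
 *
 * The readers proceed without touching the same rr_lock mutex, while
 * the writer pays the cost of acquiring every lock in the array.
 */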

void
rrm_init(rrmlock_t *rrl, boolean_t track_all)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_init(&rrl->locks[i], track_all);
}

void
rrm_destroy(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_destroy(&rrl->locks[i]);
}

void
rrm_enter(rrmlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrm_enter_read(rrl, tag);
	else
		rrm_enter_write(rrl);
}

/*
 * This maps the current thread to a specific lock. Note that the lock
 * must be released by the same thread that acquired it. We do this
 * mapping by taking the thread pointer mod a prime number. We examine
 * only the low 32 bits of the thread pointer, because 32-bit division
 * is faster than 64-bit division, and the high 32 bits have little
 * entropy anyway.
 */
#define	RRM_TD_LOCK()	(((uint32_t)(uintptr_t)(curthread)) % RRM_NUM_LOCKS)

void
rrm_enter_read(rrmlock_t *rrl, void *tag)
{
	rrw_enter_read(&rrl->locks[RRM_TD_LOCK()], tag);
}

void
rrm_enter_write(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_enter_write(&rrl->locks[i]);
}

void
rrm_exit(rrmlock_t *rrl, void *tag)
{
	int i;

	if (rrl->locks[0].rr_writer == curthread) {
		for (i = 0; i < RRM_NUM_LOCKS; i++)
			rrw_exit(&rrl->locks[i], tag);
	} else {
		rrw_exit(&rrl->locks[RRM_TD_LOCK()], tag);
	}
}

boolean_t
rrm_held(rrmlock_t *rrl, krw_t rw)
{
	if (rw == RW_WRITER) {
		return (rrw_held(&rrl->locks[0], rw));
	} else {
		return (rrw_held(&rrl->locks[RRM_TD_LOCK()], rw));
	}
}