/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/refcount.h>
#include <sys/rrwlock.h>

/*
 * This file contains the implementation of a re-entrant read
 * reader/writer lock (aka "rrwlock").
 *
 * This is a normal reader/writer lock with the additional feature
 * of allowing threads that have already obtained a read lock to
 * re-enter another read lock (re-entrant read) - even if there are
 * waiting writers.
 *
 * Callers who have not obtained a read lock give waiting writers priority.
 *
 * The rrwlock_t lock does not allow re-entrant writers, nor does it
 * allow a re-entrant mix of reads and writes (that is, it does not
 * allow a caller who has already obtained a read lock to then grab a
 * write lock without first dropping all read locks, and vice versa).
 *
 * The rrwlock_t uses tsd (thread specific data) to keep a list of
 * nodes (rrw_node_t), where each node keeps track of which specific
 * lock (rrw_node_t::rn_rrl) the thread has grabbed. Since re-entering
 * should be rare, a thread that grabs multiple reads on the same rrwlock_t
 * will store multiple rrw_node_ts with the same 'rn_rrl'. Nodes on the
 * tsd list can represent different rrwlock_ts. This allows a thread
 * to enter multiple and unique rrwlock_ts for read locks at the same time.
 *
 * Since using tsd exposes some overhead, the rrwlock_t only needs to
 * keep tsd data when writers are waiting. If no writers are waiting, then
 * a reader just bumps the anonymous read count (rr_anon_rcount) - no tsd
 * is needed. Once a writer attempts to grab the lock, readers then
 * keep tsd data and bump the linked readers count (rr_linked_rcount).
 *
 * If there are waiting writers and there are anonymous readers, then a
 * reader doesn't know if it holds a re-entrant lock. But since it may be
 * one, we allow the read to proceed (otherwise it could deadlock). Since
 * once waiting writers are active, readers no longer bump the anonymous
 * count, the anonymous readers will eventually flush themselves out. At
 * this point, readers will be able to tell if they hold a re-entrant lock
 * (i.e. have a rrw_node_t entry for the lock) or not. If they do, we must
 * let them proceed. If they do not, then the reader blocks for the waiting
 * writers. Hence, we do not starve writers.
 */

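/*
 * A minimal usage sketch (illustrative only; it is not taken from any
 * caller in this file, and FTAG is simply the tag convention used by
 * callers elsewhere in ZFS):
 *
 *	rrwlock_t rrl;
 *
 *	rrw_init(&rrl, B_FALSE);
 *	rrw_enter(&rrl, RW_READER, FTAG);	first read hold
 *	rrw_enter(&rrl, RW_READER, FTAG);	re-entrant read hold; never
 *						blocks behind a waiting writer
 *	rrw_exit(&rrl, FTAG);
 *	rrw_exit(&rrl, FTAG);			last exit wakes any waiter
 *	rrw_destroy(&rrl);
 *
 * A write hold (rrw_enter(&rrl, RW_WRITER, FTAG)) may only be taken by a
 * thread that currently holds no read locks on the same rrwlock_t.
 */
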
/* global key for TSD */
uint_t rrw_tsd_key;

typedef struct rrw_node {
	struct rrw_node *rn_next;
	rrwlock_t *rn_rrl;
	void *rn_tag;
} rrw_node_t;

static rrw_node_t *
rrn_find(rrwlock_t *rrl)
{
	rrw_node_t *rn;

	if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
		return (NULL);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl)
			return (rn);
	}
	return (NULL);
}

/*
 * Add a node to the head of the singly linked list.
 */
static void
rrn_add(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;

	rn = kmem_alloc(sizeof (*rn), KM_SLEEP);
	rn->rn_rrl = rrl;
	rn->rn_next = tsd_get(rrw_tsd_key);
	rn->rn_tag = tag;
	VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
}

/*
 * If a node is found for 'rrl', then remove the node from this
 * thread's list and return TRUE; otherwise return FALSE.
 */
static boolean_t
rrn_find_and_remove(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;
	rrw_node_t *prev = NULL;

	if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
		return (B_FALSE);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl && rn->rn_tag == tag) {
			if (prev)
				prev->rn_next = rn->rn_next;
			else
				VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
			kmem_free(rn, sizeof (*rn));
			return (B_TRUE);
		}
		prev = rn;
	}
	return (B_FALSE);
}

void
rrw_init(rrwlock_t *rrl, boolean_t track_all)
{
	mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
	rrl->rr_writer = NULL;
	zfs_refcount_create(&rrl->rr_anon_rcount);
	zfs_refcount_create(&rrl->rr_linked_rcount);
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_track_all = track_all;
}

void
rrw_destroy(rrwlock_t *rrl)
{
	mutex_destroy(&rrl->rr_lock);
	cv_destroy(&rrl->rr_cv);
	ASSERT(rrl->rr_writer == NULL);
	zfs_refcount_destroy(&rrl->rr_anon_rcount);
	zfs_refcount_destroy(&rrl->rr_linked_rcount);
}

static void
rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
	    !rrl->rr_track_all) {
		rrl->rr_anon_rcount.rc_count++;
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
	ASSERT(rrl->rr_writer != curthread);
	ASSERT(zfs_refcount_count(&rrl->rr_anon_rcount) >= 0);

	while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
	    zfs_refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
	    rrn_find(rrl) == NULL))
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);

	if (rrl->rr_writer_wanted || rrl->rr_track_all) {
		/* may or may not be a re-entrant enter */
		rrn_add(rrl, tag);
		(void) zfs_refcount_add(&rrl->rr_linked_rcount, tag);
	} else {
		(void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
	}
	ASSERT(rrl->rr_writer == NULL);
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter_read(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_FALSE, tag);
}

/*
 * Take a read lock even if there are pending write lock requests. If we
 * want to take a lock reentrantly, but from different threads (that have
 * a relationship to each other), the normal detection mechanism to
 * overrule the pending writer does not work, so we have to give an
 * explicit hint here.
 */
void
rrw_enter_read_prio(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_TRUE, tag);
}
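
/*
 * An illustrative scenario (hypothetical, not from any specific caller):
 * thread A already holds 'rrl' as reader and hands work to a cooperating
 * thread B while a writer is waiting.  B has no rrw_node_t for 'rrl', so a
 * plain rrw_enter_read() would park B behind the writer, which itself waits
 * for A; rrw_enter_read_prio() lets B's read proceed instead:
 *
 *	rrw_enter_read_prio(rrl, FTAG);
 *	...
 *	rrw_exit(rrl, FTAG);
 */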

void
rrw_enter_write(rrwlock_t *rrl)
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer != curthread);

	while (zfs_refcount_count(&rrl->rr_anon_rcount) > 0 ||
	    zfs_refcount_count(&rrl->rr_linked_rcount) > 0 ||
	    rrl->rr_writer != NULL) {
		rrl->rr_writer_wanted = B_TRUE;
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
	}
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_writer = curthread;
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrw_enter_read(rrl, tag);
	else
		rrw_enter_write(rrl);
}

void
rrw_exit(rrwlock_t *rrl, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
		rrl->rr_anon_rcount.rc_count--;
		if (rrl->rr_anon_rcount.rc_count == 0)
			cv_broadcast(&rrl->rr_cv);
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
#endif
	ASSERT(!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
	    !zfs_refcount_is_zero(&rrl->rr_linked_rcount) ||
	    rrl->rr_writer != NULL);

	if (rrl->rr_writer == NULL) {
		int64_t count;
		if (rrn_find_and_remove(rrl, tag)) {
			count = zfs_refcount_remove(
			    &rrl->rr_linked_rcount, tag);
		} else {
			ASSERT(!rrl->rr_track_all);
			count = zfs_refcount_remove(&rrl->rr_anon_rcount, tag);
		}
		if (count == 0)
			cv_broadcast(&rrl->rr_cv);
	} else {
		ASSERT(rrl->rr_writer == curthread);
		ASSERT(zfs_refcount_is_zero(&rrl->rr_anon_rcount) &&
		    zfs_refcount_is_zero(&rrl->rr_linked_rcount));
		rrl->rr_writer = NULL;
		cv_broadcast(&rrl->rr_cv);
	}
	mutex_exit(&rrl->rr_lock);
}

/*
 * If the lock was created with track_all, rrw_held(RW_READER) will return
 * B_TRUE iff the current thread has the lock for reader. Otherwise it may
 * return B_TRUE if any thread has the lock for reader.
 */
boolean_t
rrw_held(rrwlock_t *rrl, krw_t rw)
{
	boolean_t held;

	mutex_enter(&rrl->rr_lock);
	if (rw == RW_WRITER) {
		held = (rrl->rr_writer == curthread);
	} else {
		held = (!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
		    rrn_find(rrl) != NULL);
	}
	mutex_exit(&rrl->rr_lock);

	return (held);
}

void
rrw_tsd_destroy(void *arg)
{
	rrw_node_t *rn = arg;
	if (rn != NULL) {
		panic("thread %p terminating with rrw lock %p held",
		    (void *)curthread, (void *)rn->rn_rrl);
	}
}

/*
 * A reader-mostly lock implementation, built on top of the reader-writer
 * locks above, tuned for highly parallel read acquisitions while
 * pessimizing writes.
 *
 * The idea is to split a single busy lock into an array of locks, so that
 * each reader can lock only one of them for read, depending on the result
 * of a simple hash function. That proportionally reduces lock congestion.
 * A writer at the same time has to sequentially acquire a write lock on
 * all of them. That makes write acquisition proportionally slower, but in
 * places where it is used (filesystem unmount) performance is not critical.
 *
 * All the functions below are direct wrappers around the functions above.
 */
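
/*
 * A minimal usage sketch (illustrative only; FTAG is the usual tag passed
 * by callers elsewhere in ZFS):
 *
 *	rrmlock_t rrm;
 *
 *	rrm_init(&rrm, B_FALSE);
 *	rrm_enter(&rrm, RW_READER, FTAG);	locks one stripe, picked by
 *						RRM_TD_LOCK() below
 *	rrm_exit(&rrm, FTAG);			unlocks that same stripe
 *	rrm_enter(&rrm, RW_WRITER, FTAG);	locks all RRM_NUM_LOCKS stripes
 *	rrm_exit(&rrm, FTAG);			unlocks all stripes
 *	rrm_destroy(&rrm);
 *
 * Because the stripe is derived from curthread, a read hold must be
 * released by the same thread that acquired it.
 */
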
void
rrm_init(rrmlock_t *rrl, boolean_t track_all)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_init(&rrl->locks[i], track_all);
}

void
rrm_destroy(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_destroy(&rrl->locks[i]);
}

void
rrm_enter(rrmlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrm_enter_read(rrl, tag);
	else
		rrm_enter_write(rrl);
}

/*
 * This maps the current thread to a specific lock. Note that the lock
 * must be released by the same thread that acquired it. We do this
 * mapping by taking the thread pointer mod a prime number. We examine
 * only the low 32 bits of the thread pointer, because 32-bit division
 * is faster than 64-bit division, and the high 32 bits have little
 * entropy anyway.
 */
#define	RRM_TD_LOCK()	(((uint32_t)(uintptr_t)(curthread)) % RRM_NUM_LOCKS)
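
/*
 * Worked example (values are hypothetical): if RRM_NUM_LOCKS is 17 and
 * curthread is 0xffffa0c012345678, only the low 32 bits 0x12345678
 * (305419896) are used, and 305419896 % 17 == 4, so this thread maps to
 * locks[4] for both rrm_enter_read() and the matching rrm_exit().
 */
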
void
rrm_enter_read(rrmlock_t *rrl, void *tag)
{
	rrw_enter_read(&rrl->locks[RRM_TD_LOCK()], tag);
}

void
rrm_enter_write(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_enter_write(&rrl->locks[i]);
}

void
rrm_exit(rrmlock_t *rrl, void *tag)
{
	int i;

	if (rrl->locks[0].rr_writer == curthread) {
		for (i = 0; i < RRM_NUM_LOCKS; i++)
			rrw_exit(&rrl->locks[i], tag);
	} else {
		rrw_exit(&rrl->locks[RRM_TD_LOCK()], tag);
	}
}

boolean_t
rrm_held(rrmlock_t *rrl, krw_t rw)
{
	if (rw == RW_WRITER) {
		return (rrw_held(&rrl->locks[0], rw));
	} else {
		return (rrw_held(&rrl->locks[RRM_TD_LOCK()], rw));
	}
}