/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * A Zero Reference Lock (ZRL) is a reference count that can lock out new
 * references only while the count is zero; if the count is not zero, the
 * attempt to lock fails immediately rather than waiting. It is similar to a
 * read-write lock in that it allows multiple readers and only a single
 * writer, but it does not allow a writer to block waiting for readers to
 * exit, so the question of reader/writer priority is moot (no WRWANT bit).
 * Since the equivalent of rw_enter(&lock, RW_WRITER) is disallowed and only
 * tryenter() is allowed, it is perfectly safe for the same reader to acquire
 * the same lock multiple times. The fact that a ZRL is reentrant for readers
 * (through multiple calls to zrl_add()) makes it convenient for determining
 * whether something is actively referenced without the fuss of flagging lock
 * ownership across function calls.
 */
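/*
 * Usage sketch (illustrative only, not part of the original file; the
 * variable name below is hypothetical).  Readers take and drop holds with
 * zrl_add()/zrl_remove(); exclusive access is only ever attempted with
 * zrl_tryenter(), which succeeds solely when the count is zero, and is
 * released again with zrl_exit():
 *
 *	zrlock_t zrl;
 *
 *	zrl_init(&zrl);
 *
 *	zrl_add(&zrl);			reader hold; reentrant
 *	... use the protected object ...
 *	zrl_remove(&zrl);		drop the hold
 *
 *	if (zrl_tryenter(&zrl)) {	succeeds only at zero references
 *		... tear down or relocate the unreferenced object ...
 *		zrl_exit(&zrl);		wakes any blocked zrl_add() callers
 *	}
 *
 *	zrl_destroy(&zrl);
 */
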
#include <sys/zrlock.h>

/*
 * A ZRL can be locked only while there are zero references, so ZRL_LOCKED is
 * treated as zero references.
 */
#define	ZRL_LOCKED	((uint32_t)-1)
#define	ZRL_DESTROYED	-2

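/*
 * Initialize a ZRL to the unlocked state with zero references.
 */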
void
zrl_init(zrlock_t *zrl)
{
	mutex_init(&zrl->zr_mtx, NULL, MUTEX_DEFAULT, NULL);
	zrl->zr_refcount = 0;
	cv_init(&zrl->zr_cv, NULL, CV_DEFAULT, NULL);
#ifdef ZFS_DEBUG
	zrl->zr_owner = NULL;
	zrl->zr_caller = NULL;
#endif
}

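/*
 * Destroy a ZRL.  No references may be outstanding.
 */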
void
zrl_destroy(zrlock_t *zrl)
{
	ASSERT(zrl->zr_refcount == 0);

	mutex_destroy(&zrl->zr_mtx);
	zrl->zr_refcount = ZRL_DESTROYED;
	cv_destroy(&zrl->zr_cv);
}

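/*
 * Add a reference.  The fast path bumps the count with a compare-and-swap;
 * if the ZRL is currently locked (ZRL_LOCKED), block on zr_cv until the
 * holder calls zrl_exit().  In debug builds the owning thread and caller are
 * recorded, and a reentrant add by the same thread fires a DTrace probe.
 */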
void
#ifdef ZFS_DEBUG
zrl_add_debug(zrlock_t *zrl, const char *zc)
#else
zrl_add(zrlock_t *zrl)
#endif
{
	uint32_t n = (uint32_t)zrl->zr_refcount;

	while (n != ZRL_LOCKED) {
		uint32_t cas = atomic_cas_32(
		    (uint32_t *)&zrl->zr_refcount, n, n + 1);
		if (cas == n) {
			ASSERT((int32_t)n >= 0);
#ifdef ZFS_DEBUG
			if (zrl->zr_owner == curthread) {
				DTRACE_PROBE2(zrlock__reentry,
				    zrlock_t *, zrl, uint32_t, n);
			}
			zrl->zr_owner = curthread;
			zrl->zr_caller = zc;
#endif
			return;
		}
		n = cas;
	}

	mutex_enter(&zrl->zr_mtx);
	while (zrl->zr_refcount == ZRL_LOCKED) {
		cv_wait(&zrl->zr_cv, &zrl->zr_mtx);
	}
	ASSERT(zrl->zr_refcount >= 0);
	zrl->zr_refcount++;
#ifdef ZFS_DEBUG
	zrl->zr_owner = curthread;
	zrl->zr_caller = zc;
#endif
	mutex_exit(&zrl->zr_mtx);
}

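/*
 * Drop a reference; the count must be greater than zero.
 */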
void
zrl_remove(zrlock_t *zrl)
{
	uint32_t n;

	n = atomic_dec_32_nv((uint32_t *)&zrl->zr_refcount);
	ASSERT((int32_t)n >= 0);
#ifdef ZFS_DEBUG
	if (zrl->zr_owner == curthread) {
		zrl->zr_owner = NULL;
		zrl->zr_caller = NULL;
	}
#endif
}

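/*
 * Try to lock out new references.  Succeeds, returning nonzero, only if the
 * reference count is currently zero; never blocks.
 */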
int
zrl_tryenter(zrlock_t *zrl)
{
	uint32_t n = (uint32_t)zrl->zr_refcount;

	if (n == 0) {
		uint32_t cas = atomic_cas_32(
		    (uint32_t *)&zrl->zr_refcount, 0, ZRL_LOCKED);
		if (cas == 0) {
#ifdef ZFS_DEBUG
			ASSERT(zrl->zr_owner == NULL);
			zrl->zr_owner = curthread;
#endif
			return (1);
		}
	}

	ASSERT((int32_t)n > ZRL_DESTROYED);

	return (0);
}

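/*
 * Release the lock taken by zrl_tryenter() and wake any threads blocked in
 * zrl_add().
 */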
void
zrl_exit(zrlock_t *zrl)
{
	ASSERT(zrl->zr_refcount == ZRL_LOCKED);

	mutex_enter(&zrl->zr_mtx);
#ifdef ZFS_DEBUG
	ASSERT(zrl->zr_owner == curthread);
	zrl->zr_owner = NULL;
	membar_producer();	/* make sure the owner store happens first */
#endif
	zrl->zr_refcount = 0;
	cv_broadcast(&zrl->zr_cv);
	mutex_exit(&zrl->zr_mtx);
}

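/*
 * Return the current reference count, treating a locked ZRL as zero
 * references.
 */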
int
zrl_refcount(zrlock_t *zrl)
{
	ASSERT(zrl->zr_refcount > ZRL_DESTROYED);

	int n = (int)zrl->zr_refcount;
	return (n <= 0 ? 0 : n);
}

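/*
 * Return nonzero if there are no outstanding references (a locked ZRL counts
 * as zero references).
 */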
int
zrl_is_zero(zrlock_t *zrl)
{
	ASSERT(zrl->zr_refcount > ZRL_DESTROYED);

	return (zrl->zr_refcount <= 0);
}

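/*
 * Return nonzero if the ZRL is currently locked by zrl_tryenter().
 */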
int
zrl_is_locked(zrlock_t *zrl)
{
	ASSERT(zrl->zr_refcount > ZRL_DESTROYED);

	return (zrl->zr_refcount == ZRL_LOCKED);
}

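/*
 * Debug-only: return the thread most recently recorded as holding the ZRL
 * (the locker or the latest zrl_add() caller), or NULL.
 */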
#ifdef ZFS_DEBUG
kthread_t *
zrl_owner(zrlock_t *zrl)
{
	return (zrl->zr_owner);
}
#endif