]> git.proxmox.com Git - mirror_spl-debian.git/blame - modules/spl/spl-condvar.c
Rework condition variable implementation to be consistent with
[mirror_spl-debian.git] / modules / spl / spl-condvar.c
CommitLineData
4efd4118 1#include <sys/condvar.h>
2
3#ifdef DEBUG_SUBSYSTEM
4#undef DEBUG_SUBSYSTEM
5#endif
6
7#define DEBUG_SUBSYSTEM S_CONDVAR
8
9void
10__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
11{
12 int flags = KM_SLEEP;
13
14 ENTRY;
15 ASSERT(cvp);
16 ASSERT(name);
17 ASSERT(type == CV_DEFAULT);
18 ASSERT(arg == NULL);
19
20 cvp->cv_magic = CV_MAGIC;
21 init_waitqueue_head(&cvp->cv_event);
22 spin_lock_init(&cvp->cv_lock);
23 atomic_set(&cvp->cv_waiters, 0);
24 cvp->cv_mutex = NULL;
25 cvp->cv_name = NULL;
26 cvp->cv_name_size = strlen(name) + 1;
27
28 /* We may be called when there is a non-zero preempt_count or
29 * interrupts are disabled is which case we must not sleep.
30 */
31 if (current_thread_info()->preempt_count || irqs_disabled())
32 flags = KM_NOSLEEP;
33
34 cvp->cv_name = kmem_alloc(cvp->cv_name_size, flags);
35 if (cvp->cv_name)
36 strcpy(cvp->cv_name, name);
37
38 EXIT;
39}
40EXPORT_SYMBOL(__cv_init);
41
42void
43__cv_destroy(kcondvar_t *cvp)
44{
45 ENTRY;
46 ASSERT(cvp);
47 ASSERT(cvp->cv_magic == CV_MAGIC);
48 spin_lock(&cvp->cv_lock);
49 ASSERT(atomic_read(&cvp->cv_waiters) == 0);
50 ASSERT(!waitqueue_active(&cvp->cv_event));
51
52 if (cvp->cv_name)
53 kmem_free(cvp->cv_name, cvp->cv_name_size);
54
55 memset(cvp, CV_POISON, sizeof(*cvp));
56 spin_unlock(&cvp->cv_lock);
57 EXIT;
58}
59EXPORT_SYMBOL(__cv_destroy);
60
/*
 * Block the caller until the cv is signaled.  The mutex 'mp' must be
 * held on entry; it is dropped while sleeping and reacquired before
 * return (Solaris cv_wait() semantics).  The wait is uninterruptible.
 */
void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	DEFINE_WAIT(wait);
	ENTRY;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	spin_lock(&cvp->cv_lock);
	ASSERT(mutex_owned(mp));

	/* The first waiter binds its mutex to the cv; later callers
	 * are checked against it below. */
	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);
	spin_unlock(&cvp->cv_lock);

	/* Queue exclusively: wake_up() then releases exactly one
	 * waiter, matching cv_signal() semantics. */
	prepare_to_wait_exclusive(&cvp->cv_event, &wait,
				  TASK_UNINTERRUPTIBLE);
	atomic_inc(&cvp->cv_waiters);

	/* Mutex should be dropped after prepare_to_wait() this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	schedule();
	mutex_enter(mp);

	atomic_dec(&cvp->cv_waiters);
	finish_wait(&cvp->cv_event, &wait);
	EXIT;
}
EXPORT_SYMBOL(__cv_wait);
96
/* 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time)
{
	DEFINE_WAIT(wait);
	clock_t time_left;
	ENTRY;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	spin_lock(&cvp->cv_lock);
	ASSERT(mutex_owned(mp));

	/* The first waiter binds its mutex to the cv; later callers
	 * are checked against it below. */
	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);
	spin_unlock(&cvp->cv_lock);

	/* XXX - Does not handle jiffie wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0)
		RETURN(-1);

	/* Queue exclusively: wake_up() then releases exactly one
	 * waiter, matching cv_signal() semantics. */
	prepare_to_wait_exclusive(&cvp->cv_event, &wait,
				  TASK_UNINTERRUPTIBLE);
	atomic_inc(&cvp->cv_waiters);

	/* Mutex should be dropped after prepare_to_wait() this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	/* schedule_timeout() returns the remaining jiffies (0 on
	 * timeout), which maps directly onto the cv_timedwait()
	 * return convention below. */
	time_left = schedule_timeout(time_left);
	mutex_enter(mp);

	atomic_dec(&cvp->cv_waiters);
	finish_wait(&cvp->cv_event, &wait);

	RETURN(time_left > 0 ? time_left : -1);
}
EXPORT_SYMBOL(__cv_timedwait);
142
143void
144__cv_signal(kcondvar_t *cvp)
145{
146 ENTRY;
147 ASSERT(cvp);
148 ASSERT(cvp->cv_magic == CV_MAGIC);
149
150 /* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
151 * waiter will be set runable with each call to wake_up().
152 * Additionally wake_up() holds a spin_lock assoicated with
153 * the wait queue to ensure we don't race waking up processes. */
154 if (atomic_read(&cvp->cv_waiters) > 0)
155 wake_up(&cvp->cv_event);
156
157 EXIT;
158}
159EXPORT_SYMBOL(__cv_signal);
160
161void
162__cv_broadcast(kcondvar_t *cvp)
163{
164 ASSERT(cvp);
165 ASSERT(cvp->cv_magic == CV_MAGIC);
166 ENTRY;
167
168 /* Wake_up_all() will wake up all waiters even those which
169 * have the WQ_FLAG_EXCLUSIVE flag set. */
170 if (atomic_read(&cvp->cv_waiters) > 0)
171 wake_up_all(&cvp->cv_event);
172
173 EXIT;
174}
175EXPORT_SYMBOL(__cv_broadcast);