#ifndef _SPL_CONDVAR_H
#define _SPL_CONDVAR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <linux/module.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/slab.h>		/* kmalloc()/kfree() */
#include <linux/string.h>	/* strlen()/strcpy() */
#include <sys/mutex.h>		/* kmutex_t, mutex_owned() */
#include <sys/debug.h>		/* ASSERT() */

/* The kcondvar_t struct is protected by a mutex taken externally before
 * calling any of the wait/signal functions, and passed into the wait
 * functions.
 */
#define CV_MAGIC	0x346545f4
#define CV_POISON	0x95

typedef struct {
	int cv_magic;
	char *cv_name;
	wait_queue_head_t cv_event;
	atomic_t cv_waiters;
	kmutex_t *cv_mutex;	/* only for verification purposes */
	spinlock_t cv_lock;
} kcondvar_t;

typedef enum { CV_DEFAULT=0, CV_DRIVER } kcv_type_t;

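/* Typical wait-side usage (illustrative sketch only; 'foo_lock',
 * 'foo_cv' and 'condition' are hypothetical caller-side names):
 *
 *	mutex_enter(&foo_lock);
 *	while (!condition)
 *		cv_wait(&foo_cv, &foo_lock);
 *	... consume the now-true condition ...
 *	mutex_exit(&foo_lock);
 *
 * The while loop is required: a woken waiter must recheck the
 * condition under the mutex, since another thread may have consumed
 * it first. */
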
static __inline__ void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(cvp);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	spin_lock_init(&cvp->cv_lock);
	atomic_set(&cvp->cv_waiters, 0);
	cvp->cv_mutex = NULL;
	cvp->cv_name = NULL;

	if (name) {
		cvp->cv_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
		if (cvp->cv_name)
			strcpy(cvp->cv_name, name);
	}
}

static __inline__ void
cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	spin_lock(&cvp->cv_lock);
	ASSERT(atomic_read(&cvp->cv_waiters) == 0);
	ASSERT(!waitqueue_active(&cvp->cv_event));

	if (cvp->cv_name)
		kfree(cvp->cv_name);

	spin_unlock(&cvp->cv_lock);

	/* Poison the structure only after dropping cv_lock; poisoning
	 * while still holding the lock would overwrite the very spinlock
	 * we are about to release. */
	memset(cvp, CV_POISON, sizeof(*cvp));
}

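/* Lifecycle sketch (hypothetical 'foo' object): the condvar must be
 * initialized before use and destroyed only once no thread can still
 * be waiting on it:
 *
 *	kcondvar_t foo_cv;
 *
 *	cv_init(&foo_cv, "foo_cv", CV_DEFAULT, NULL);
 *	...
 *	cv_broadcast(&foo_cv);	(wake any remaining waiters first)
 *	cv_destroy(&foo_cv);
 */
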
static __inline__ void
cv_wait(kcondvar_t *cvp, kmutex_t *mtx)
{
	DEFINE_WAIT(wait);

	ASSERT(cvp);
	ASSERT(mtx);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	spin_lock(&cvp->cv_lock);
	ASSERT(mutex_owned(mtx));

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mtx;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mtx);
	spin_unlock(&cvp->cv_lock);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait,
				  TASK_UNINTERRUPTIBLE);
	atomic_inc(&cvp->cv_waiters);

	/* The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked into the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mtx);
	schedule();
	mutex_enter(mtx);

	atomic_dec(&cvp->cv_waiters);
	finish_wait(&cvp->cv_event, &wait);
}

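/* Why the ordering in cv_wait() matters: if the mutex were dropped
 * before prepare_to_wait_exclusive(), a wakeup could be lost
 * (illustrative interleaving, not code):
 *
 *	waiter				signaler
 *	------				--------
 *	mutex_exit(mtx);
 *					mutex_enter(mtx);
 *					cv_signal(cvp);
 *					  (wait queue still empty, so
 *					   wake_up() is a no-op and the
 *					   wakeup is lost)
 *	prepare_to_wait_exclusive();
 *	schedule();
 *	  (sleeps with no one left to wake it)
 */
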
/* 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if the timeout
 * occurred.
 */
static __inline__ clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mtx, clock_t expire_time)
{
	DEFINE_WAIT(wait);
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mtx);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	spin_lock(&cvp->cv_lock);
	ASSERT(mutex_owned(mtx));

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mtx;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mtx);
	spin_unlock(&cvp->cv_lock);

	/* XXX - Does not handle jiffies wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0)
		return -1;

	prepare_to_wait_exclusive(&cvp->cv_event, &wait,
				  TASK_UNINTERRUPTIBLE);
	atomic_inc(&cvp->cv_waiters);

	/* The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked into the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mtx);
	time_left = schedule_timeout(time_left);
	mutex_enter(mtx);

	atomic_dec(&cvp->cv_waiters);
	finish_wait(&cvp->cv_event, &wait);

	return (time_left > 0 ? time_left : -1);
}

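/* Timed wait sketch: since 'expire_time' is absolute, callers derive
 * it from the current jiffies value (hypothetical names again; 100ms
 * is an arbitrary example timeout):
 *
 *	clock_t rc = 0;
 *
 *	mutex_enter(&foo_lock);
 *	while (!condition && rc != -1)
 *		rc = cv_timedwait(&foo_cv, &foo_lock,
 *				  jiffies + msecs_to_jiffies(100));
 *	mutex_exit(&foo_lock);
 *
 * Note the deadline is recomputed on every wakeup here; a caller
 * wanting one fixed overall deadline would compute 'expire_time'
 * once before the loop. */
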
static __inline__ void
cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	/* All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be set runnable with each call to wake_up().
	 * Additionally wake_up() holds a spin_lock associated with
	 * the wait queue to ensure we don't race waking up processes. */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);
}

static __inline__ void
cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	/* wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set. */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);
}
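
/* Signal-side sketch (hypothetical producer, same names as above):
 *
 *	mutex_enter(&foo_lock);
 *	condition = 1;
 *	cv_signal(&foo_cv);	(or cv_broadcast() if every waiter
 *				 must recheck the condition)
 *	mutex_exit(&foo_lock);
 *
 * Signaling with the mutex held guarantees a waiter cannot miss the
 * state change between its condition check and its sleep. */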

#ifdef __cplusplus
}
#endif

#endif /* _SPL_CONDVAR_H */