#ifndef _SPL_CONDVAR_H
#define _SPL_CONDVAR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <linux/module.h>
#include <linux/wait.h>

/* The kcondvar_t struct is protected by a mutex which is taken externally
 * before calling any of the wait/signal functions and is passed into the
 * wait functions; see the usage sketch below the type definitions.
 */
#define CV_MAGIC  0x346545f4
#define CV_POISON 0x95

typedef struct {
        int cv_magic;
        char *cv_name;
        wait_queue_head_t cv_event;
        atomic_t cv_waiters;
        kmutex_t *cv_mutex; /* only for verification purposes */
} kcondvar_t;

typedef enum { CV_DEFAULT=0, CV_DRIVER } kcv_type_t;

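/* Illustrative usage sketch (not part of this header's API):
 * 'example_lock', 'example_cv', and 'example_ready' are hypothetical
 * caller-side names, assumed to have been set up with mutex and cv_init()
 * calls.  The caller owns the mutex around both the wait and the signal
 * sides and re-checks the condition in a loop to tolerate spurious wakeups.
 *
 *      kmutex_t example_lock;
 *      kcondvar_t example_cv;
 *      int example_ready = 0;
 *
 *      // Waiter side
 *      mutex_enter(&example_lock);
 *      while (!example_ready)
 *              cv_wait(&example_cv, &example_lock);
 *      mutex_exit(&example_lock);
 *
 *      // Signaler side
 *      mutex_enter(&example_lock);
 *      example_ready = 1;
 *      cv_signal(&example_cv);
 *      mutex_exit(&example_lock);
 */
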
static __inline__ void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
        BUG_ON(cvp == NULL);
        BUG_ON(type != CV_DEFAULT);
        BUG_ON(arg != NULL);

        cvp->cv_magic = CV_MAGIC;
        init_waitqueue_head(&cvp->cv_event);
        atomic_set(&cvp->cv_waiters, 0);
        cvp->cv_mutex = NULL;
        cvp->cv_name = NULL;

        if (name) {
                cvp->cv_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
                if (cvp->cv_name)
                        strcpy(cvp->cv_name, name);
        }
}

static __inline__ void
cv_destroy(kcondvar_t *cvp)
{
        BUG_ON(cvp == NULL);
        BUG_ON(cvp->cv_magic != CV_MAGIC);
        BUG_ON(atomic_read(&cvp->cv_waiters) != 0);
        BUG_ON(waitqueue_active(&cvp->cv_event));

        if (cvp->cv_name)
                kfree(cvp->cv_name);

        memset(cvp, CV_POISON, sizeof(*cvp));
}

static __inline__ void
cv_wait(kcondvar_t *cvp, kmutex_t *mtx)
{
        DEFINE_WAIT(wait);
        int flag = 1;

        BUG_ON(cvp == NULL || mtx == NULL);
        BUG_ON(cvp->cv_magic != CV_MAGIC);
        BUG_ON(!mutex_owned(mtx));

        if (cvp->cv_mutex == NULL)
                cvp->cv_mutex = mtx;

        /* Ensure the same mutex is used by all callers */
        BUG_ON(cvp->cv_mutex != mtx);

        for (;;) {
                prepare_to_wait_exclusive(&cvp->cv_event, &wait,
                                          TASK_INTERRUPTIBLE);
                /* Must occur after we are added to the list, but only once */
                if (flag) {
                        atomic_inc(&cvp->cv_waiters);
                        flag = 0;
                }

                /* XXX - The correct thing to do here may be to wake up and
                 * force the caller to handle the signal.  Spurious wakeups
                 * should already be safely handled by the caller. */
                if (signal_pending(current))
                        flush_signals(current);

                /* The mutex must be dropped after prepare_to_wait(); this
                 * ensures we are linked into the waiters list and avoids the
                 * race where 'cvp->cv_waiters > 0' but the list is empty. */
                mutex_exit(mtx);
                schedule();
                mutex_enter(mtx);

                /* XXX - The correct thing to do here may be to wake up and
                 * force the caller to handle the signal.  Spurious wakeups
                 * should already be safely handled by the caller. */
                if (signal_pending(current))
                        continue;

                break;
        }

        atomic_dec(&cvp->cv_waiters);
        finish_wait(&cvp->cv_event, &wait);
}

/* The 'expire_time' argument is an absolute wall clock time expressed in
 * jiffies.  The return value is the time remaining (expire_time - now),
 * or -1 if the timeout occurred; see the sketch after this function.
 */
static __inline__ clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mtx, clock_t expire_time)
{
        DEFINE_WAIT(wait);
        clock_t time_left;
        int flag = 1;

        BUG_ON(cvp == NULL || mtx == NULL);
        BUG_ON(cvp->cv_magic != CV_MAGIC);
        BUG_ON(!mutex_owned(mtx));

        if (cvp->cv_mutex == NULL)
                cvp->cv_mutex = mtx;

        /* XXX - Does not handle jiffy wrap properly */
        time_left = expire_time - jiffies;
        if (time_left <= 0)
                return -1;

        /* Ensure the same mutex is used by all callers */
        BUG_ON(cvp->cv_mutex != mtx);

        for (;;) {
                prepare_to_wait_exclusive(&cvp->cv_event, &wait,
                                          TASK_INTERRUPTIBLE);
                if (flag) {
                        atomic_inc(&cvp->cv_waiters);
                        flag = 0;
                }

                /* XXX - The correct thing to do here may be to wake up and
                 * force the caller to handle the signal.  Spurious wakeups
                 * should already be safely handled by the caller. */
                if (signal_pending(current))
                        flush_signals(current);

                /* The mutex must be dropped after prepare_to_wait(); this
                 * ensures we are linked into the waiters list and avoids the
                 * race where 'cvp->cv_waiters > 0' but the list is empty. */
                mutex_exit(mtx);
                time_left = schedule_timeout(time_left);
                mutex_enter(mtx);

                /* XXX - The correct thing to do here may be to wake up and
                 * force the caller to handle the signal.  Spurious wakeups
                 * should already be safely handled by the caller. */
                if (signal_pending(current)) {
                        if (time_left > 0)
                                continue;

                        flush_signals(current);
                }

                break;
        }

        atomic_dec(&cvp->cv_waiters);
        finish_wait(&cvp->cv_event, &wait);

        return (time_left > 0 ? time_left : -1);
}

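/* Illustrative cv_timedwait() sketch, reusing the hypothetical names from
 * the sketch above: wait for 'example_ready' for up to one second.  The
 * expiry is passed as an absolute time, so the caller adds the relative
 * timeout to the current value of 'jiffies'.
 *
 *      clock_t rc;
 *
 *      mutex_enter(&example_lock);
 *      while (!example_ready) {
 *              rc = cv_timedwait(&example_cv, &example_lock, jiffies + HZ);
 *              if (rc == -1)
 *                      break;          // timed out, condition still false
 *      }
 *      mutex_exit(&example_lock);
 */
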
static __inline__ void
cv_signal(kcondvar_t *cvp)
{
        BUG_ON(cvp == NULL);
        BUG_ON(cvp->cv_magic != CV_MAGIC);

        /* All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
         * waiter will be made runnable by each call to wake_up().
         * Additionally, wake_up() holds a spinlock associated with
         * the wait queue to ensure we don't race when waking processes. */
        if (atomic_read(&cvp->cv_waiters) > 0)
                wake_up(&cvp->cv_event);
}

static __inline__ void
cv_broadcast(kcondvar_t *cvp)
{
        BUG_ON(cvp == NULL);
        BUG_ON(cvp->cv_magic != CV_MAGIC);

        /* wake_up_all() will wake all waiters, even those which have the
         * WQ_FLAG_EXCLUSIVE flag set; see the sketch below. */
        if (atomic_read(&cvp->cv_waiters) > 0)
                wake_up_all(&cvp->cv_event);
}

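/* Illustrative cv_broadcast() sketch (same hypothetical names as above,
 * plus a hypothetical 'example_shutdown' flag): wake every waiter at once,
 * e.g. when shutting down and all threads blocked in cv_wait() must
 * re-check the condition and exit.
 *
 *      mutex_enter(&example_lock);
 *      example_shutdown = 1;
 *      cv_broadcast(&example_cv);
 *      mutex_exit(&example_lock);
 */
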
#ifdef __cplusplus
}
#endif

#endif /* _SPL_CONDVAR_H */