#ifndef _LINUX_CONDVAR_H
#define _LINUX_CONDVAR_H

#ifdef __cplusplus
extern "C" {
#endif

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>

/* The kcondvar_t struct is protected by a mutex which is taken externally
 * before calling any of the wait/signal functions and is passed into the
 * wait functions; see the usage sketch below the type definitions.
 */
#define CV_MAGIC	0x346545f4
#define CV_POISON	0x95

typedef struct {
	int cv_magic;
	char *cv_name;
	wait_queue_head_t cv_event;
	atomic_t cv_waiters;
	kmutex_t *cv_mutex;		/* only for verification purposes */
} kcondvar_t;

typedef enum { CV_DEFAULT = 0, CV_DRIVER } kcv_type_t;
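
/* Usage sketch (illustrative only): a caller pairs a kcondvar_t with its own
 * externally held kmutex_t and a predicate which it re-checks in a loop,
 * since cv_wait() may return on spurious wakeups.  The names freight_lock,
 * freight_cv, and freight_ready below are hypothetical caller-side variables.
 *
 *	static kmutex_t freight_lock;
 *	static kcondvar_t freight_cv;
 *	static int freight_ready = 0;
 *
 *	cv_init(&freight_cv, NULL, CV_DEFAULT, NULL);
 *
 *	// waiter
 *	mutex_enter(&freight_lock);
 *	while (!freight_ready)
 *		cv_wait(&freight_cv, &freight_lock);
 *	freight_ready = 0;
 *	mutex_exit(&freight_lock);
 *
 *	// signaller
 *	mutex_enter(&freight_lock);
 *	freight_ready = 1;
 *	cv_signal(&freight_cv);		// cv_broadcast() wakes all waiters
 *	mutex_exit(&freight_lock);
 *
 *	cv_destroy(&freight_cv);
 */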

static __inline__ void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	BUG_ON(cvp == NULL);
	BUG_ON(type != CV_DEFAULT);
	BUG_ON(arg != NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	atomic_set(&cvp->cv_waiters, 0);
	cvp->cv_mutex = NULL;
	cvp->cv_name = NULL;

	if (name) {
		cvp->cv_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
		if (cvp->cv_name)
			strcpy(cvp->cv_name, name);
	}
}

static __inline__ void
cv_destroy(kcondvar_t *cvp)
{
	BUG_ON(cvp == NULL);
	BUG_ON(cvp->cv_magic != CV_MAGIC);
	BUG_ON(atomic_read(&cvp->cv_waiters) != 0);
	BUG_ON(waitqueue_active(&cvp->cv_event));

	if (cvp->cv_name)
		kfree(cvp->cv_name);

	memset(cvp, CV_POISON, sizeof(*cvp));
}

static __inline__ void
cv_wait(kcondvar_t *cvp, kmutex_t *mtx)
{
	DEFINE_WAIT(wait);
	int flag = 1;

	BUG_ON(cvp == NULL || mtx == NULL);
	BUG_ON(cvp->cv_magic != CV_MAGIC);
	BUG_ON(!mutex_owned(mtx));

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mtx;

	/* Ensure the same mutex is used by all callers */
	BUG_ON(cvp->cv_mutex != mtx);

	for (;;) {
		prepare_to_wait_exclusive(&cvp->cv_event, &wait,
					  TASK_INTERRUPTIBLE);
		/* Must occur after we are added to the list but only once */
		if (flag) {
			atomic_inc(&cvp->cv_waiters);
			flag = 0;
		}

		/* XXX - The correct thing to do here may be to wake up and
		 * force the caller to handle the signal.  Spurious wakeups
		 * should already be safely handled by the caller. */
		if (signal_pending(current))
			flush_signals(current);

		/* The mutex must be dropped only after prepare_to_wait(); this
		 * ensures we are linked onto the waiters list and avoids the
		 * race where 'cvp->cv_waiters > 0' but the list is empty. */
		mutex_exit(mtx);
		schedule();
		mutex_enter(mtx);

		/* XXX - The correct thing to do here may be to wake up and
		 * force the caller to handle the signal.  Spurious wakeups
		 * should already be safely handled by the caller. */
		if (signal_pending(current))
			continue;

		break;
	}

	atomic_dec(&cvp->cv_waiters);
	finish_wait(&cvp->cv_event, &wait);
}

/* The 'expire_time' argument is an absolute wall clock time expressed in
 * jiffies.  The return value is the time left (expire_time - now), or -1 if
 * the timeout occurred; see the caller-side sketch after this function.
 */
static __inline__ clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mtx, clock_t expire_time)
{
	DEFINE_WAIT(wait);
	clock_t time_left;
	int flag = 1;

	BUG_ON(cvp == NULL || mtx == NULL);
	BUG_ON(cvp->cv_magic != CV_MAGIC);
	BUG_ON(!mutex_owned(mtx));

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mtx;

	/* XXX - Does not handle jiffies wraparound properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0)
		return -1;

	/* Ensure the same mutex is used by all callers */
	BUG_ON(cvp->cv_mutex != mtx);

	for (;;) {
		prepare_to_wait_exclusive(&cvp->cv_event, &wait,
					  TASK_INTERRUPTIBLE);
		if (flag) {
			atomic_inc(&cvp->cv_waiters);
			flag = 0;
		}

		/* XXX - The correct thing to do here may be to wake up and
		 * force the caller to handle the signal.  Spurious wakeups
		 * should already be safely handled by the caller. */
		if (signal_pending(current))
			flush_signals(current);

		/* The mutex must be dropped only after prepare_to_wait(); this
		 * ensures we are linked onto the waiters list and avoids the
		 * race where 'cvp->cv_waiters > 0' but the list is empty. */
		mutex_exit(mtx);
		time_left = schedule_timeout(time_left);
		mutex_enter(mtx);

		/* XXX - The correct thing to do here may be to wake up and
		 * force the caller to handle the signal.  Spurious wakeups
		 * should already be safely handled by the caller. */
		if (signal_pending(current)) {
			if (time_left > 0)
				continue;

			flush_signals(current);
		}

		break;
	}

	atomic_dec(&cvp->cv_waiters);
	finish_wait(&cvp->cv_event, &wait);

	return (time_left > 0 ? time_left : -1);
}
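
/* Caller-side sketch (illustrative only): cv_timedwait() takes an absolute
 * expiration time, so a caller holding a relative timeout converts it first.
 * The variables reuse the hypothetical names from the sketch above, and
 * msecs_to_jiffies() is the standard kernel helper from <linux/jiffies.h>.
 *
 *	clock_t remaining;
 *
 *	mutex_enter(&freight_lock);
 *	while (!freight_ready) {
 *		remaining = cv_timedwait(&freight_cv, &freight_lock,
 *					 jiffies + msecs_to_jiffies(100));
 *		if (remaining == -1)
 *			break;		// timed out, condition still unset
 *	}
 *	mutex_exit(&freight_lock);
 */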

static __inline__ void
cv_signal(kcondvar_t *cvp)
{
	BUG_ON(cvp == NULL);
	BUG_ON(cvp->cv_magic != CV_MAGIC);

	/* All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be made runnable with each call to wake_up().
	 * Additionally, wake_up() holds a spinlock associated with
	 * the wait queue to ensure we don't race waking up processes. */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);
}

static __inline__ void
cv_broadcast(kcondvar_t *cvp)
{
	BUG_ON(cvp == NULL);
	BUG_ON(cvp->cv_magic != CV_MAGIC);

	/* wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set. */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);
}

#ifdef __cplusplus
}
#endif

#endif /* _LINUX_CONDVAR_H */