#include <linux/module.h>
#include <linux/wait.h>
/* The kcondvar_t struct is protected by a mutex taken externally before
 * calling any of the wait/signal functions, and passed into the wait
 * functions (see the usage sketch following the type definitions below). */

#define CV_MAGIC	0x346545f4
#define CV_POISON	0x95
typedef struct {
        int cv_magic;
        char *cv_name;
        wait_queue_head_t cv_event;
        atomic_t cv_waiters;
        kmutex_t *cv_mutex; /* only for verification purposes */
        spinlock_t cv_lock;
} kcondvar_t;

typedef enum { CV_DEFAULT = 0, CV_DRIVER } kcv_type_t;
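/*
 * Usage sketch (illustrative only; 'q_mutex', 'q_cv', 'q_empty', and the
 * enqueue/dequeue helpers are hypothetical caller-side names).  The same
 * externally held kmutex_t protects both the predicate and the condvar:
 *
 *	Consumer:
 *		mutex_enter(&q_mutex);
 *		while (q_empty)
 *			cv_wait(&q_cv, &q_mutex);
 *		dequeue_item();
 *		mutex_exit(&q_mutex);
 *
 *	Producer:
 *		mutex_enter(&q_mutex);
 *		enqueue_item();
 *		q_empty = 0;
 *		cv_signal(&q_cv);
 *		mutex_exit(&q_mutex);
 */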
static __inline__ void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
        ASSERT(type == CV_DEFAULT);

        cvp->cv_magic = CV_MAGIC;
        init_waitqueue_head(&cvp->cv_event);
        spin_lock_init(&cvp->cv_lock);
        atomic_set(&cvp->cv_waiters, 0);
        cvp->cv_mutex = NULL;  /* no mutex associated with the condvar yet */
        cvp->cv_name = NULL;

        if (name) {
                cvp->cv_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
                if (cvp->cv_name)
                        strcpy(cvp->cv_name, name);
        }
}
static __inline__ void
cv_destroy(kcondvar_t *cvp)
{
        ASSERT(cvp->cv_magic == CV_MAGIC);
        spin_lock(&cvp->cv_lock);
        ASSERT(atomic_read(&cvp->cv_waiters) == 0);
        ASSERT(!waitqueue_active(&cvp->cv_event));

        /* Free the name allocated in cv_init() so it is not leaked */
        if (cvp->cv_name)
                kfree(cvp->cv_name);

        memset(cvp, CV_POISON, sizeof(*cvp));
        spin_unlock(&cvp->cv_lock);
}
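/*
 * Lifecycle sketch (illustrative only; 'foo_cv' is a hypothetical name).
 * A condvar must be initialized before use and may only be destroyed
 * once no threads are waiting on it:
 *
 *	kcondvar_t foo_cv;
 *
 *	cv_init(&foo_cv, "foo_cv", CV_DEFAULT, NULL);
 *	... cv_wait()/cv_signal()/cv_broadcast() under the caller's mutex ...
 *	cv_destroy(&foo_cv);	(asserts that no waiters remain)
 */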
static __inline__ void
cv_wait(kcondvar_t *cvp, kmutex_t *mtx)
{
        DEFINE_WAIT(wait);

        ASSERT(cvp->cv_magic == CV_MAGIC);
        spin_lock(&cvp->cv_lock);
        ASSERT(mutex_owned(mtx));

        /* Remember the first mutex used with this condvar */
        if (cvp->cv_mutex == NULL)
                cvp->cv_mutex = mtx;

        /* Ensure the same mutex is used by all callers */
        ASSERT(cvp->cv_mutex == mtx);
        spin_unlock(&cvp->cv_lock);

        prepare_to_wait_exclusive(&cvp->cv_event, &wait,
                                  TASK_UNINTERRUPTIBLE);
        atomic_inc(&cvp->cv_waiters);

        /* The mutex is dropped only after prepare_to_wait(); this
         * ensures we are linked onto the waiters list and avoids the
         * race where 'cvp->cv_waiters > 0' but the list is empty. */
        mutex_exit(mtx);
        schedule();
        mutex_enter(mtx);

        atomic_dec(&cvp->cv_waiters);
        finish_wait(&cvp->cv_event, &wait);
}
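/*
 * Illustrative interleaving only (not code in this header): the lost
 * wakeup that could occur if the mutex were dropped *before*
 * prepare_to_wait_exclusive().
 *
 *	waiter                          signaller
 *	------                          ---------
 *	check predicate (false)
 *	mutex_exit(mtx);
 *	                                mutex_enter(mtx);
 *	                                make predicate true
 *	                                cv_signal(cvp);   wait queue still
 *	                                                  empty, wakeup lost
 *	                                mutex_exit(mtx);
 *	prepare_to_wait_exclusive(...);
 *	schedule();                     sleeps with no one left to wake it
 *
 * Queuing first and dropping the mutex afterwards closes this window.
 */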
/* The 'expire_time' argument is an absolute wall clock time in jiffies.
 * The return value is the time left (expire_time - now), or -1 if the
 * timeout occurred. */
static __inline__ clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mtx, clock_t expire_time)
{
        DEFINE_WAIT(wait);
        clock_t time_left;

        ASSERT(cvp->cv_magic == CV_MAGIC);
        spin_lock(&cvp->cv_lock);
        ASSERT(mutex_owned(mtx));

        /* Remember the first mutex used with this condvar */
        if (cvp->cv_mutex == NULL)
                cvp->cv_mutex = mtx;

        /* Ensure the same mutex is used by all callers */
        ASSERT(cvp->cv_mutex == mtx);
        spin_unlock(&cvp->cv_lock);

        /* XXX - Does not handle jiffie wrap properly */
        time_left = expire_time - jiffies;
        if (time_left <= 0)
                return -1;

        prepare_to_wait_exclusive(&cvp->cv_event, &wait,
                                  TASK_UNINTERRUPTIBLE);
        atomic_inc(&cvp->cv_waiters);

        /* The mutex is dropped only after prepare_to_wait(); this
         * ensures we are linked onto the waiters list and avoids the
         * race where 'cvp->cv_waiters > 0' but the list is empty. */
        mutex_exit(mtx);
        time_left = schedule_timeout(time_left);
        mutex_enter(mtx);

        atomic_dec(&cvp->cv_waiters);
        finish_wait(&cvp->cv_event, &wait);

        return (time_left > 0 ? time_left : -1);
}
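/*
 * Usage sketch (illustrative only; 'q_mutex', 'q_cv', and 'data_ready'
 * are hypothetical caller-side names).  Because 'expire_time' is an
 * absolute time, a relative timeout is expressed as jiffies + delta:
 *
 *	mutex_enter(&q_mutex);
 *	while (!data_ready) {
 *		if (cv_timedwait(&q_cv, &q_mutex, jiffies + 5 * HZ) == -1)
 *			break;			(timed out after ~5 seconds)
 *	}
 *	mutex_exit(&q_mutex);
 */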
static __inline__ void
cv_signal(kcondvar_t *cvp)
{
        ASSERT(cvp->cv_magic == CV_MAGIC);

        /* All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
         * waiter will be made runnable by each call to wake_up().
         * Additionally, wake_up() holds a spinlock associated with
         * the wait queue to ensure we don't race waking up processes. */
        if (atomic_read(&cvp->cv_waiters) > 0)
                wake_up(&cvp->cv_event);
}
static __inline__ void
cv_broadcast(kcondvar_t *cvp)
{
        ASSERT(cvp->cv_magic == CV_MAGIC);

        /* wake_up_all() will wake up all waiters, even those which
         * have the WQ_FLAG_EXCLUSIVE flag set. */
        if (atomic_read(&cvp->cv_waiters) > 0)
                wake_up_all(&cvp->cv_event);
}

#endif /* _SPL_CONDVAR_H */