/* SPL (Solaris Porting Layer) mutex implementation.
 * Source: include/sys/mutex.h (git.proxmox.com mirror of mirror_spl.git). */
09b414e8 1#ifndef _SPL_MUTEX_H
2#define _SPL_MUTEX_H
f1ca4da6 3
4#ifdef __cplusplus
5extern "C" {
6#endif
7
f1b59d26 8#include <linux/module.h>
115aed0d 9#include <linux/hardirq.h>
f4b37741 10#include <sys/types.h>
f1b59d26 11
f1ca4da6 12/* See the "Big Theory Statement" in solaris mutex.c.
13 *
14 * Spin mutexes apparently aren't needed by zfs so we assert
15 * if ibc is non-zero.
16 *
 * Our implementation of adaptive mutexes isn't really adaptive:
18 * They go to sleep every time.
19 */
20
21#define MUTEX_DEFAULT 0
22#define MUTEX_HELD(x) (mutex_owned(x))
23
24#define KM_MAGIC 0x42424242
25#define KM_POISON 0x84
f1b59d26 26
/* Adaptive mutex built from a binary semaphore, with a spin lock
 * protecting the bookkeeping fields (owner, name, magic). */
typedef struct {
	int km_magic;			/* set to KM_MAGIC by mutex_init() */
	char *km_name;			/* optional debug name (kmalloc'd copy) */
	struct task_struct *km_owner;	/* task holding the mutex, or NULL */
	struct semaphore km_sem;	/* binary semaphore providing the lock */
	spinlock_t km_lock;		/* guards updates to the fields above */
} kmutex_t;
34
35#undef mutex_init
36static __inline__ void
37mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
38{
bcd68186 39 ENTRY;
937879f1 40 ASSERT(mp);
41 ASSERT(ibc == NULL); /* XXX - Spin mutexes not needed */
42 ASSERT(type == MUTEX_DEFAULT); /* XXX - Only default type supported */
f1ca4da6 43
44 mp->km_magic = KM_MAGIC;
d61e12af 45 spin_lock_init(&mp->km_lock);
f1ca4da6 46 sema_init(&mp->km_sem, 1);
47 mp->km_owner = NULL;
48 mp->km_name = NULL;
49
50 if (name) {
51 mp->km_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
52 if (mp->km_name)
53 strcpy(mp->km_name, name);
54 }
bcd68186 55 EXIT;
f1ca4da6 56}
57
58#undef mutex_destroy
59static __inline__ void
60mutex_destroy(kmutex_t *mp)
61{
bcd68186 62 ENTRY;
937879f1 63 ASSERT(mp);
64 ASSERT(mp->km_magic == KM_MAGIC);
d61e12af 65 spin_lock(&mp->km_lock);
f1ca4da6 66
67 if (mp->km_name)
68 kfree(mp->km_name);
69
70 memset(mp, KM_POISON, sizeof(*mp));
d61e12af 71 spin_unlock(&mp->km_lock);
bcd68186 72 EXIT;
f1ca4da6 73}
74
/* Acquire the mutex, sleeping on the semaphore until it is available.
 * Not truly adaptive: the caller always sleeps rather than spinning
 * when the lock is contended. */
static __inline__ void
mutex_enter(kmutex_t *mp)
{
	ENTRY;
	ASSERT(mp);
	ASSERT(mp->km_magic == KM_MAGIC);
	spin_lock(&mp->km_lock);

	/* down() below may sleep; catch callers in atomic context here.
	 * The !current->exit_state test skips exiting tasks -- presumably
	 * to avoid false positives during task teardown (TODO confirm). */
	if (unlikely(in_atomic() && !current->exit_state)) {
		spin_unlock(&mp->km_lock);
		__CDEBUG_LIMIT(S_MUTEX, D_ERROR,
			       "May schedule while atomic: %s/0x%08x/%d\n",
			       current->comm, preempt_count(), current->pid);
		SBUG();	/* NOTE(review): assumed to halt; if it returned,
			 * km_lock would be unlocked twice below */
	}

	/* Must not hold km_lock across down() since down() can sleep. */
	spin_unlock(&mp->km_lock);

	down(&mp->km_sem);

	/* Semaphore acquired: record ourselves as owner under km_lock. */
	spin_lock(&mp->km_lock);
	ASSERT(mp->km_owner == NULL);
	mp->km_owner = current;
	spin_unlock(&mp->km_lock);
	EXIT;
}
101
d61e12af 102/* Return 1 if we acquired the mutex, else zero. */
f1ca4da6 103static __inline__ int
104mutex_tryenter(kmutex_t *mp)
105{
d61e12af 106 int rc;
bcd68186 107 ENTRY;
f1ca4da6 108
937879f1 109 ASSERT(mp);
110 ASSERT(mp->km_magic == KM_MAGIC);
d61e12af 111 spin_lock(&mp->km_lock);
115aed0d 112
113 if (unlikely(in_atomic() && !current->exit_state)) {
d61e12af 114 spin_unlock(&mp->km_lock);
3561541c 115 __CDEBUG_LIMIT(S_MUTEX, D_ERROR,
116 "May schedule while atomic: %s/0x%08x/%d\n",
117 current->comm, preempt_count(), current->pid);
118 SBUG();
115aed0d 119 }
120
d61e12af 121 spin_unlock(&mp->km_lock);
122 rc = down_trylock(&mp->km_sem); /* returns 0 if acquired */
123 if (rc == 0) {
124 spin_lock(&mp->km_lock);
937879f1 125 ASSERT(mp->km_owner == NULL);
f1ca4da6 126 mp->km_owner = current;
d61e12af 127 spin_unlock(&mp->km_lock);
bcd68186 128 RETURN(1);
f1ca4da6 129 }
bcd68186 130
131 RETURN(0);
f1ca4da6 132}
133
/* Release the mutex.  Must be called by the task that holds it. */
static __inline__ void
mutex_exit(kmutex_t *mp)
{
	ENTRY;
	ASSERT(mp);
	ASSERT(mp->km_magic == KM_MAGIC);
	spin_lock(&mp->km_lock);

	ASSERT(mp->km_owner == current);
	/* Clear ownership before up(): a waiter woken by up() asserts
	 * km_owner == NULL when it claims the mutex in mutex_enter(). */
	mp->km_owner = NULL;
	spin_unlock(&mp->km_lock);
	up(&mp->km_sem);
	EXIT;
}
148
d61e12af 149/* Return 1 if mutex is held by current process, else zero. */
f1ca4da6 150static __inline__ int
151mutex_owned(kmutex_t *mp)
152{
d61e12af 153 int rc;
bcd68186 154 ENTRY;
d61e12af 155
937879f1 156 ASSERT(mp);
157 ASSERT(mp->km_magic == KM_MAGIC);
d61e12af 158 spin_lock(&mp->km_lock);
d61e12af 159 rc = (mp->km_owner == current);
160 spin_unlock(&mp->km_lock);
161
bcd68186 162 RETURN(rc);
f1ca4da6 163}
164
d61e12af 165/* Return owner if mutex is owned, else NULL. */
f1ca4da6 166static __inline__ kthread_t *
167mutex_owner(kmutex_t *mp)
168{
d61e12af 169 kthread_t *thr;
bcd68186 170 ENTRY;
d61e12af 171
937879f1 172 ASSERT(mp);
173 ASSERT(mp->km_magic == KM_MAGIC);
d61e12af 174 spin_lock(&mp->km_lock);
d61e12af 175 thr = mp->km_owner;
176 spin_unlock(&mp->km_lock);
177
bcd68186 178 RETURN(thr);
f1ca4da6 179}
180
181#ifdef __cplusplus
182}
183#endif
184
09b414e8 185#endif /* _SPL_MUTEX_H */