]>
git.proxmox.com Git - mirror_spl-debian.git/blob - include/sys/mutex.h
8 #include <linux/module.h>
9 #include <linux/hardirq.h>
10 #include <sys/types.h>
/* See the "Big Theory Statement" in the Solaris mutex.c.
 *
 * Spin mutexes apparently aren't needed by ZFS, so we assert
 * that they are never requested.
 *
 * Our implementation of adaptive mutexes isn't really adaptive:
 * they go to sleep every time.
 */
#define MUTEX_DEFAULT		0		/* only mutex type supported */
#define MUTEX_HELD(x)		(mutex_owned(x))	/* Solaris-style held check */

#define KM_MAGIC		0x42424242	/* sanity pattern stored in km_magic */
#define KM_POISON		0x84		/* fill byte written over destroyed mutexes */
30 struct task_struct
*km_owner
;
31 struct semaphore km_sem
;
36 static __inline__
void
37 mutex_init(kmutex_t
*mp
, char *name
, int type
, void *ibc
)
41 ASSERT(ibc
== NULL
); /* XXX - Spin mutexes not needed */
42 ASSERT(type
== MUTEX_DEFAULT
); /* XXX - Only default type supported */
44 mp
->km_magic
= KM_MAGIC
;
45 spin_lock_init(&mp
->km_lock
);
46 sema_init(&mp
->km_sem
, 1);
51 mp
->km_name
= kmalloc(strlen(name
) + 1, GFP_KERNEL
);
53 strcpy(mp
->km_name
, name
);
59 static __inline__
void
60 mutex_destroy(kmutex_t
*mp
)
64 ASSERT(mp
->km_magic
== KM_MAGIC
);
65 spin_lock(&mp
->km_lock
);
70 memset(mp
, KM_POISON
, sizeof(*mp
));
71 spin_unlock(&mp
->km_lock
);
75 static __inline__
void
76 mutex_enter(kmutex_t
*mp
)
80 ASSERT(mp
->km_magic
== KM_MAGIC
);
81 spin_lock(&mp
->km_lock
);
83 if (unlikely(in_atomic() && !current
->exit_state
)) {
84 spin_unlock(&mp
->km_lock
);
85 __CDEBUG_LIMIT(S_MUTEX
, D_ERROR
,
86 "May schedule while atomic: %s/0x%08x/%d\n",
87 current
->comm
, preempt_count(), current
->pid
);
91 spin_unlock(&mp
->km_lock
);
95 spin_lock(&mp
->km_lock
);
96 ASSERT(mp
->km_owner
== NULL
);
97 mp
->km_owner
= current
;
98 spin_unlock(&mp
->km_lock
);
102 /* Return 1 if we acquired the mutex, else zero. */
103 static __inline__
int
104 mutex_tryenter(kmutex_t
*mp
)
110 ASSERT(mp
->km_magic
== KM_MAGIC
);
111 spin_lock(&mp
->km_lock
);
113 if (unlikely(in_atomic() && !current
->exit_state
)) {
114 spin_unlock(&mp
->km_lock
);
115 __CDEBUG_LIMIT(S_MUTEX
, D_ERROR
,
116 "May schedule while atomic: %s/0x%08x/%d\n",
117 current
->comm
, preempt_count(), current
->pid
);
121 spin_unlock(&mp
->km_lock
);
122 rc
= down_trylock(&mp
->km_sem
); /* returns 0 if acquired */
124 spin_lock(&mp
->km_lock
);
125 ASSERT(mp
->km_owner
== NULL
);
126 mp
->km_owner
= current
;
127 spin_unlock(&mp
->km_lock
);
134 static __inline__
void
135 mutex_exit(kmutex_t
*mp
)
139 ASSERT(mp
->km_magic
== KM_MAGIC
);
140 spin_lock(&mp
->km_lock
);
142 ASSERT(mp
->km_owner
== current
);
144 spin_unlock(&mp
->km_lock
);
149 /* Return 1 if mutex is held by current process, else zero. */
150 static __inline__
int
151 mutex_owned(kmutex_t
*mp
)
157 ASSERT(mp
->km_magic
== KM_MAGIC
);
158 spin_lock(&mp
->km_lock
);
159 rc
= (mp
->km_owner
== current
);
160 spin_unlock(&mp
->km_lock
);
165 /* Return owner if mutex is owned, else NULL. */
166 static __inline__ kthread_t
*
167 mutex_owner(kmutex_t
*mp
)
173 ASSERT(mp
->km_magic
== KM_MAGIC
);
174 spin_lock(&mp
->km_lock
);
176 spin_unlock(&mp
->km_lock
);
185 #endif /* _SPL_MUTEX_H */