#ifndef _SPL_MUTEX_H
#define _SPL_MUTEX_H
8 #include <linux/module.h>
9 #include <linux/hardirq.h>
10 #include <sys/types.h>
/* See the "Big Theory Statement" in solaris mutex.c.
 *
 * Spin mutexes apparently aren't needed by zfs, so we assert
 * if an interrupt block cookie (ibc) is passed.
 *
 * Our implementation of adaptive mutexes isn't really adaptive:
 * they go to sleep every time.
 */
/* Only the default (adaptive) mutex type is supported. */
#define MUTEX_DEFAULT	0
/* Solaris-compatible held-check, used by ASSERT(MUTEX_HELD(...)). */
#define MUTEX_HELD(x)	(mutex_owned(x))

#define KM_MAGIC	0x42424242	/* marks a live, initialized mutex */
#define KM_POISON	0x84		/* fill byte for destroyed mutexes */
30 struct task_struct
*km_owner
;
31 struct semaphore km_sem
;
36 static __inline__
void
37 mutex_init(kmutex_t
*mp
, char *name
, int type
, void *ibc
)
40 BUG_ON(ibc
!= NULL
); /* XXX - Spin mutexes not needed? */
41 BUG_ON(type
!= MUTEX_DEFAULT
); /* XXX - Only default type supported? */
43 mp
->km_magic
= KM_MAGIC
;
44 spin_lock_init(&mp
->km_lock
);
45 sema_init(&mp
->km_sem
, 1);
50 mp
->km_name
= kmalloc(strlen(name
) + 1, GFP_KERNEL
);
52 strcpy(mp
->km_name
, name
);
57 static __inline__
void
58 mutex_destroy(kmutex_t
*mp
)
61 spin_lock(&mp
->km_lock
);
62 BUG_ON(mp
->km_magic
!= KM_MAGIC
);
67 memset(mp
, KM_POISON
, sizeof(*mp
));
68 spin_unlock(&mp
->km_lock
);
71 static __inline__
void
72 mutex_enter(kmutex_t
*mp
)
75 spin_lock(&mp
->km_lock
);
76 BUG_ON(mp
->km_magic
!= KM_MAGIC
);
78 if (unlikely(in_atomic() && !current
->exit_state
)) {
79 printk("May schedule while atomic: %s/0x%08x/%d\n",
80 current
->comm
, preempt_count(), current
->pid
);
81 spin_unlock(&mp
->km_lock
);
85 spin_unlock(&mp
->km_lock
);
89 spin_lock(&mp
->km_lock
);
90 BUG_ON(mp
->km_owner
!= NULL
);
91 mp
->km_owner
= current
;
92 spin_unlock(&mp
->km_lock
);
95 /* Return 1 if we acquired the mutex, else zero. */
97 mutex_tryenter(kmutex_t
*mp
)
102 spin_lock(&mp
->km_lock
);
103 BUG_ON(mp
->km_magic
!= KM_MAGIC
);
105 if (unlikely(in_atomic() && !current
->exit_state
)) {
106 printk("May schedule while atomic: %s/0x%08x/%d\n",
107 current
->comm
, preempt_count(), current
->pid
);
108 spin_unlock(&mp
->km_lock
);
112 spin_unlock(&mp
->km_lock
);
113 rc
= down_trylock(&mp
->km_sem
); /* returns 0 if acquired */
115 spin_lock(&mp
->km_lock
);
116 BUG_ON(mp
->km_owner
!= NULL
);
117 mp
->km_owner
= current
;
118 spin_unlock(&mp
->km_lock
);
124 static __inline__
void
125 mutex_exit(kmutex_t
*mp
)
128 spin_lock(&mp
->km_lock
);
129 BUG_ON(mp
->km_magic
!= KM_MAGIC
);
130 BUG_ON(mp
->km_owner
!= current
);
132 spin_unlock(&mp
->km_lock
);
136 /* Return 1 if mutex is held by current process, else zero. */
137 static __inline__
int
138 mutex_owned(kmutex_t
*mp
)
143 spin_lock(&mp
->km_lock
);
144 BUG_ON(mp
->km_magic
!= KM_MAGIC
);
145 rc
= (mp
->km_owner
== current
);
146 spin_unlock(&mp
->km_lock
);
151 /* Return owner if mutex is owned, else NULL. */
152 static __inline__ kthread_t
*
153 mutex_owner(kmutex_t
*mp
)
158 spin_lock(&mp
->km_lock
);
159 BUG_ON(mp
->km_magic
!= KM_MAGIC
);
161 spin_unlock(&mp
->km_lock
);
170 #endif /* _SPL_MUTEX_H */