]> git.proxmox.com Git - mirror_spl.git/blob - include/sys/mutex.h
ca76d6ea9b942f522ccc8aaf14739b266c6f4238
[mirror_spl.git] / include / sys / mutex.h
1 #ifndef _SPL_MUTEX_H
2 #define _SPL_MUTEX_H
3
4 #ifdef __cplusplus
5 extern "C" {
6 #endif
7
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/slab.h>		/* kmalloc()/kfree() */
#include <linux/string.h>	/* kstrdup() */
#include <sys/types.h>
11
/* See the "Big Theory Statement" in the Solaris mutex.c.
 *
 * Spin mutexes apparently aren't needed by ZFS, so we assert
 * if ibc is non-zero.
 *
 * Our implementation of adaptive mutexes isn't really adaptive:
 * they go to sleep every time.
 */
20
21 #define MUTEX_DEFAULT 0
22 #define MUTEX_HELD(x) (mutex_owned(x))
23
24 #define KM_MAGIC 0x42424242
25 #define KM_POISON 0x84
26
/* Mutex built on a binary semaphore, with extra fields for debug
 * checking (magic sentinel, human-readable name, owner tracking). */
typedef struct {
	int km_magic;			/* KM_MAGIC while initialized; poisoned on destroy */
	char *km_name;			/* kmalloc'd copy of the init name; may be NULL */
	struct task_struct *km_owner;	/* task holding the mutex, NULL when unheld */
	struct semaphore km_sem;	/* binary semaphore providing the actual lock */
} kmutex_t;
33
34 #undef mutex_init
35 static __inline__ void
36 mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
37 {
38 BUG_ON(ibc != NULL); /* XXX - Spin mutexes not needed? */
39 BUG_ON(type != MUTEX_DEFAULT); /* XXX - Only default type supported? */
40
41 mp->km_magic = KM_MAGIC;
42 sema_init(&mp->km_sem, 1);
43 mp->km_owner = NULL;
44 mp->km_name = NULL;
45
46 if (name) {
47 mp->km_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
48 if (mp->km_name)
49 strcpy(mp->km_name, name);
50 }
51 }
52
53 #undef mutex_destroy
54 static __inline__ void
55 mutex_destroy(kmutex_t *mp)
56 {
57 BUG_ON(mp->km_magic != KM_MAGIC);
58
59 if (mp->km_name)
60 kfree(mp->km_name);
61
62 memset(mp, KM_POISON, sizeof(*mp));
63 }
64
65 static __inline__ void
66 mutex_enter(kmutex_t *mp)
67 {
68 BUG_ON(mp->km_magic != KM_MAGIC);
69
70 if (unlikely(in_atomic() && !current->exit_state)) {
71 dump_stack();
72 printk("Scheduling while atomic: %s/0x%08x/%d\n",
73 current->comm, preempt_count(), current->pid);
74 BUG();
75 }
76
77 down(&mp->km_sem); /* Will check in_atomic() for us */
78 BUG_ON(mp->km_owner != NULL);
79 mp->km_owner = current;
80 }
81
82 /* Return 1 if we acquired the mutex, else zero.
83 */
84 static __inline__ int
85 mutex_tryenter(kmutex_t *mp)
86 {
87 int result;
88
89 BUG_ON(mp->km_magic != KM_MAGIC);
90
91 if (unlikely(in_atomic() && !current->exit_state)) {
92 dump_stack();
93 printk("Scheduling while atomic: %s/0x%08x/%d\n",
94 current->comm, preempt_count(), current->pid);
95 BUG();
96 }
97
98 result = down_trylock(&mp->km_sem); /* returns 0 if acquired */
99 if (result == 0) {
100 BUG_ON(mp->km_owner != NULL);
101 mp->km_owner = current;
102 return 1;
103 }
104 return 0;
105 }
106
/* Release the mutex.  The caller must be the recorded owner.
 * km_owner is cleared BEFORE up(): once up() runs, another thread may
 * acquire the semaphore and set km_owner, so clearing it afterwards
 * would race.  Do not reorder these statements. */
static __inline__ void
mutex_exit(kmutex_t *mp)
{
	BUG_ON(mp->km_magic != KM_MAGIC);
	BUG_ON(mp->km_owner != current);	/* only the owner may unlock */
	mp->km_owner = NULL;
	up(&mp->km_sem);
}
115
116 /* Return 1 if mutex is held by current process, else zero.
117 */
118 static __inline__ int
119 mutex_owned(kmutex_t *mp)
120 {
121 BUG_ON(mp->km_magic != KM_MAGIC);
122 return (mp->km_owner == current);
123 }
124
125 /* Return owner if mutex is owned, else NULL.
126 */
127 static __inline__ kthread_t *
128 mutex_owner(kmutex_t *mp)
129 {
130 BUG_ON(mp->km_magic != KM_MAGIC);
131 return mp->km_owner;
132 }
133
134 #ifdef __cplusplus
135 }
136 #endif
137
138 #endif /* _SPL_MUTEX_H */