[mirror_spl.git] / include/sys/mutex.h
#ifndef _SPL_MUTEX_H
#define _SPL_MUTEX_H

#ifdef __cplusplus
extern "C" {
#endif

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/sched.h>	/* current, struct task_struct */
#include <linux/spinlock.h>
#include <linux/slab.h>		/* kmalloc()/kfree() */
#include <linux/string.h>	/* strlen()/strcpy() */
#include <sys/types.h>

/* See the "Big Theory Statement" in the Solaris mutex.c.
 *
 * Spin mutexes apparently are not needed by zfs, so we assert
 * that ibc is NULL.
 *
 * Our implementation of adaptive mutexes is not really adaptive:
 * they go to sleep every time.
 */

#define MUTEX_DEFAULT	0
#define MUTEX_HELD(x)	(mutex_owned(x))

#define KM_MAGIC	0x42424242
#define KM_POISON	0x84

typedef struct {
	int km_magic;			/* set to KM_MAGIC while initialized */
	char *km_name;			/* optional debugging name */
	struct task_struct *km_owner;	/* current holder, NULL when unheld */
	struct semaphore km_sem;	/* sleeping lock providing exclusion */
	spinlock_t km_lock;		/* protects the fields above */
} kmutex_t;

#undef mutex_init
static __inline__ void
mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
{
	BUG_ON(mp == NULL);
	BUG_ON(ibc != NULL);		/* XXX - Spin mutexes not needed? */
	BUG_ON(type != MUTEX_DEFAULT);	/* XXX - Only default type supported? */

	mp->km_magic = KM_MAGIC;
	spin_lock_init(&mp->km_lock);
	sema_init(&mp->km_sem, 1);
	mp->km_owner = NULL;
	mp->km_name = NULL;

	if (name) {
		mp->km_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
		if (mp->km_name)
			strcpy(mp->km_name, name);
	}
}

#undef mutex_destroy
static __inline__ void
mutex_destroy(kmutex_t *mp)
{
	BUG_ON(mp == NULL);
	spin_lock(&mp->km_lock);
	BUG_ON(mp->km_magic != KM_MAGIC);

	if (mp->km_name)
		kfree(mp->km_name);

	/* Drop the spinlock before poisoning; the memset would otherwise
	 * overwrite km_lock itself, making the unlock undefined. */
	spin_unlock(&mp->km_lock);
	memset(mp, KM_POISON, sizeof(*mp));
}

static __inline__ void
mutex_enter(kmutex_t *mp)
{
	BUG_ON(mp == NULL);
	spin_lock(&mp->km_lock);
	BUG_ON(mp->km_magic != KM_MAGIC);

	/* Catch callers who might sleep while atomic. */
	if (unlikely(in_atomic() && !current->exit_state)) {
		printk(KERN_ERR "May schedule while atomic: %s/0x%08x/%d\n",
		       current->comm, preempt_count(), current->pid);
		spin_unlock(&mp->km_lock);
		BUG();
	}

	spin_unlock(&mp->km_lock);

	down(&mp->km_sem);	/* may sleep until the mutex is released */

	spin_lock(&mp->km_lock);
	BUG_ON(mp->km_owner != NULL);
	mp->km_owner = current;
	spin_unlock(&mp->km_lock);
}

/* Return 1 if we acquired the mutex, else 0. */
static __inline__ int
mutex_tryenter(kmutex_t *mp)
{
	int rc;

	BUG_ON(mp == NULL);
	spin_lock(&mp->km_lock);
	BUG_ON(mp->km_magic != KM_MAGIC);

	if (unlikely(in_atomic() && !current->exit_state)) {
		printk(KERN_ERR "May schedule while atomic: %s/0x%08x/%d\n",
		       current->comm, preempt_count(), current->pid);
		spin_unlock(&mp->km_lock);
		BUG();
	}

	spin_unlock(&mp->km_lock);
	rc = down_trylock(&mp->km_sem);	/* returns 0 if acquired */
	if (rc == 0) {
		spin_lock(&mp->km_lock);
		BUG_ON(mp->km_owner != NULL);
		mp->km_owner = current;
		spin_unlock(&mp->km_lock);
		return 1;
	}
	return 0;
}

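/*
 * Illustrative mutex_tryenter() pattern (a sketch, not part of this
 * header; `lock' is a hypothetical kmutex_t): take the fast path when
 * the mutex is free, otherwise fall back to blocking acquisition.
 *
 *	if (mutex_tryenter(&lock)) {
 *		... fast path, acquired without sleeping ...
 *		mutex_exit(&lock);
 *	} else {
 *		mutex_enter(&lock);	// may sleep
 *		... contended path ...
 *		mutex_exit(&lock);
 *	}
 */
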
static __inline__ void
mutex_exit(kmutex_t *mp)
{
	BUG_ON(mp == NULL);
	spin_lock(&mp->km_lock);
	BUG_ON(mp->km_magic != KM_MAGIC);
	BUG_ON(mp->km_owner != current);
	mp->km_owner = NULL;
	spin_unlock(&mp->km_lock);
	up(&mp->km_sem);
}

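/*
 * Typical lifecycle of a kmutex_t (an illustrative sketch; the name
 * `data_lock' and the critical section are hypothetical):
 *
 *	kmutex_t data_lock;
 *
 *	mutex_init(&data_lock, "data_lock", MUTEX_DEFAULT, NULL);
 *
 *	mutex_enter(&data_lock);
 *	... touch the shared data ...
 *	mutex_exit(&data_lock);
 *
 *	mutex_destroy(&data_lock);
 */
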
/* Return 1 if the mutex is held by the current process, else 0. */
static __inline__ int
mutex_owned(kmutex_t *mp)
{
	int rc;

	BUG_ON(mp == NULL);
	spin_lock(&mp->km_lock);
	BUG_ON(mp->km_magic != KM_MAGIC);
	rc = (mp->km_owner == current);
	spin_unlock(&mp->km_lock);

	return rc;
}

/* Return the owner if the mutex is owned, else NULL. */
static __inline__ kthread_t *
mutex_owner(kmutex_t *mp)
{
	kthread_t *thr;

	BUG_ON(mp == NULL);
	spin_lock(&mp->km_lock);
	BUG_ON(mp->km_magic != KM_MAGIC);
	thr = mp->km_owner;
	spin_unlock(&mp->km_lock);

	return thr;
}
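
/*
 * Debugging sketch (illustrative only): MUTEX_HELD() and mutex_owner()
 * are handy for asserting locking invariants inside a critical section.
 * `lock' is a hypothetical kmutex_t already initialized by the caller.
 *
 *	mutex_enter(&lock);
 *	BUG_ON(!MUTEX_HELD(&lock));		// we hold it
 *	BUG_ON(mutex_owner(&lock) != current);	// and we are the owner
 *	...
 *	mutex_exit(&lock);
 */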

#ifdef __cplusplus
}
#endif

#endif /* _SPL_MUTEX_H */