/*
 * include/sys/mutex.h -- Solaris-compatible kmutex_t API implemented on
 * top of Linux semaphores (Solaris Porting Layer, SPL).
 */
1 #ifndef _SPL_MUTEX_H
2 #define _SPL_MUTEX_H
3
4 #ifdef __cplusplus
5 extern "C" {
6 #endif
7
8 #include <linux/module.h>
9 #include <linux/hardirq.h>
10 #include <sys/types.h>
11
12 /* See the "Big Theory Statement" in solaris mutex.c.
13 *
14 * Spin mutexes apparently aren't needed by zfs so we assert
15 * if ibc is non-zero.
16 *
 * Our implementation of adaptive mutexes isn't really adaptive.
18 * They go to sleep every time.
19 */
20
21 #define MUTEX_DEFAULT 0
22 #define MUTEX_HELD(x) (mutex_owned(x))
23
24 #define KM_MAGIC 0x42424242
25 #define KM_POISON 0x84
26
27 typedef struct {
28 int km_magic;
29 char *km_name;
30 struct task_struct *km_owner;
31 struct semaphore km_sem;
32 } kmutex_t;
33
34 #undef mutex_init
35 static __inline__ void
36 mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
37 {
38 BUG_ON(ibc != NULL); /* XXX - Spin mutexes not needed? */
39 BUG_ON(type != MUTEX_DEFAULT); /* XXX - Only default type supported? */
40
41 mp->km_magic = KM_MAGIC;
42 sema_init(&mp->km_sem, 1);
43 mp->km_owner = NULL;
44 mp->km_name = NULL;
45
46 if (name) {
47 mp->km_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
48 if (mp->km_name)
49 strcpy(mp->km_name, name);
50 }
51 }
52
53 #undef mutex_destroy
54 static __inline__ void
55 mutex_destroy(kmutex_t *mp)
56 {
57 BUG_ON(mp->km_magic != KM_MAGIC);
58
59 if (mp->km_name)
60 kfree(mp->km_name);
61
62 memset(mp, KM_POISON, sizeof(*mp));
63 }
64
65 static __inline__ void
66 mutex_enter(kmutex_t *mp)
67 {
68 BUG_ON(mp->km_magic != KM_MAGIC);
69
70 if (unlikely(in_atomic() && !current->exit_state)) {
71 printk("May schedule while atomic: %s/0x%08x/%d\n",
72 current->comm, preempt_count(), current->pid);
73 BUG();
74 }
75
76 down(&mp->km_sem); /* Will check in_atomic() for us */
77 BUG_ON(mp->km_owner != NULL);
78 mp->km_owner = current;
79 }
80
81 /* Return 1 if we acquired the mutex, else zero.
82 */
83 static __inline__ int
84 mutex_tryenter(kmutex_t *mp)
85 {
86 int result;
87
88 BUG_ON(mp->km_magic != KM_MAGIC);
89
90 if (unlikely(in_atomic() && !current->exit_state)) {
91 printk("May schedule while atomic: %s/0x%08x/%d\n",
92 current->comm, preempt_count(), current->pid);
93 BUG();
94 }
95
96 result = down_trylock(&mp->km_sem); /* returns 0 if acquired */
97 if (result == 0) {
98 BUG_ON(mp->km_owner != NULL);
99 mp->km_owner = current;
100 return 1;
101 }
102 return 0;
103 }
104
105 static __inline__ void
106 mutex_exit(kmutex_t *mp)
107 {
108 BUG_ON(mp->km_magic != KM_MAGIC);
109 BUG_ON(mp->km_owner != current);
110 mp->km_owner = NULL;
111 up(&mp->km_sem);
112 }
113
114 /* Return 1 if mutex is held by current process, else zero.
115 */
116 static __inline__ int
117 mutex_owned(kmutex_t *mp)
118 {
119 BUG_ON(mp->km_magic != KM_MAGIC);
120 return (mp->km_owner == current);
121 }
122
123 /* Return owner if mutex is owned, else NULL.
124 */
125 static __inline__ kthread_t *
126 mutex_owner(kmutex_t *mp)
127 {
128 BUG_ON(mp->km_magic != KM_MAGIC);
129 return mp->km_owner;
130 }
131
132 #ifdef __cplusplus
133 }
134 #endif
135
136 #endif /* _SPL_MUTEX_H */