]>
Commit | Line | Data |
---|---|---|
09b414e8 BB |
1 | #ifndef _SPL_MUTEX_H |
2 | #define _SPL_MUTEX_H | |
f1ca4da6 BB |
3 | |
4 | #ifdef __cplusplus | |
5 | extern "C" { | |
6 | #endif | |
7 | ||
f1b59d26 | 8 | #include <linux/module.h> |
596e65b4 | 9 | #include "spl-types.h" |
f1b59d26 | 10 | |
f1ca4da6 BB |
11 | /* See the "Big Theory Statement" in solaris mutex.c. |
12 | * | |
13 | * Spin mutexes apparently aren't needed by zfs so we assert | |
14 | * if ibc is non-zero. | |
15 | * | |
16 | * Our implementation of adaptive mutexes isn't really adaptive. | |
17 | * They go to sleep every time. | |
18 | */ | |
19 | ||
20 | #define MUTEX_DEFAULT 0 | |
21 | #define MUTEX_HELD(x) (mutex_owned(x)) | |
22 | ||
23 | #define KM_MAGIC 0x42424242 | |
24 | #define KM_POISON 0x84 | |
f1b59d26 | 25 | |
f1ca4da6 BB |
/* Mutex object: a binary semaphore plus bookkeeping for debug checks. */
typedef struct {
	int km_magic;			/* KM_MAGIC while initialized; poisoned on destroy */
	char *km_name;			/* kmalloc()'d copy of the init-time name, may be NULL */
	struct task_struct *km_owner;	/* task currently holding the mutex, else NULL */
	struct semaphore km_sem;	/* binary semaphore that provides the locking */
} kmutex_t;
32 | ||
33 | #undef mutex_init | |
34 | static __inline__ void | |
35 | mutex_init(kmutex_t *mp, char *name, int type, void *ibc) | |
36 | { | |
37 | BUG_ON(ibc != NULL); /* XXX - Spin mutexes not needed? */ | |
38 | BUG_ON(type != MUTEX_DEFAULT); /* XXX - Only default type supported? */ | |
39 | ||
40 | mp->km_magic = KM_MAGIC; | |
41 | sema_init(&mp->km_sem, 1); | |
42 | mp->km_owner = NULL; | |
43 | mp->km_name = NULL; | |
44 | ||
45 | if (name) { | |
46 | mp->km_name = kmalloc(strlen(name) + 1, GFP_KERNEL); | |
47 | if (mp->km_name) | |
48 | strcpy(mp->km_name, name); | |
49 | } | |
50 | } | |
51 | ||
52 | #undef mutex_destroy | |
53 | static __inline__ void | |
54 | mutex_destroy(kmutex_t *mp) | |
55 | { | |
56 | BUG_ON(mp->km_magic != KM_MAGIC); | |
57 | ||
58 | if (mp->km_name) | |
59 | kfree(mp->km_name); | |
60 | ||
61 | memset(mp, KM_POISON, sizeof(*mp)); | |
62 | } | |
63 | ||
/* Block until the mutex is acquired and record the calling task as
 * owner.  May sleep, so must not be used from atomic context.
 */
static __inline__ void
mutex_enter(kmutex_t *mp)
{
	BUG_ON(mp->km_magic != KM_MAGIC);
	down(&mp->km_sem);	/* Will check in_atomic() for us */
	/* Holding the semaphore guarantees any previous owner cleared it */
	BUG_ON(mp->km_owner != NULL);
	mp->km_owner = current;
}
72 | ||
73 | /* Return 1 if we acquired the mutex, else zero. | |
74 | */ | |
75 | static __inline__ int | |
76 | mutex_tryenter(kmutex_t *mp) | |
77 | { | |
78 | int result; | |
79 | ||
80 | BUG_ON(mp->km_magic != KM_MAGIC); | |
81 | result = down_trylock(&mp->km_sem); /* returns 0 if acquired */ | |
82 | if (result == 0) { | |
83 | BUG_ON(mp->km_owner != NULL); | |
84 | mp->km_owner = current; | |
85 | return 1; | |
86 | } | |
87 | return 0; | |
88 | } | |
89 | ||
/* Release the mutex.  The caller must be the current owner.  The owner
 * field is cleared before up() so a new acquirer never observes a
 * stale owner.
 */
static __inline__ void
mutex_exit(kmutex_t *mp)
{
	BUG_ON(mp->km_magic != KM_MAGIC);
	BUG_ON(mp->km_owner != current);
	mp->km_owner = NULL;
	up(&mp->km_sem);
}
98 | ||
99 | /* Return 1 if mutex is held by current process, else zero. | |
100 | */ | |
101 | static __inline__ int | |
102 | mutex_owned(kmutex_t *mp) | |
103 | { | |
104 | BUG_ON(mp->km_magic != KM_MAGIC); | |
105 | return (mp->km_owner == current); | |
106 | } | |
107 | ||
/* Return owner if mutex is owned, else NULL.
 * NOTE(review): the result is inherently racy unless the caller
 * already holds the mutex or otherwise serializes against it —
 * confirm callers use it for diagnostics only.
 */
static __inline__ kthread_t *
mutex_owner(kmutex_t *mp)
{
	BUG_ON(mp->km_magic != KM_MAGIC);
	return mp->km_owner;
}
116 | ||
117 | #ifdef __cplusplus | |
118 | } | |
119 | #endif | |
120 | ||
09b414e8 | 121 | #endif /* _SPL_MUTEX_H */ |