#include <sys/mutex.h>

#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_MUTEX

/* Mutex implementation based on those found in Solaris. This means
 * the MUTEX_DEFAULT type is an adaptive mutex. When calling
 * mutex_enter() your process will spin waiting for the lock if it's
 * likely the lock will be freed shortly. If it looks like the
 * lock will be held for a longer time we schedule and sleep waiting
 * for it. This determination is made by checking if the holder of
 * the lock is currently running on cpu or sleeping waiting to be
 * scheduled. If the holder is currently running it's likely the
 * lock will be dropped shortly.
 *
 * XXX: This is basically a rough implementation to see if this
 * helps our performance. If it does, a more careful implementation
 * should be done, perhaps in assembly.
 */
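
/* A minimal usage sketch (assuming the Solaris-style mutex_init(),
 * mutex_enter(), mutex_exit() and mutex_destroy() wrappers declared in
 * <sys/mutex.h>, which map onto the __spl_mutex_*()/__mutex_*() functions
 * below; the wrapper names and argument order are assumptions, not part
 * of this file):
 *
 *   kmutex_t lock;
 *
 *   mutex_init(&lock, "example-lock", MUTEX_DEFAULT, NULL);
 *   mutex_enter(&lock);
 *   ... critical section, current task recorded as lock.km_owner ...
 *   mutex_exit(&lock);
 *   mutex_destroy(&lock);
 */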

/* 0: Never spin when trying to acquire lock
 * -1: Spin until acquired or holder yields without dropping lock
 * 1-MAX_INT: Spin for N attempts before sleeping for lock
 */
int mutex_spin_max = 100;

#ifdef DEBUG_MUTEX
int mutex_stats[MUTEX_STATS_SIZE] = { 0 };
DEFINE_MUTEX(mutex_stats_lock);
LIST_HEAD(mutex_stats_list);
#endif

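/* Initialize a kmutex_t: record the magic, name and type, and allocate
 * the backing semaphore (and, under DEBUG_MUTEX, the per-mutex stats
 * array, which is also linked onto the global mutex_stats_list). */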
void
__spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
{
        ASSERT(mp);
        ASSERT(name);
        ASSERT(ibc == NULL);
        ASSERT(mp->km_magic != KM_MAGIC); /* Never double init */

        mp->km_magic = KM_MAGIC;
        mp->km_owner = NULL;
        mp->km_name = NULL;
        mp->km_name_size = strlen(name) + 1;

        switch (type) {
                case MUTEX_DEFAULT:
                        mp->km_type = MUTEX_ADAPTIVE;
                        break;
                case MUTEX_SPIN:
                case MUTEX_ADAPTIVE:
                        mp->km_type = type;
                        break;
                default:
                        SBUG();
        }

        /* Semaphore kmem_alloc'ed to keep struct size down (<64b) */
        mp->km_sem = kmem_alloc(sizeof(struct semaphore), KM_SLEEP);
        if (mp->km_sem == NULL)
                return;

        mp->km_name = kmem_alloc(mp->km_name_size, KM_SLEEP);
        if (mp->km_name == NULL) {
                kmem_free(mp->km_sem, sizeof(struct semaphore));
                return;
        }

        sema_init(mp->km_sem, 1);
        strcpy(mp->km_name, name);

#ifdef DEBUG_MUTEX
        mp->km_stats = kmem_zalloc(sizeof(int) * MUTEX_STATS_SIZE, KM_SLEEP);
        if (mp->km_stats == NULL) {
                kmem_free(mp->km_name, mp->km_name_size);
                kmem_free(mp->km_sem, sizeof(struct semaphore));
                return;
        }

        mutex_lock(&mutex_stats_lock);
        list_add_tail(&mp->km_list, &mutex_stats_list);
        mutex_unlock(&mutex_stats_lock);
#endif
}
EXPORT_SYMBOL(__spl_mutex_init);

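/* Tear down a kmutex_t: unlink it from the stats list (DEBUG_MUTEX only),
 * free the allocations made at init time and poison the structure so any
 * use after destroy trips the KM_MAGIC assertions. */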
void
__spl_mutex_destroy(kmutex_t *mp)
{
        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);

#ifdef DEBUG_MUTEX
        mutex_lock(&mutex_stats_lock);
        list_del_init(&mp->km_list);
        mutex_unlock(&mutex_stats_lock);

        kmem_free(mp->km_stats, sizeof(int) * MUTEX_STATS_SIZE);
#endif
        kmem_free(mp->km_name, mp->km_name_size);
        kmem_free(mp->km_sem, sizeof(struct semaphore));

        memset(mp, KM_POISON, sizeof(*mp));
}
EXPORT_SYMBOL(__spl_mutex_destroy);

/* Return 1 if we acquired the mutex, else zero. */
int
__mutex_tryenter(kmutex_t *mp)
{
        int rc;
        ENTRY;

        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);
        MUTEX_STAT_INC(mutex_stats, MUTEX_TRYENTER_TOTAL);
        MUTEX_STAT_INC(mp->km_stats, MUTEX_TRYENTER_TOTAL);

        rc = down_trylock(mp->km_sem);
        if (rc == 0) {
                ASSERT(mp->km_owner == NULL);
                mp->km_owner = current;
                MUTEX_STAT_INC(mutex_stats, MUTEX_TRYENTER_NOT_HELD);
                MUTEX_STAT_INC(mp->km_stats, MUTEX_TRYENTER_NOT_HELD);
        }

        RETURN(!rc);
}
EXPORT_SYMBOL(__mutex_tryenter);

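/* Adaptive acquisition: if the current holder is running on a cpu, spin
 * for up to mutex_spin_max attempts in the hope it drops the lock without
 * forcing a context switch; otherwise, or once the spin budget is
 * exhausted, sleep on the semaphore. */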
static void
mutex_enter_adaptive(kmutex_t *mp)
{
        struct task_struct *owner;
        int count = 0;

        /* Lock is not held so we expect to acquire the lock */
        if ((owner = mp->km_owner) == NULL) {
                down(mp->km_sem);
                MUTEX_STAT_INC(mutex_stats, MUTEX_ENTER_NOT_HELD);
                MUTEX_STAT_INC(mp->km_stats, MUTEX_ENTER_NOT_HELD);
        } else {
                /* The lock is held by a currently running task which
                 * we expect will drop the lock before leaving the
                 * head of the runqueue. So the ideal thing to do
                 * is spin until we acquire the lock and avoid a
                 * context switch. However it is also possible the
                 * task holding the lock yields the processor without
                 * dropping the lock. In that case, we know it's
                 * going to be a while so we stop spinning and go
                 * to sleep waiting for the lock to be available.
                 * This should strike the optimum balance between
                 * spinning and sleeping waiting for a lock.
                 */
                while (task_curr(owner) && (count <= mutex_spin_max)) {
                        if (down_trylock(mp->km_sem) == 0) {
                                MUTEX_STAT_INC(mutex_stats, MUTEX_ENTER_SPIN);
                                MUTEX_STAT_INC(mp->km_stats, MUTEX_ENTER_SPIN);
                                GOTO(out, count);
                        }
                        count++;
                }

                /* The lock is held by a sleeping task so it's going to
                 * cost us at least one context switch. We might as
                 * well sleep and yield the processor to other tasks.
                 */
                down(mp->km_sem);
                MUTEX_STAT_INC(mutex_stats, MUTEX_ENTER_SLEEP);
                MUTEX_STAT_INC(mp->km_stats, MUTEX_ENTER_SLEEP);
        }
out:
        MUTEX_STAT_INC(mutex_stats, MUTEX_ENTER_TOTAL);
        MUTEX_STAT_INC(mp->km_stats, MUTEX_ENTER_TOTAL);
}

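/* Acquire the mutex, either by busy-waiting (MUTEX_SPIN) or via the
 * adaptive spin/sleep policy above (MUTEX_ADAPTIVE), then record the
 * current task as owner. */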
void
__mutex_enter(kmutex_t *mp)
{
        ENTRY;
        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);

        switch (mp->km_type) {
                case MUTEX_SPIN:
                        while (down_trylock(mp->km_sem));
                        MUTEX_STAT_INC(mutex_stats, MUTEX_ENTER_SPIN);
                        MUTEX_STAT_INC(mp->km_stats, MUTEX_ENTER_SPIN);
                        break;
                case MUTEX_ADAPTIVE:
                        mutex_enter_adaptive(mp);
                        break;
        }

        ASSERT(mp->km_owner == NULL);
        mp->km_owner = current;

        EXIT;
}
EXPORT_SYMBOL(__mutex_enter);

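/* Release the mutex; the caller must be the current owner. */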
void
__mutex_exit(kmutex_t *mp)
{
        ENTRY;
        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);
        ASSERT(mp->km_owner == current);
        mp->km_owner = NULL;
        up(mp->km_sem);
        EXIT;
}
EXPORT_SYMBOL(__mutex_exit);

/* Return 1 if mutex is held by current process, else zero. */
int
__mutex_owned(kmutex_t *mp)
{
        ENTRY;
        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);
        RETURN(mp->km_owner == current);
}
EXPORT_SYMBOL(__mutex_owned);

/* Return owner if mutex is owned, else NULL. */
kthread_t *
__spl_mutex_owner(kmutex_t *mp)
{
        ENTRY;
        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);
        RETURN(mp->km_owner);
}
EXPORT_SYMBOL(__spl_mutex_owner);

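/* Module load/unload hooks; at unload time (with DEBUG_MUTEX) every
 * registered mutex should already have been destroyed. */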
int
spl_mutex_init(void)
{
        ENTRY;
        RETURN(0);
}

void
spl_mutex_fini(void)
{
        ENTRY;
#ifdef DEBUG_MUTEX
        ASSERT(list_empty(&mutex_stats_list));
#endif
        EXIT;
}