7 #define DEBUG_SUBSYSTEM S_MUTEX
/* Mutex implementation based on those found in Solaris.  This means
 * the MUTEX_DEFAULT type is an adaptive mutex.  When calling
 * mutex_enter() your process will spin waiting for the lock if it's
 * likely the lock will be freed shortly.  If it looks like the
 * lock will be held for a longer time we schedule and sleep waiting
 * for it.  This determination is made by checking if the holder of
 * the lock is currently running on cpu or sleeping waiting to be
 * scheduled.  If the holder is currently running it's likely the
 * lock will be shortly dropped.
 *
 * XXX: This is basically a rough implementation to see if this
 * helps our performance.  If it does, a more careful implementation
 * should be done, perhaps in assembly.
 */
/* 0: Never spin when trying to acquire lock
 * -1: Spin until acquired or holder yields without dropping lock
 * 1-MAX_INT: Spin for N attempts before sleeping for lock
 */
28 int mutex_spin_max
= 0;
31 int mutex_stats
[MUTEX_STATS_SIZE
] = { 0 };
32 spinlock_t mutex_stats_lock
;
33 struct list_head mutex_stats_list
;
/* Initialize a kmutex_t: record its magic and name, allocate the
 * backing semaphore (kmem_alloc'ed separately to keep the struct
 * small), copy the name, allocate the per-mutex stats counters, and
 * link the mutex onto the global mutex_stats_list.
 *
 * NOTE(review): this text looks like an incomplete extraction -- the
 * return type, braces, the 'flags' assignment after the preempt/irq
 * check, and the error-path return statements on the allocation
 * failure branches are not visible here.  Verify against the upstream
 * SPL mutex implementation before editing further. */
37 __spl_mutex_init(kmutex_t
*mp
, char *name
, int type
, void *ibc
)
/* km_magic marks live mutexes; initializing one twice is a bug. */
44 ASSERT(mp
->km_magic
!= KM_MAGIC
); /* Never double init */
46 mp
->km_magic
= KM_MAGIC
;
/* km_name_size includes the trailing NUL so the copy below is bounded. */
49 mp
->km_name_size
= strlen(name
) + 1;
/* Only the adaptive type is recorded here -- presumably the 'type'
 * argument selected it in code not visible in this extraction. */
53 mp
->km_type
= MUTEX_ADAPTIVE
;
63 /* We may be called when there is a non-zero preempt_count or
64 * interrupts are disabled in which case we must not sleep.
66 if (current_thread_info()->preempt_count
|| irqs_disabled())
69 /* Semaphore kmem_alloc'ed to keep struct size down (<64b) */
70 mp
->km_sem
= kmem_alloc(sizeof(struct semaphore
), flags
);
/* Allocation failure: bail out (error return not visible in this text). */
71 if (mp
->km_sem
== NULL
)
74 mp
->km_name
= kmem_alloc(mp
->km_name_size
, flags
);
/* On failure, unwind the semaphore allocation before returning. */
75 if (mp
->km_name
== NULL
) {
76 kmem_free(mp
->km_sem
, sizeof(struct semaphore
));
/* Initial count of 1 == unlocked binary semaphore backing the mutex. */
80 sema_init(mp
->km_sem
, 1);
81 strncpy(mp
->km_name
, name
, mp
->km_name_size
);
/* Zero-initialized per-mutex statistics counters. */
84 mp
->km_stats
= kmem_zalloc(sizeof(int) * MUTEX_STATS_SIZE
, flags
);
/* On failure, unwind both prior allocations before returning. */
85 if (mp
->km_stats
== NULL
) {
86 kmem_free(mp
->km_name
, mp
->km_name_size
);
87 kmem_free(mp
->km_sem
, sizeof(struct semaphore
));
91 /* XXX - This appears to be a much more contended lock than I
92 * would have expected. To run with this debugging enabled and
93 * get reasonable performance we may need to be more clever and
94 * do something like hash the mutex ptr on to one of several
95 * lists to ease this single point of contention.
97 spin_lock(&mutex_stats_lock
);
98 list_add_tail(&mp
->km_list
, &mutex_stats_list
);
99 spin_unlock(&mutex_stats_lock
);
102 EXPORT_SYMBOL(__spl_mutex_init
);
/* Tear down a kmutex_t created by __spl_mutex_init: unlink it from the
 * global stats list, free the stats array, name buffer and backing
 * semaphore, then poison the struct so use-after-destroy trips the
 * KM_MAGIC asserts.
 * NOTE(review): the return type and braces are not visible in this
 * extraction. */
105 __spl_mutex_destroy(kmutex_t
*mp
)
/* Only an initialized, not-yet-destroyed mutex may be destroyed. */
108 ASSERT(mp
->km_magic
== KM_MAGIC
);
111 spin_lock(&mutex_stats_lock
);
112 list_del_init(&mp
->km_list
);
113 spin_unlock(&mutex_stats_lock
);
/* Free in the reverse order of allocation in __spl_mutex_init. */
115 kmem_free(mp
->km_stats
, sizeof(int) * MUTEX_STATS_SIZE
);
117 kmem_free(mp
->km_name
, mp
->km_name_size
);
118 kmem_free(mp
->km_sem
, sizeof(struct semaphore
));
/* Poison the whole struct to catch any later use of this mutex. */
120 memset(mp
, KM_POISON
, sizeof(*mp
));
122 EXPORT_SYMBOL(__spl_mutex_destroy
);
124 /* Return 1 if we acquired the mutex, else zero. */
/* Non-blocking acquire: try the backing semaphore once; on success
 * record 'current' as owner and bump the TRYENTER_NOT_HELD counters.
 * NOTE(review): the 'if (rc == 0)' guard around the owner assignment
 * and the final return statement are not visible in this extraction --
 * verify against the upstream source. */
126 __mutex_tryenter(kmutex_t
*mp
)
132 ASSERT(mp
->km_magic
== KM_MAGIC
);
/* Count every tryenter attempt, globally and per-mutex. */
133 MUTEX_STAT_INC(mutex_stats
, MUTEX_TRYENTER_TOTAL
);
134 MUTEX_STAT_INC(mp
->km_stats
, MUTEX_TRYENTER_TOTAL
);
/* down_trylock() returns 0 when the semaphore was acquired. */
136 rc
= down_trylock(mp
->km_sem
);
/* A just-acquired mutex must not already have an owner. */
138 ASSERT(mp
->km_owner
== NULL
);
139 mp
->km_owner
= current
;
140 MUTEX_STAT_INC(mutex_stats
, MUTEX_TRYENTER_NOT_HELD
);
141 MUTEX_STAT_INC(mp
->km_stats
, MUTEX_TRYENTER_NOT_HELD
);
146 EXPORT_SYMBOL(__mutex_tryenter
);
/* Adaptive acquire: if the lock looks free, take it directly; if the
 * holder is currently on a cpu, spin up to mutex_spin_max attempts in
 * the hope it drops the lock soon; otherwise sleep on the semaphore.
 * NOTE(review): the declaration/increment of 'count', the blocking
 * down() calls, and the early returns are not visible in this
 * extraction -- verify the control flow against the upstream source. */
149 mutex_enter_adaptive(kmutex_t
*mp
)
151 struct task_struct
*owner
;
154 /* Lock is not held so we expect to acquire the lock */
155 if ((owner
= mp
->km_owner
) == NULL
) {
157 MUTEX_STAT_INC(mutex_stats
, MUTEX_ENTER_NOT_HELD
);
158 MUTEX_STAT_INC(mp
->km_stats
, MUTEX_ENTER_NOT_HELD
);
160 /* The lock is held by a currently running task which
161 * we expect will drop the lock before leaving the
162 * head of the runqueue. So the ideal thing to do
163 * is spin until we acquire the lock and avoid a
164 * context switch. However it is also possible the
165 * task holding the lock yields the processor with
166 * out dropping lock. In which case, we know it's
167 * going to be a while so we stop spinning and go
168 * to sleep waiting for the lock to be available.
169 * This should strike the optimum balance between
170 * spinning and sleeping waiting for a lock.
/* Spin while the holder stays on cpu, bounded by mutex_spin_max. */
172 while (task_curr(owner
) && (count
<= mutex_spin_max
)) {
/* Acquired while spinning: count it as a spin-path success. */
173 if (down_trylock(mp
->km_sem
) == 0) {
174 MUTEX_STAT_INC(mutex_stats
, MUTEX_ENTER_SPIN
);
175 MUTEX_STAT_INC(mp
->km_stats
, MUTEX_ENTER_SPIN
);
181 /* The lock is held by a sleeping task so it's going to
182 * cost us minimally one context switch. We might as
183 * well sleep and yield the processor to other tasks.
186 MUTEX_STAT_INC(mutex_stats
, MUTEX_ENTER_SLEEP
);
187 MUTEX_STAT_INC(mp
->km_stats
, MUTEX_ENTER_SLEEP
);
/* Every path through this function counts toward the totals. */
190 MUTEX_STAT_INC(mutex_stats
, MUTEX_ENTER_TOTAL
);
191 MUTEX_STAT_INC(mp
->km_stats
, MUTEX_ENTER_TOTAL
);
/* Blocking acquire, dispatching on mutex type: spin mutexes busy-wait
 * on down_trylock(); default/adaptive mutexes go through
 * mutex_enter_adaptive().  On return the current task owns the lock.
 * NOTE(review): the switch's case labels, 'break's and default branch
 * are not visible in this extraction -- verify upstream. */
195 __mutex_enter(kmutex_t
*mp
)
199 ASSERT(mp
->km_magic
== KM_MAGIC
);
201 switch (mp
->km_type
) {
/* Spin type: burn cpu until the semaphore is acquired. */
203 while (down_trylock(mp
->km_sem
));
204 MUTEX_STAT_INC(mutex_stats
, MUTEX_ENTER_SPIN
);
205 MUTEX_STAT_INC(mp
->km_stats
, MUTEX_ENTER_SPIN
);
/* Adaptive type: spin-or-sleep heuristic. */
208 mutex_enter_adaptive(mp
);
/* The lock is now held; claim ownership for the current task. */
212 ASSERT(mp
->km_owner
== NULL
);
213 mp
->km_owner
= current
;
217 EXPORT_SYMBOL(__mutex_enter
);
/* Release the mutex; only the owning task may unlock it.
 * NOTE(review): the lines clearing km_owner and up()'ing the backing
 * semaphore are not visible in this extraction -- verify upstream. */
220 __mutex_exit(kmutex_t
*mp
)
224 ASSERT(mp
->km_magic
== KM_MAGIC
);
/* Unlocking a mutex the current task does not hold is a bug. */
225 ASSERT(mp
->km_owner
== current
);
230 EXPORT_SYMBOL(__mutex_exit
);
232 /* Return 1 if mutex is held by current process, else zero. */
234 __mutex_owned(kmutex_t
*mp
)
238 ASSERT(mp
->km_magic
== KM_MAGIC
);
/* Ownership test is a simple pointer compare against 'current'. */
239 RETURN(mp
->km_owner
== current
);
241 EXPORT_SYMBOL(__mutex_owned
);
243 /* Return owner if mutex is owned, else NULL. */
245 __spl_mutex_owner(kmutex_t
*mp
)
249 ASSERT(mp
->km_magic
== KM_MAGIC
);
/* km_owner is NULL whenever the mutex is not held. */
250 RETURN(mp
->km_owner
);
252 EXPORT_SYMBOL(__spl_mutex_owner
);
/* Module-init fragment: prepare the global stats lock and list before
 * any mutex can register itself via __spl_mutex_init.
 * NOTE(review): the enclosing init function's header is not visible in
 * this extraction. */
259 spin_lock_init(&mutex_stats_lock
);
260 INIT_LIST_HEAD(&mutex_stats_list
);
/* Module-exit fragment: every mutex must have been destroyed (and thus
 * unlinked from the stats list) before unload.
 * NOTE(review): the enclosing fini function's header is not visible in
 * this extraction. */
270 ASSERT(list_empty(&mutex_stats_list
));
275 module_param(mutex_spin_max
, int, 0644);
276 MODULE_PARM_DESC(mutex_spin_max
, "Spin a maximum of N times to aquire lock");