X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=module%2Fspl%2Fspl-condvar.c;h=80c2ef09051fd79129ae20d14bf33ae46f8852ea;hb=2ded1c7eff76f9f53b179659f25f3868b6a354ec;hp=6b4512472c2b25b8db8dbd5656a9a9e8bfd8497e;hpb=b17edc10a9c66543bef54b08e4655832aefe8939;p=mirror_spl.git

diff --git a/module/spl/spl-condvar.c b/module/spl/spl-condvar.c
index 6b45124..80c2ef0 100644
--- a/module/spl/spl-condvar.c
+++ b/module/spl/spl-condvar.c
@@ -1,4 +1,4 @@
-/*****************************************************************************\
+/*
  * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
  * Copyright (C) 2007 The Regents of the University of California.
  * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -6,7 +6,7 @@
  * UCRL-CODE-235197
  *
  * This file is part of the SPL, Solaris Porting Layer.
- * For details, see <http://github.com/behlendorf/spl/>.
+ * For details, see <http://zfsonlinux.org/>.
  *
  * The SPL is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -20,180 +20,332 @@
  *
  * You should have received a copy of the GNU General Public License along
  * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
- *****************************************************************************
+ *
  * Solaris Porting Layer (SPL) Condition Variables Implementation.
-\*****************************************************************************/
+ */
 
 #include <sys/condvar.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_CONDVAR
+#include <sys/time.h>
+#include <linux/hrtimer.h>
 
 void
 __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
 {
-	int flags = KM_SLEEP;
-
-	SENTRY;
 	ASSERT(cvp);
-	ASSERT(name);
+	ASSERT(name == NULL);
 	ASSERT(type == CV_DEFAULT);
 	ASSERT(arg == NULL);
 
 	cvp->cv_magic = CV_MAGIC;
 	init_waitqueue_head(&cvp->cv_event);
-	spin_lock_init(&cvp->cv_lock);
+	init_waitqueue_head(&cvp->cv_destroy);
 	atomic_set(&cvp->cv_waiters, 0);
+	atomic_set(&cvp->cv_refs, 1);
 	cvp->cv_mutex = NULL;
-	cvp->cv_name = NULL;
-	cvp->cv_name_size = strlen(name) + 1;
-
-	/* We may be called when there is a non-zero preempt_count or
-	 * interrupts are disabled is which case we must not sleep.
-	 */
-	if (current_thread_info()->preempt_count || irqs_disabled())
-		flags = KM_NOSLEEP;
+}
+EXPORT_SYMBOL(__cv_init);
 
-	cvp->cv_name = kmem_alloc(cvp->cv_name_size, flags);
-	if (cvp->cv_name)
-		strcpy(cvp->cv_name, name);
+static int
+cv_destroy_wakeup(kcondvar_t *cvp)
+{
+	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
+		ASSERT(cvp->cv_mutex == NULL);
+		ASSERT(!waitqueue_active(&cvp->cv_event));
+		return (1);
+	}
 
-	SEXIT;
+	return (0);
 }
-EXPORT_SYMBOL(__cv_init);
 
 void
 __cv_destroy(kcondvar_t *cvp)
 {
-	SENTRY;
 	ASSERT(cvp);
 	ASSERT(cvp->cv_magic == CV_MAGIC);
-	spin_lock(&cvp->cv_lock);
-	ASSERT(atomic_read(&cvp->cv_waiters) == 0);
-	ASSERT(!waitqueue_active(&cvp->cv_event));
 
-	if (cvp->cv_name)
-		kmem_free(cvp->cv_name, cvp->cv_name_size);
+	cvp->cv_magic = CV_DESTROY;
+	atomic_dec(&cvp->cv_refs);
 
-	spin_unlock(&cvp->cv_lock);
-	memset(cvp, CV_POISON, sizeof(*cvp));
-	SEXIT;
+	/* Block until all waiters are woken and all references are dropped. */
+	while (cv_destroy_wakeup(cvp) == 0)
+		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);
+
+	ASSERT3P(cvp->cv_mutex, ==, NULL);
+	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
+	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
+	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
 }
 EXPORT_SYMBOL(__cv_destroy);
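The reworked teardown above changes the cv_destroy() contract: instead of
asserting that no waiters remain, it now blocks until every waiter has woken
and dropped its reference. For callers this means a broadcast followed
immediately by cv_destroy() is safe. A minimal sketch of that usage, assuming
hypothetical names (my_obj_t, mo_lock, mo_cv, mo_shutdown) that are not part
of this patch:

	#include <sys/types.h>
	#include <sys/mutex.h>
	#include <sys/condvar.h>

	/* Hypothetical object embedding a cv; names invented for this sketch. */
	typedef struct my_obj {
		kmutex_t	mo_lock;
		kcondvar_t	mo_cv;
		boolean_t	mo_shutdown;
	} my_obj_t;

	static void
	my_obj_teardown(my_obj_t *mo)
	{
		mutex_enter(&mo->mo_lock);
		mo->mo_shutdown = B_TRUE;
		cv_broadcast(&mo->mo_cv);	/* wake every waiter */
		mutex_exit(&mo->mo_lock);

		/*
		 * With this patch cv_destroy() spins in cv_destroy_wakeup()
		 * until cv_waiters and cv_refs both reach zero, so a waiter
		 * that was woken but has not yet left cv_wait_common() no
		 * longer races with the teardown.
		 */
		cv_destroy(&mo->mo_cv);
		mutex_destroy(&mo->mo_lock);
	}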
 
 static void
-cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state)
+cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
 {
 	DEFINE_WAIT(wait);
-	SENTRY;
+	kmutex_t *m;
 
 	ASSERT(cvp);
 	ASSERT(mp);
 	ASSERT(cvp->cv_magic == CV_MAGIC);
-	spin_lock(&cvp->cv_lock);
 	ASSERT(mutex_owned(mp));
+	atomic_inc(&cvp->cv_refs);
 
-	if (cvp->cv_mutex == NULL)
-		cvp->cv_mutex = mp;
-
+	m = ACCESS_ONCE(cvp->cv_mutex);
+	if (!m)
+		m = xchg(&cvp->cv_mutex, mp);
 	/* Ensure the same mutex is used by all callers */
-	ASSERT(cvp->cv_mutex == mp);
-	spin_unlock(&cvp->cv_lock);
+	ASSERT(m == NULL || m == mp);
 
 	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
 	atomic_inc(&cvp->cv_waiters);
 
-	/* Mutex should be dropped after prepare_to_wait() this
+	/*
+	 * The mutex must be dropped only after prepare_to_wait(); this
 	 * ensures we're linked in to the waiters list and avoids the
-	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
+	 * race where 'cvp->cv_waiters > 0' but the list is empty.
+	 */
 	mutex_exit(mp);
-	schedule();
-	mutex_enter(mp);
+	if (io)
+		io_schedule();
+	else
+		schedule();
+
+	/* With no more waiters, a different mutex could now be used */
+	if (atomic_dec_and_test(&cvp->cv_waiters)) {
+		/*
+		 * This is set without any lock, so it's racy.  But this is
+		 * just for debugging anyway, so make it best-effort.
+		 */
+		cvp->cv_mutex = NULL;
+		wake_up(&cvp->cv_destroy);
+	}
 
-	atomic_dec(&cvp->cv_waiters);
 	finish_wait(&cvp->cv_event, &wait);
-	SEXIT;
+	atomic_dec(&cvp->cv_refs);
+
+	/*
+	 * Reacquire the mutex only after releasing the cvp; otherwise we
+	 * could deadlock with a thread that holds the mutex and is calling
+	 * cv_destroy().
+	 */
+	mutex_enter(mp);
 }
 
 void
 __cv_wait(kcondvar_t *cvp, kmutex_t *mp)
 {
-	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE);
+	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
 }
 EXPORT_SYMBOL(__cv_wait);
 
 void
-__cv_wait_interruptible(kcondvar_t *cvp, kmutex_t *mp)
+__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
 {
-	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE);
+	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
 }
-EXPORT_SYMBOL(__cv_wait_interruptible);
+EXPORT_SYMBOL(__cv_wait_sig);
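The cv_wait_common() path above supports the standard Illumos predicate-loop
idiom: the caller must hold the mutex (ASSERT(mutex_owned(mp))), every caller
must pass the same mutex (the xchg() check), and wakeups may be spurious, so
the predicate is re-tested each time the wait returns. A minimal sketch,
assuming a hypothetical work queue whose names (myq_t, q_lock, q_cv, q_len)
are invented for illustration:

	#include <sys/mutex.h>
	#include <sys/condvar.h>

	/* Hypothetical queue guarded by q_lock; not part of this patch. */
	typedef struct myq {
		kmutex_t	q_lock;
		kcondvar_t	q_cv;
		int		q_len;
	} myq_t;

	static void
	myq_pop_wait(myq_t *q)
	{
		mutex_enter(&q->q_lock);
		while (q->q_len == 0)
			cv_wait(&q->q_cv, &q->q_lock); /* drops, then retakes q_lock */
		q->q_len--;			/* predicate holds here */
		mutex_exit(&q->q_lock);
	}

__cv_wait_sig() follows the same pattern but sleeps interruptibly, and
__cv_wait_io() accounts the sleep as I/O wait via io_schedule().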
 
-/* 'expire_time' argument is an absolute wall clock time in jiffies.
+void
+__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
+{
+	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
+}
+EXPORT_SYMBOL(__cv_wait_io);
+
+/*
+ * 'expire_time' argument is an absolute wall clock time in jiffies.
  * Return value is time left (expire_time - now) or -1 if timeout occurred.
  */
-clock_t
-__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time)
+static clock_t
+__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
+    int state)
 {
 	DEFINE_WAIT(wait);
+	kmutex_t *m;
 	clock_t time_left;
-	SENTRY;
 
 	ASSERT(cvp);
 	ASSERT(mp);
 	ASSERT(cvp->cv_magic == CV_MAGIC);
-	spin_lock(&cvp->cv_lock);
 	ASSERT(mutex_owned(mp));
 
-	if (cvp->cv_mutex == NULL)
-		cvp->cv_mutex = mp;
-
-	/* Ensure the same mutex is used by all callers */
-	ASSERT(cvp->cv_mutex == mp);
-	spin_unlock(&cvp->cv_lock);
-
 	/* XXX - Does not handle jiffie wrap properly */
 	time_left = expire_time - jiffies;
 	if (time_left <= 0)
-		SRETURN(-1);
+		return (-1);
 
-	prepare_to_wait_exclusive(&cvp->cv_event, &wait,
-	    TASK_UNINTERRUPTIBLE);
+	atomic_inc(&cvp->cv_refs);
+	m = ACCESS_ONCE(cvp->cv_mutex);
+	if (!m)
+		m = xchg(&cvp->cv_mutex, mp);
+	/* Ensure the same mutex is used by all callers */
+	ASSERT(m == NULL || m == mp);
+
+	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
 	atomic_inc(&cvp->cv_waiters);
 
-	/* Mutex should be dropped after prepare_to_wait() this
+	/*
+	 * The mutex must be dropped only after prepare_to_wait(); this
 	 * ensures we're linked in to the waiters list and avoids the
-	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
+	 * race where 'cvp->cv_waiters > 0' but the list is empty.
+	 */
 	mutex_exit(mp);
 	time_left = schedule_timeout(time_left);
-	mutex_enter(mp);
-	atomic_dec(&cvp->cv_waiters);
+	/* With no more waiters, a different mutex could now be used */
+	if (atomic_dec_and_test(&cvp->cv_waiters)) {
+		/*
+		 * This is set without any lock, so it's racy.  But this is
+		 * just for debugging anyway, so make it best-effort.
+		 */
+		cvp->cv_mutex = NULL;
+		wake_up(&cvp->cv_destroy);
+	}
+
 	finish_wait(&cvp->cv_event, &wait);
+	atomic_dec(&cvp->cv_refs);
+
+	/*
+	 * Reacquire the mutex only after releasing the cvp; otherwise we
+	 * could deadlock with a thread that holds the mutex and is calling
+	 * cv_destroy().
+	 */
+	mutex_enter(mp);
+
+	return (time_left > 0 ? time_left : -1);
+}
 
-	SRETURN(time_left > 0 ? time_left : -1);
+clock_t
+__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
+{
+	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE));
 }
 EXPORT_SYMBOL(__cv_timedwait);
 
+clock_t
+__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
+{
+	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE));
+}
+EXPORT_SYMBOL(__cv_timedwait_sig);
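Because __cv_timedwait_common() takes an absolute expiry in jiffies and
returns -1 on timeout, callers typically compute the deadline once and stay
in the wait loop until the predicate holds or -1 comes back. A sketch built
on the hypothetical myq_t above, assuming SPL's ddi_get_lbolt() and
SEC_TO_TICK() helpers are available:

	/* Wait up to one second for an entry; -1 means we timed out. */
	static int
	myq_pop_timed(myq_t *q)
	{
		clock_t deadline = ddi_get_lbolt() + SEC_TO_TICK(1);

		mutex_enter(&q->q_lock);
		while (q->q_len == 0) {
			if (cv_timedwait(&q->q_cv, &q->q_lock, deadline) == -1) {
				mutex_exit(&q->q_lock);
				return (-1);	/* deadline passed, queue still empty */
			}
		}
		q->q_len--;
		mutex_exit(&q->q_lock);
		return (0);
	}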
 
+/*
+ * 'expire_time' argument is an absolute clock time in nanoseconds.
+ * Return value is time left (expire_time - now) or -1 if timeout occurred.
+ */
+static clock_t
+__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
+    int state)
+{
+	DEFINE_WAIT(wait);
+	kmutex_t *m;
+	hrtime_t time_left;
+	ktime_t ktime_left;
+
+	ASSERT(cvp);
+	ASSERT(mp);
+	ASSERT(cvp->cv_magic == CV_MAGIC);
+	ASSERT(mutex_owned(mp));
+
+	time_left = expire_time - gethrtime();
+	if (time_left <= 0)
+		return (-1);
+
+	atomic_inc(&cvp->cv_refs);
+	m = ACCESS_ONCE(cvp->cv_mutex);
+	if (!m)
+		m = xchg(&cvp->cv_mutex, mp);
+	/* Ensure the same mutex is used by all callers */
+	ASSERT(m == NULL || m == mp);
+
+	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
+	atomic_inc(&cvp->cv_waiters);
+
+	/*
+	 * The mutex must be dropped only after prepare_to_wait(); this
+	 * ensures we're linked in to the waiters list and avoids the
+	 * race where 'cvp->cv_waiters > 0' but the list is empty.
+	 */
+	mutex_exit(mp);
+
+	/*
+	 * Allow a 100 us range to give the kernel an opportunity to
+	 * coalesce interrupts.
+	 */
+	ktime_left = ktime_set(0, time_left);
+	schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC,
+	    HRTIMER_MODE_REL);
+
+	/* With no more waiters, a different mutex could now be used */
+	if (atomic_dec_and_test(&cvp->cv_waiters)) {
+		/*
+		 * This is set without any lock, so it's racy.  But this is
+		 * just for debugging anyway, so make it best-effort.
+		 */
+		cvp->cv_mutex = NULL;
+		wake_up(&cvp->cv_destroy);
+	}
+
+	finish_wait(&cvp->cv_event, &wait);
+	atomic_dec(&cvp->cv_refs);
+
+	mutex_enter(mp);
+	time_left = expire_time - gethrtime();
+	return (time_left > 0 ? NSEC_TO_TICK(time_left) : -1);
+}
+
+/*
+ * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
+ */
+static clock_t
+cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
+    hrtime_t res, int flag, int state)
+{
+	if (res > 1) {
+		/* Align the expiration time to the specified resolution. */
+		if (flag & CALLOUT_FLAG_ROUNDUP)
+			tim += res - 1;
+		tim = (tim / res) * res;
+	}
+
+	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
+		tim += gethrtime();
+
+	return (__cv_timedwait_hires(cvp, mp, tim, state));
+}
+
+clock_t
+cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
+    int flag)
+{
+	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
+	    TASK_UNINTERRUPTIBLE));
+}
+EXPORT_SYMBOL(cv_timedwait_hires);
+
+clock_t
+cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
+    hrtime_t res, int flag)
+{
+	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
+	    TASK_INTERRUPTIBLE));
+}
+EXPORT_SYMBOL(cv_timedwait_sig_hires);
+
 void
 __cv_signal(kcondvar_t *cvp)
 {
-	SENTRY;
 	ASSERT(cvp);
 	ASSERT(cvp->cv_magic == CV_MAGIC);
+	atomic_inc(&cvp->cv_refs);
 
-	/* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
-	 * waiter will be set runable with each call to wake_up().
-	 * Additionally wake_up() holds a spin_lock assoicated with
-	 * the wait queue to ensure we don't race waking up processes. */
+	/*
+	 * All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
+	 * waiter will be set runnable with each call to wake_up().
+	 * Additionally, wake_up() holds a spin_lock associated with
+	 * the wait queue to ensure we don't race waking up processes.
+	 */
 	if (atomic_read(&cvp->cv_waiters) > 0)
 		wake_up(&cvp->cv_event);
 
-	SEXIT;
+	atomic_dec(&cvp->cv_refs);
 }
 EXPORT_SYMBOL(__cv_signal);
 
@@ -202,13 +354,15 @@
 __cv_broadcast(kcondvar_t *cvp)
 {
 	ASSERT(cvp);
 	ASSERT(cvp->cv_magic == CV_MAGIC);
-	SENTRY;
+	atomic_inc(&cvp->cv_refs);
 
-	/* Wake_up_all() will wake up all waiters even those which
-	 * have the WQ_FLAG_EXCLUSIVE flag set. */
+	/*
+	 * wake_up_all() will wake up all waiters, even those which
+	 * have the WQ_FLAG_EXCLUSIVE flag set.
+	 */
 	if (atomic_read(&cvp->cv_waiters) > 0)
 		wake_up_all(&cvp->cv_event);
 
-	SEXIT;
+	atomic_dec(&cvp->cv_refs);
 }
 EXPORT_SYMBOL(__cv_broadcast);
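To make the rounding in cv_timedwait_hires_common() concrete, here is a
worked call against the hypothetical myq_t from the earlier sketches; the
nanosecond values are chosen only to illustrate the CALLOUT_FLAG_ROUNDUP
arithmetic and are not taken from this patch:

	/*
	 * With tim = 510,000 ns (relative) and res = 100,000 ns:
	 *
	 *   CALLOUT_FLAG_ROUNDUP: (510000 + 99999) / 100000 * 100000 = 600000 ns
	 *   without the flag:      510000 / 100000 * 100000          = 500000 ns
	 *
	 * CALLOUT_FLAG_ABSOLUTE is not passed, so the wrapper adds
	 * gethrtime() to make the deadline absolute before calling
	 * __cv_timedwait_hires().
	 */
	static void
	myq_wait_hires(myq_t *q)
	{
		mutex_enter(&q->q_lock);
		(void) cv_timedwait_hires(&q->q_cv, &q->q_lock,
		    510 * (hrtime_t)NSEC_PER_USEC,	/* tim: 510,000 ns, relative */
		    100 * (hrtime_t)NSEC_PER_USEC,	/* res: 100,000 ns */
		    CALLOUT_FLAG_ROUNDUP);
		mutex_exit(&q->q_lock);
	}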