#define DEBUG_CALLB
#ifndef DEBUG_CALLB
-#define CALLB_CPR_ASSERT(cp) BUG_ON(!(MUTEX_HELD((cp)->cc_lockp)));
+#define CALLB_CPR_ASSERT(cp) ASSERT(MUTEX_HELD((cp)->cc_lockp));
#else
#define CALLB_CPR_ASSERT(cp)
#endif
static __inline__ void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
- BUG_ON(cvp == NULL);
- BUG_ON(type != CV_DEFAULT);
- BUG_ON(arg != NULL);
+ ASSERT(cvp);
+ ASSERT(type == CV_DEFAULT);
+ ASSERT(arg == NULL);
cvp->cv_magic = CV_MAGIC;
init_waitqueue_head(&cvp->cv_event);
static __inline__ void
cv_destroy(kcondvar_t *cvp)
{
- BUG_ON(cvp == NULL);
+ ASSERT(cvp);
+ ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock);
- BUG_ON(cvp->cv_magic != CV_MAGIC);
- BUG_ON(atomic_read(&cvp->cv_waiters) != 0);
- BUG_ON(waitqueue_active(&cvp->cv_event));
+ ASSERT(atomic_read(&cvp->cv_waiters) == 0);
+ ASSERT(!waitqueue_active(&cvp->cv_event));
if (cvp->cv_name)
kfree(cvp->cv_name);
{
DEFINE_WAIT(wait);
- BUG_ON(cvp == NULL || mtx == NULL);
+ ASSERT(cvp);
+ ASSERT(mtx);
+ ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock);
- BUG_ON(cvp->cv_magic != CV_MAGIC);
- BUG_ON(!mutex_owned(mtx));
+ ASSERT(mutex_owned(mtx));
if (cvp->cv_mutex == NULL)
cvp->cv_mutex = mtx;
/* Ensure the same mutex is used by all callers */
- BUG_ON(cvp->cv_mutex != mtx);
+ ASSERT(cvp->cv_mutex == mtx);
spin_unlock(&cvp->cv_lock);
prepare_to_wait_exclusive(&cvp->cv_event, &wait,
DEFINE_WAIT(wait);
clock_t time_left;
- BUG_ON(cvp == NULL || mtx == NULL);
+ ASSERT(cvp);
+ ASSERT(mtx);
+ ASSERT(cvp->cv_magic == CV_MAGIC);
spin_lock(&cvp->cv_lock);
- BUG_ON(cvp->cv_magic != CV_MAGIC);
- BUG_ON(!mutex_owned(mtx));
+ ASSERT(mutex_owned(mtx));
if (cvp->cv_mutex == NULL)
cvp->cv_mutex = mtx;
/* Ensure the same mutex is used by all callers */
- BUG_ON(cvp->cv_mutex != mtx);
+ ASSERT(cvp->cv_mutex == mtx);
spin_unlock(&cvp->cv_lock);
/* XXX - Does not handle jiffie wrap properly */
static __inline__ void
cv_signal(kcondvar_t *cvp)
{
- BUG_ON(cvp == NULL);
- BUG_ON(cvp->cv_magic != CV_MAGIC);
+ ASSERT(cvp);
+ ASSERT(cvp->cv_magic == CV_MAGIC);
 /* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
  * waiter will be made runnable with each call to wake_up().
static __inline__ void
cv_broadcast(kcondvar_t *cvp)
{
- BUG_ON(cvp == NULL);
- BUG_ON(cvp->cv_magic != CV_MAGIC);
+ ASSERT(cvp);
+ ASSERT(cvp->cv_magic == CV_MAGIC);
/* Wake_up_all() will wake up all waiters even those which
* have the WQ_FLAG_EXCLUSIVE flag set. */
#define S_DEBUG 0x00001000
#define S_GENERIC 0x00002000
#define S_PROC 0x00004000
+#define S_MODULE 0x00008000
#define D_TRACE 0x00000001
#define D_INFO 0x00000002
#define SBUG() spl_debug_bug(__FILE__, __FUNCTION__, __LINE__);
+#ifdef __ia64__
+#define CDEBUG_STACK() (THREAD_SIZE - \
+ ((unsigned long)__builtin_dwarf_cfa() & \
+ (THREAD_SIZE - 1)))
+#else
+#define CDEBUG_STACK() (THREAD_SIZE - \
+ ((unsigned long)__builtin_frame_address(0) & \
+ (THREAD_SIZE - 1)))
+# endif /* __ia64__ */
+
+#define __CHECK_STACK(file, func, line) \
+do { \
+ unsigned long _stack = CDEBUG_STACK(); \
+ unsigned long _soft_limit = (9 * THREAD_SIZE) / 10; \
+ \
+ if (unlikely(_stack > _soft_limit && _stack > spl_debug_stack)){\
+ spl_debug_stack = _stack; \
+ if (_stack <= THREAD_SIZE) { \
+ spl_debug_msg(NULL, D_TRACE, D_WARNING, \
+ file, func, line, "Warning " \
+ "exceeded 90%% of maximum safe " \
+ "stack size (%lu/%lu)\n", \
+ _stack, THREAD_SIZE); \
+ } else { \
+ spl_debug_msg(NULL, D_TRACE, D_WARNING, \
+ file, func, line, "Error " \
+ "exceeded maximum safe stack " \
+ "size (%lu/%lu)\n", \
+ _stack, THREAD_SIZE); \
+ SBUG(); \
+ } \
+ } \
+} while (0)
+
+#define CHECK_STACK()__CHECK_STACK(__FILE__, __func__, __LINE__)
+
/* ASSERTION that is safe to use within the debug system */
#define __ASSERT(cond) \
do { \
+ CHECK_STACK(); \
+ \
if (unlikely(!(cond))) { \
printk(KERN_ERR "ASSERTION("#cond") failed"); \
SBUG(); \
 /* ASSERTION that will debug log; used outside the debug system */
#define ASSERT(cond) \
do { \
+ CHECK_STACK(); \
+ \
if (unlikely(!(cond))) { \
spl_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \
__FILE__, __FUNCTION__, __LINE__, \
#define ASSERTF(cond, fmt, a...) \
do { \
+ CHECK_STACK(); \
+ \
if (unlikely(!(cond))) { \
spl_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \
__FILE__, __FUNCTION__, __LINE__, \
do { \
const TYPE __left = (TYPE)(LEFT); \
const TYPE __right = (TYPE)(RIGHT); \
+ \
+ CHECK_STACK(); \
+ \
if (!(__left OP __right)) { \
spl_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \
__FILE__, __FUNCTION__, __LINE__, \
spl_debug_vmsg(cdls, subsys, mask, file, fn, \
line, NULL, NULL, format, ##a)
-#ifdef __ia64__
-#define CDEBUG_STACK() (THREAD_SIZE - \
- ((unsigned long)__builtin_dwarf_cfa() & \
- (THREAD_SIZE - 1)))
-#else
-#define CDEBUG_STACK() (THREAD_SIZE - \
- ((unsigned long)__builtin_frame_address(0) & \
- (THREAD_SIZE - 1)))
-# endif /* __ia64__ */
-
-#define __CHECK_STACK(file, func, line) \
-do { \
- unsigned long _stack = CDEBUG_STACK(); \
- \
- if (_stack > (3*THREAD_SIZE/4) && _stack > spl_debug_stack) { \
- spl_debug_stack = _stack; \
- spl_debug_msg(NULL, D_TRACE, D_WARNING, \
- file, func, line, \
- "Exceeded maximum safe stack " \
- "%lu/%lu\n", _stack, THREAD_SIZE); \
- __ASSERT(0); \
- } \
-} while (0)
-
-#define CHECK_STACK()__CHECK_STACK(__FILE__, __func__, __LINE__)
-
#define __CDEBUG(cdls, subsys, mask, format, a...) \
do { \
CHECK_STACK(); \
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
+#include <sys/debug.h>
/*
* Memory allocation interfaces
*/
#define kmem_free(ptr, size) \
({ \
- BUG_ON(!(ptr) || (size) < 0); \
+ ASSERT((ptr) || (size > 0)); \
atomic64_sub((size), &kmem_alloc_used); \
memset(ptr, 0x5a, (size)); /* Poison */ \
kfree(ptr); \
#define __vmem_alloc(size, flags) \
({ void *_ptr_; \
\
- BUG_ON(!(flags & KM_SLEEP)); \
+ ASSERT(flags & KM_SLEEP); \
\
_ptr_ = (void *)__vmalloc((size), ((flags) | \
__GFP_HIGHMEM), PAGE_KERNEL); \
#define vmem_free(ptr, size) \
({ \
- BUG_ON(!(ptr) || (size) < 0); \
+ ASSERT((ptr) || (size > 0)); \
atomic64_sub((size), &vmem_alloc_used); \
memset(ptr, 0x5a, (size)); /* Poison */ \
vfree(ptr); \
#define kmem_zalloc(size, flags) kzalloc((size), (flags))
#define kmem_free(ptr, size) \
({ \
- BUG_ON(!(ptr) || (size) < 0); \
+ ASSERT((ptr) || (size > 0)); \
kfree(ptr); \
})
PAGE_KERNEL)
#define vmem_free(ptr, size) \
({ \
- BUG_ON(!(ptr) || (size) < 0); \
+ ASSERT((ptr) || (size > 0)); \
vfree(ptr); \
})
static __inline__ void
mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
{
- BUG_ON(mp == NULL);
- BUG_ON(ibc != NULL); /* XXX - Spin mutexes not needed? */
- BUG_ON(type != MUTEX_DEFAULT); /* XXX - Only default type supported? */
+ ASSERT(mp);
+ ASSERT(ibc == NULL); /* XXX - Spin mutexes not needed */
+ ASSERT(type == MUTEX_DEFAULT); /* XXX - Only default type supported */
mp->km_magic = KM_MAGIC;
spin_lock_init(&mp->km_lock);
static __inline__ void
mutex_destroy(kmutex_t *mp)
{
- BUG_ON(mp == NULL);
+ ASSERT(mp);
+ ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock);
- BUG_ON(mp->km_magic != KM_MAGIC);
if (mp->km_name)
kfree(mp->km_name);
static __inline__ void
mutex_enter(kmutex_t *mp)
{
- BUG_ON(mp == NULL);
+ ASSERT(mp);
+ ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock);
- BUG_ON(mp->km_magic != KM_MAGIC);
if (unlikely(in_atomic() && !current->exit_state)) {
printk("May schedule while atomic: %s/0x%08x/%d\n",
down(&mp->km_sem);
spin_lock(&mp->km_lock);
- BUG_ON(mp->km_owner != NULL);
+ ASSERT(mp->km_owner == NULL);
mp->km_owner = current;
spin_unlock(&mp->km_lock);
}
{
int rc;
- BUG_ON(mp == NULL);
+ ASSERT(mp);
+ ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock);
- BUG_ON(mp->km_magic != KM_MAGIC);
if (unlikely(in_atomic() && !current->exit_state)) {
printk("May schedule while atomic: %s/0x%08x/%d\n",
rc = down_trylock(&mp->km_sem); /* returns 0 if acquired */
if (rc == 0) {
spin_lock(&mp->km_lock);
- BUG_ON(mp->km_owner != NULL);
+ ASSERT(mp->km_owner == NULL);
mp->km_owner = current;
spin_unlock(&mp->km_lock);
return 1;
static __inline__ void
mutex_exit(kmutex_t *mp)
{
- BUG_ON(mp == NULL);
+ ASSERT(mp);
+ ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock);
- BUG_ON(mp->km_magic != KM_MAGIC);
- BUG_ON(mp->km_owner != current);
+
+ ASSERT(mp->km_owner == current);
mp->km_owner = NULL;
spin_unlock(&mp->km_lock);
up(&mp->km_sem);
{
int rc;
- BUG_ON(mp == NULL);
+ ASSERT(mp);
+ ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock);
- BUG_ON(mp->km_magic != KM_MAGIC);
rc = (mp->km_owner == current);
spin_unlock(&mp->km_lock);
{
kthread_t *thr;
- BUG_ON(mp == NULL);
+ ASSERT(mp);
+ ASSERT(mp->km_magic == KM_MAGIC);
spin_lock(&mp->km_lock);
- BUG_ON(mp->km_magic != KM_MAGIC);
thr = mp->km_owner;
spin_unlock(&mp->km_lock);
static __inline__ int
random_get_bytes(uint8_t *ptr, size_t len)
{
- BUG_ON(len < 0);
+ ASSERT(len >= 0);
get_random_bytes((void *)ptr,(int)len);
return 0;
}
static __inline__ int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
- BUG_ON(len < 0);
+ ASSERT(len >= 0);
get_random_bytes((void *)ptr,(int)len);
return 0;
}
static __inline__ void
rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
{
- BUG_ON(type != RW_DEFAULT); /* XXX no irq handler use */
- BUG_ON(arg != NULL); /* XXX no irq handler use */
+ ASSERT(type == RW_DEFAULT); /* XXX no irq handler use */
+ ASSERT(arg == NULL); /* XXX no irq handler use */
+
rwlp->rw_magic = RW_MAGIC;
rwlp->rw_owner = NULL; /* no one holds the write lock yet */
init_rwsem(&rwlp->rw_sem);
static __inline__ void
rw_destroy(krwlock_t *rwlp)
{
- BUG_ON(rwlp == NULL);
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
- BUG_ON(rwlp->rw_owner != NULL);
+ ASSERT(rwlp);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
+ ASSERT(rwlp->rw_owner == NULL);
spin_lock(&rwlp->rw_sem.wait_lock);
- BUG_ON(!list_empty(&rwlp->rw_sem.wait_list));
+ ASSERT(list_empty(&rwlp->rw_sem.wait_list));
spin_unlock(&rwlp->rw_sem.wait_lock);
if (rwlp->rw_name)
{
int result;
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ ASSERT(rwlp);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
+
switch (rw) {
/* these functions return 1 if success, 0 if contention */
case RW_READER:
if (result) {
/* there better not be anyone else
* holding the write lock here */
- BUG_ON(rwlp->rw_owner != NULL);
+ ASSERT(rwlp->rw_owner == NULL);
rwlp->rw_owner = current;
}
break;
default:
- BUG_ON(1);
+ SBUG();
}
return result;
static __inline__ void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ ASSERT(rwlp);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
+
switch (rw) {
case RW_READER:
/* Here the Solaris code would block
/* there better not be anyone else
* holding the write lock here */
- BUG_ON(rwlp->rw_owner != NULL);
+ ASSERT(rwlp->rw_owner == NULL);
rwlp->rw_owner = current;
break;
default:
- BUG_ON(1);
+ SBUG();
}
}
static __inline__ void
rw_exit(krwlock_t *rwlp)
{
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ ASSERT(rwlp);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
/* rw_owner is held by current
* thread iff it is a writer */
static __inline__ void
rw_downgrade(krwlock_t *rwlp)
{
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
- BUG_ON(rwlp->rw_owner != current);
+ ASSERT(rwlp);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
+ ASSERT(rwlp->rw_owner == current);
+
rwlp->rw_owner = NULL;
downgrade_write(&rwlp->rw_sem);
}
rw_tryupgrade(krwlock_t *rwlp)
{
int result = 0;
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
+
+ ASSERT(rwlp);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
spin_lock(&rwlp->rw_sem.wait_lock);
/* Check if upgrade failed. Should not ever happen
* if we got to this point */
- BUG_ON(!result);
- BUG_ON(rwlp->rw_owner != NULL);
+ ASSERT(result);
+ ASSERT(rwlp->rw_owner == NULL);
rwlp->rw_owner = current;
spin_unlock(&rwlp->rw_sem.wait_lock);
return 1;
static __inline__ kthread_t *
rw_owner(krwlock_t *rwlp)
{
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ ASSERT(rwlp);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
+
return rwlp->rw_owner;
}
static __inline__ int
issig(int why)
{
- BUG_ON(!(why == FORREAL || why == JUSTLOOKING));
+ ASSERT(why == FORREAL || why == JUSTLOOKING);
return signal_pending(current);
}
#define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i) ((void)0)
/* Missing globals */
-extern unsigned long spl_hostid;
-extern char spl_hw_serial[11];
+extern long spl_hostid;
+extern char hw_serial[11];
extern int p0;
/* Missing misc functions */
#define hz \
({ \
- BUG_ON(HZ < 100 || HZ > MICROSEC); \
+ ASSERT(HZ >= 100 && HZ <= MICROSEC); \
HZ; \
})
#include <sys/atomic.h>
+#ifdef DEBUG_SUBSYSTEM
+#undef DEBUG_SUBSYSTEM
+#endif
+
+#define DEBUG_SUBSYSTEM S_ATOMIC
+
/* Global atomic lock declarations */
spinlock_t atomic64_lock = SPIN_LOCK_UNLOCKED;
spinlock_t atomic32_lock = SPIN_LOCK_UNLOCKED;
void spl_debug_bug(char *file, const char *func, const int line)
{
spl_debug_catastrophe = 1;
- spl_debug_msg(NULL, 0, D_EMERG, file, func, line, "SPL BUG\n");
+ spl_debug_msg(NULL, 0, D_EMERG, file, func, line, "SBUG\n");
if (in_interrupt()) {
- panic("SPL BUG in interrupt.\n");
+ panic("SBUG in interrupt.\n");
/* not reached */
}
spl_debug_dumplog();
if (spl_debug_panic_on_bug)
- panic("SPL BUG");
+ panic("SBUG");
set_task_state(current, TASK_UNINTERRUPTIBLE);
while (1)
#include <sys/cmn_err.h>
#include "config.h"
+#ifdef DEBUG_SUBSYSTEM
+#undef DEBUG_SUBSYSTEM
+#endif
+
+#define DEBUG_SUBSYSTEM S_GENERIC
+
static char ce_prefix[CE_IGNORE][10] = { "", "NOTICE: ", "WARNING: ", "" };
static char ce_suffix[CE_IGNORE][2] = { "", "\n", "\n", "" };
vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
va_end(ap);
- printk("%s", msg);
+ CERROR("%s", msg);
} /* cmn_err() */
EXPORT_SYMBOL(cmn_err);
if (ce != CE_NOTE) { /* suppress noise in stress testing */
vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
- printk("%s%s%s", ce_prefix[ce], msg, ce_suffix[ce]);
+ CERROR("%s%s%s", ce_prefix[ce], msg, ce_suffix[ce]);
}
} /* vcmn_err() */
EXPORT_SYMBOL(vcmn_err);
#define DEBUG_SUBSYSTEM S_GENERIC
-unsigned long spl_hostid = 0;
+long spl_hostid = 0;
EXPORT_SYMBOL(spl_hostid);
-char spl_hw_serial[11] = "<none>";
-EXPORT_SYMBOL(spl_hw_serial);
+char hw_serial[11] = "<none>";
+EXPORT_SYMBOL(hw_serial);
int p0 = 0;
EXPORT_SYMBOL(p0);
NULL };
/* Doing address resolution in the kernel is tricky and just
- * not a good idea in general. So to set the proper 'spl_hw_serial'
+ * not a good idea in general. So to set the proper 'hw_serial'
* use the usermodehelper support to ask '/bin/sh' to run
* '/usr/bin/hostid' and redirect the result to /proc/sys/spl/hostid
  * for us to use. It's a horrific solution but it will do for now.
if ((rc = set_hostid()))
GOTO(out4, rc = -EADDRNOTAVAIL);
- CWARN("Loaded Solaris Porting Layer v%s\n", VERSION);
+ printk("SPL: Loaded Solaris Porting Layer v%s\n", VERSION);
RETURN(rc);
out4:
proc_fini();
{
ENTRY;
- CWARN("Unloaded Solaris Porting Layer v%s\n", VERSION);
+ printk("SPL: Unloaded Solaris Porting Layer v%s\n", VERSION);
proc_fini();
vn_fini();
kmem_fini();
#include <sys/kmem.h>
+#ifdef DEBUG_SUBSYSTEM
+#undef DEBUG_SUBSYSTEM
+#endif
+
+#define DEBUG_SUBSYSTEM S_KMEM
+
/*
* Memory allocation interfaces
*/
/* Callback list must be in sync with linux slab caches */
kcc = kmem_cache_find_cache_cb(cache);
- BUG_ON(!kcc);
+ ASSERT(kcc);
+
constructor = kcc->kcc_constructor;
private = kcc->kcc_private;
/* Callback list must be in sync with linux slab caches */
kcc = kmem_cache_find_cache_cb(cache);
- BUG_ON(!kcc);
+ ASSERT(kcc);
+
destructor = kcc->kcc_destructor;
private = kcc->kcc_private;
kmem_cache_cb_t *kcc;
int shrinker_flag = 0;
char *cache_name;
+ ENTRY;
- /* FIXME: - Option currently unsupported by shim layer */
- BUG_ON(vmp);
+ /* XXX: - Option currently unsupported by shim layer */
+ ASSERT(!vmp);
cache_name = kzalloc(strlen(name) + 1, GFP_KERNEL);
if (cache_name == NULL)
- return NULL;
+ RETURN(NULL);
strcpy(cache_name, name);
cache = kmem_cache_create(cache_name, size, align, flags,
kmem_cache_generic_constructor,
kmem_cache_generic_destructor);
if (cache == NULL)
- return NULL;
+ RETURN(NULL);
/* Register shared shrinker function on initial cache create */
spin_lock(&kmem_cache_cb_lock);
if (kmem_cache_shrinker == NULL) {
kmem_cache_destroy(cache);
spin_unlock(&kmem_cache_cb_lock);
- return NULL;
+ RETURN(NULL);
}
}
remove_shrinker(kmem_cache_shrinker);
kmem_cache_destroy(cache);
- return NULL;
+ RETURN(NULL);
}
- return cache;
+ RETURN(cache);
}
EXPORT_SYMBOL(__kmem_cache_create);
char *name;
unsigned long flags;
int rc;
+ ENTRY;
spin_lock_irqsave(&kmem_cache_cb_lock, flags);
kcc = kmem_cache_find_cache_cb(cache);
spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
if (kcc == NULL)
- return -EINVAL;
+ RETURN(-EINVAL);
name = (char *)kmem_cache_name(cache);
rc = kmem_cache_destroy(cache);
remove_shrinker(kmem_cache_shrinker);
spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
- return rc;
+ RETURN(rc);
}
EXPORT_SYMBOL(__kmem_cache_destroy);
void
-__kmem_reap(void) {
+__kmem_reap(void)
+{
+ ENTRY;
/* Since there's no easy hook in to linux to force all the registered
* shrinkers to run we just run the ones registered for this shim */
kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
+ EXIT;
}
EXPORT_SYMBOL(__kmem_reap);
int
kmem_init(void)
{
+ ENTRY;
#ifdef DEBUG_KMEM
atomic64_set(&kmem_alloc_used, 0);
atomic64_set(&vmem_alloc_used, 0);
#endif
- return 0;
+ RETURN(0);
}
void
kmem_fini(void)
{
+ ENTRY;
#ifdef DEBUG_KMEM
if (atomic64_read(&kmem_alloc_used) != 0)
- printk("spl: Warning kmem leaked %ld/%ld bytes\n",
+ CWARN("kmem leaked %ld/%ld bytes\n",
atomic_read(&kmem_alloc_used), kmem_alloc_max);
if (atomic64_read(&vmem_alloc_used) != 0)
- printk("spl: Warning vmem leaked %ld/%ld bytes\n",
+ CWARN("vmem leaked %ld/%ld bytes\n",
atomic_read(&vmem_alloc_used), vmem_alloc_max);
#endif
+ EXIT;
}
#include <sys/kobj.h>
#include "config.h"
+#ifdef DEBUG_SUBSYSTEM
+#undef DEBUG_SUBSYSTEM
+#endif
+
+#define DEBUG_SUBSYSTEM S_KOBJ
+
struct _buf *
kobj_open_file(const char *name)
{
struct _buf *file;
vnode_t *vp;
int rc;
+ ENTRY;
if ((rc = vn_open(name, UIO_SYSSPACE, FREAD, 0644, &vp, 0, 0)))
- return ((_buf_t *)-1UL);
+ RETURN((_buf_t *)-1UL);
file = kmalloc(sizeof(_buf_t), GFP_KERNEL);
file->vp = vp;
- return file;
+ RETURN(file);
} /* kobj_open_file() */
EXPORT_SYMBOL(kobj_open_file);
void
kobj_close_file(struct _buf *file)
{
+ ENTRY;
VOP_CLOSE(file->vp, 0, 0, 0, 0, 0);
VN_RELE(file->vp);
kfree(file);
-
- return;
+ EXIT;
} /* kobj_close_file() */
EXPORT_SYMBOL(kobj_close_file);
int
kobj_read_file(struct _buf *file, char *buf, ssize_t size, offset_t off)
{
- return vn_rdwr(UIO_READ, file->vp, buf, size, off,
- UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL);
+ ENTRY;
+ RETURN(vn_rdwr(UIO_READ, file->vp, buf, size, off,
+ UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL));
} /* kobj_read_file() */
EXPORT_SYMBOL(kobj_read_file);
{
vattr_t vap;
int rc;
+ ENTRY;
rc = VOP_GETATTR(file->vp, &vap, 0, 0, NULL);
if (rc)
- return rc;
+ RETURN(rc);
*size = vap.va_size;
- return rc;
+ RETURN(rc);
} /* kobj_get_filesize() */
EXPORT_SYMBOL(kobj_get_filesize);
#include <sys/sunddi.h>
#include "config.h"
+#ifdef DEBUG_SUBSYSTEM
+#undef DEBUG_SUBSYSTEM
+#endif
+
+#define DEBUG_SUBSYSTEM S_MODULE
+
static spinlock_t dev_info_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(dev_info_list);
struct cb_ops *cb_ops;
struct file_operations *fops;
int rc;
+ ENTRY;
- BUG_ON(spec_type != S_IFCHR);
- BUG_ON(minor_num >= di->di_minors);
- BUG_ON(strcmp(node_type, DDI_PSEUDO));
- BUG_ON(flag != 0);
+ ASSERT(spec_type == S_IFCHR);
+ ASSERT(minor_num < di->di_minors);
+ ASSERT(!strcmp(node_type, DDI_PSEUDO));
+ ASSERT(flag == 0);
fops = kzalloc(sizeof(struct file_operations), GFP_KERNEL);
if (fops == NULL)
- return DDI_FAILURE;
+ RETURN(DDI_FAILURE);
cdev = cdev_alloc();
if (cdev == NULL) {
kfree(fops);
- return DDI_FAILURE;
+ RETURN(DDI_FAILURE);
}
cdev->ops = fops;
mutex_enter(&di->di_lock);
dev_ops = di->di_ops;
- BUG_ON(dev_ops == NULL);
+ ASSERT(dev_ops);
cb_ops = di->di_ops->devo_cb_ops;
- BUG_ON(cb_ops == NULL);
+ ASSERT(cb_ops);
/* Setup the fops to cb_ops mapping */
fops->owner = mod;
fops->write = mod_generic_write;
#endif
/* XXX: Currently unsupported operations */
- BUG_ON(cb_ops->cb_open != NULL);
- BUG_ON(cb_ops->cb_close != NULL);
- BUG_ON(cb_ops->cb_read != NULL);
- BUG_ON(cb_ops->cb_write != NULL);
- BUG_ON(cb_ops->cb_strategy != NULL);
- BUG_ON(cb_ops->cb_print != NULL);
- BUG_ON(cb_ops->cb_dump != NULL);
- BUG_ON(cb_ops->cb_devmap != NULL);
- BUG_ON(cb_ops->cb_mmap != NULL);
- BUG_ON(cb_ops->cb_segmap != NULL);
- BUG_ON(cb_ops->cb_chpoll != NULL);
- BUG_ON(cb_ops->cb_prop_op != NULL);
- BUG_ON(cb_ops->cb_str != NULL);
- BUG_ON(cb_ops->cb_aread != NULL);
- BUG_ON(cb_ops->cb_awrite != NULL);
+ ASSERT(cb_ops->cb_open == NULL);
+ ASSERT(cb_ops->cb_close == NULL);
+ ASSERT(cb_ops->cb_read == NULL);
+ ASSERT(cb_ops->cb_write == NULL);
+ ASSERT(cb_ops->cb_strategy == NULL);
+ ASSERT(cb_ops->cb_print == NULL);
+ ASSERT(cb_ops->cb_dump == NULL);
+ ASSERT(cb_ops->cb_devmap == NULL);
+ ASSERT(cb_ops->cb_mmap == NULL);
+ ASSERT(cb_ops->cb_segmap == NULL);
+ ASSERT(cb_ops->cb_chpoll == NULL);
+ ASSERT(cb_ops->cb_prop_op == NULL);
+ ASSERT(cb_ops->cb_str == NULL);
+ ASSERT(cb_ops->cb_aread == NULL);
+ ASSERT(cb_ops->cb_awrite == NULL);
di->di_minor = minor_num;
di->di_dev = MKDEV(di->di_major, di->di_minor);
rc = cdev_add(cdev, di->di_dev, 1);
if (rc) {
- printk("spl: Error adding cdev, %d\n", rc);
+ CERROR("Error adding cdev, %d\n", rc);
kfree(fops);
cdev_del(cdev);
mutex_exit(&di->di_lock);
- return DDI_FAILURE;
+ RETURN(DDI_FAILURE);
}
di->di_class = class_create(THIS_MODULE, name);
if (IS_ERR(di->di_class)) {
rc = PTR_ERR(di->di_class);
- printk("spl: Error creating %s class, %d\n", name, rc);
+ CERROR("Error creating %s class, %d\n", name, rc);
kfree(fops);
cdev_del(di->di_cdev);
mutex_exit(&di->di_lock);
- return DDI_FAILURE;
+ RETURN(DDI_FAILURE);
}
/* Do not append a 0 to devices with minor nums of 0 */
mutex_exit(&di->di_lock);
- return DDI_SUCCESS;
+ RETURN(DDI_SUCCESS);
}
EXPORT_SYMBOL(__ddi_create_minor_node);
void
__ddi_remove_minor_node(dev_info_t *di, char *name)
{
+ ENTRY;
mutex_enter(&di->di_lock);
__ddi_remove_minor_node_locked(di, name);
mutex_exit(&di->di_lock);
+ EXIT;
}
EXPORT_SYMBOL(ddi_remove_minor_node);
struct modldrv *drv = modlp->ml_modldrv;
struct dev_info *di;
int rc;
+ ENTRY;
di = dev_info_alloc(modlp->ml_major, modlp->ml_minors,
drv->drv_dev_ops);
if (di == NULL)
- return ENOMEM;
+ RETURN(ENOMEM);
/* XXX: Really we need to be calling devo_probe if it's available
* and then calling devo_attach for each device discovered. However
rc = drv->drv_dev_ops->devo_attach(di, DDI_ATTACH);
if (rc != DDI_SUCCESS) {
dev_info_free(di);
- return rc;
+ RETURN(rc);
}
drv->drv_dev_info = di;
- return DDI_SUCCESS;
+ RETURN(DDI_SUCCESS);
}
EXPORT_SYMBOL(__mod_install);
struct modldrv *drv = modlp->ml_modldrv;
struct dev_info *di = drv->drv_dev_info;
int rc;
+ ENTRY;
rc = drv->drv_dev_ops->devo_detach(di, DDI_DETACH);
if (rc != DDI_SUCCESS)
- return rc;
+ RETURN(rc);
dev_info_free(di);
drv->drv_dev_info = NULL;
- return DDI_SUCCESS;
+ RETURN(DDI_SUCCESS);
}
EXPORT_SYMBOL(__mod_remove);
ldi_ident_from_mod(struct modlinkage *modlp, ldi_ident_t *lip)
{
ldi_ident_t li;
+ ENTRY;
- BUG_ON(modlp == NULL || lip == NULL);
+ ASSERT(modlp);
+ ASSERT(lip);
li = kmalloc(sizeof(struct ldi_ident), GFP_KERNEL);
if (li == NULL)
- return ENOMEM;
+ RETURN(ENOMEM);
li->li_dev = MKDEV(modlp->ml_major, 0);
*lip = li;
- return 0;
+ RETURN(0);
}
EXPORT_SYMBOL(ldi_ident_from_mod);
void
ldi_ident_release(ldi_ident_t lip)
{
- BUG_ON(lip == NULL);
+ ENTRY;
+ ASSERT(lip);
kfree(lip);
+ EXIT;
}
EXPORT_SYMBOL(ldi_ident_release);
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int len, rc = 0;
- unsigned long val;
+ int32_t val;
char *end, str[32];
ENTRY;
if (rc < 0)
RETURN(rc);
- val = simple_strtoul(str, &end, 16);
+ val = simple_strtol(str, &end, 16);
if (str == end)
RETURN(-EINVAL);
- spl_hostid = val;
- sprintf(spl_hw_serial, "%lu", ((long)val >= 0) ? val : -val);
+ spl_hostid = (long)val;
+ sprintf(hw_serial, "%u", (val >= 0) ? val : -val);
*ppos += *lenp;
} else {
len = snprintf(str, sizeof(str), "%lx", spl_hostid);
{
.ctl_name = CTL_HW_SERIAL,
.procname = "hw_serial",
- .data = spl_hw_serial,
- .maxlen = sizeof(spl_hw_serial),
+ .data = hw_serial,
+ .maxlen = sizeof(hw_serial),
.mode = 0444,
.proc_handler = &proc_dostring,
},
#include <sys/rwlock.h>
+#ifdef DEBUG_SUBSYSTEM
+#undef DEBUG_SUBSYSTEM
+#endif
+
+#define DEBUG_SUBSYSTEM S_RWLOCK
+
int
__rw_read_held(krwlock_t *rwlp)
{
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
-
- if (__rw_lock_held(rwlp) && rwlp->rw_owner == NULL) {
- return 1;
- }
-
- return 0;
+ ENTRY;
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
+ RETURN(__rw_lock_held(rwlp) && rwlp->rw_owner == NULL);
}
EXPORT_SYMBOL(__rw_read_held);
int
__rw_write_held(krwlock_t *rwlp)
{
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
-
- if (rwlp->rw_owner == current) {
- return 1;
- }
-
- return 0;
+ ENTRY;
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
+ RETURN(rwlp->rw_owner == current);
}
EXPORT_SYMBOL(__rw_write_held);
__rw_lock_held(krwlock_t *rwlp)
{
int rc = 0;
+ ENTRY;
- BUG_ON(rwlp->rw_magic != RW_MAGIC);
+ ASSERT(rwlp->rw_magic == RW_MAGIC);
spin_lock_irq(&(rwlp->rw_sem.wait_lock));
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
spin_unlock_irq(&(rwlp->rw_sem.wait_lock));
- return rc;
+ RETURN(rc);
}
EXPORT_SYMBOL(__rw_lock_held);
#include <sys/taskq.h>
+#ifdef DEBUG_SUBSYSTEM
+#undef DEBUG_SUBSYSTEM
+#endif
+
+#define DEBUG_SUBSYSTEM S_TASKQ
+
/*
* Task queue interface
*
{
taskq_work_wrapper_t *tww = priv;
- BUG_ON(tww == NULL);
- BUG_ON(tww->tww_func == NULL);
+ ASSERT(tww);
+ ASSERT(tww->tww_func);
/* Call the real function and free the wrapper */
tww->tww_func(tww->tww_priv);
struct workqueue_struct *wq = tq;
taskq_work_wrapper_t *tww;
int rc;
+ ENTRY;
- BUG_ON(tq == NULL);
- BUG_ON(func == NULL);
+ ASSERT(tq);
+ ASSERT(func);
/* Use GFP_ATOMIC since this may be called in interrupt context */
tww = (taskq_work_wrapper_t *)kmalloc(sizeof(*tww), GFP_ATOMIC);
if (!tww)
- return (taskqid_t)0;
+ RETURN((taskqid_t)0);
INIT_WORK(&(tww->tww_work), taskq_work_handler, tww);
tww->tww_func = func;
rc = queue_work(wq, &(tww->tww_work));
if (!rc) {
kfree(tww);
- return (taskqid_t)0;
+ RETURN((taskqid_t)0);
}
- return (taskqid_t)wq;
+ RETURN((taskqid_t)wq);
}
EXPORT_SYMBOL(__taskq_dispatch);
int minalloc, int maxalloc, uint_t flags)
{
/* NOTE: Linux workqueue names are limited to 10 chars */
-
- return create_singlethread_workqueue(name);
+ ENTRY;
+ RETURN(create_singlethread_workqueue(name));
}
EXPORT_SYMBOL(__taskq_create);
void
__taskq_destroy(taskq_t *tq)
{
+ ENTRY;
destroy_workqueue(tq);
+ EXIT;
}
EXPORT_SYMBOL(__taskq_destroy);
void
__taskq_wait(taskq_t *tq)
{
+ ENTRY;
flush_workqueue(tq);
+ EXIT;
}
EXPORT_SYMBOL(__taskq_wait);
#include <sys/thread.h>
#include <sys/kmem.h>
+#ifdef DEBUG_SUBSYSTEM
+#undef DEBUG_SUBSYSTEM
+#endif
+
+#define DEBUG_SUBSYSTEM S_THREAD
+
/*
* Thread interfaces
*/
void (*func)(void *);
void *args;
- BUG_ON(tp->tp_magic != TP_MAGIC);
+ ASSERT(tp->tp_magic == TP_MAGIC);
func = tp->tp_func;
args = tp->tp_args;
set_current_state(tp->tp_state);
void
__thread_exit(void)
{
+ ENTRY;
+ EXIT;
do_exit(0);
- return;
+ /* Unreachable */
}
EXPORT_SYMBOL(__thread_exit);
thread_priv_t *tp;
DEFINE_WAIT(wait);
struct task_struct *tsk;
+ ENTRY;
/* Option pp is simply ignored */
/* Variable stack size unsupported */
- BUG_ON(stk != NULL);
- BUG_ON(stk != 0);
+ ASSERT(stk == NULL);
tp = kmem_alloc(sizeof(thread_priv_t), KM_SLEEP);
if (tp == NULL)
- return NULL;
+ RETURN(NULL);
tp->tp_magic = TP_MAGIC;
tp->tp_func = func;
tsk = kthread_create(thread_generic_wrapper, (void *)tp, "%s", name);
if (IS_ERR(tsk)) {
- printk("spl: Failed to create thread: %ld\n", PTR_ERR(tsk));
- return NULL;
+ CERROR("Failed to create thread: %ld\n", PTR_ERR(tsk));
+ RETURN(NULL);
}
wake_up_process(tsk);
- return (kthread_t *)tsk;
+ RETURN((kthread_t *)tsk);
}
EXPORT_SYMBOL(__thread_create);
#include <sys/time.h>
#include "config.h"
+#ifdef DEBUG_SUBSYSTEM
+#undef DEBUG_SUBSYSTEM
+#endif
+
+#define DEBUG_SUBSYSTEM S_TIME
+
void
__gethrestime(timestruc_t *ts)
{
__clock_gettime(clock_type_t type, timespec_t *tp)
{
/* Only support CLOCK_REALTIME+__CLOCK_REALTIME0 for now */
- BUG_ON(!((type == CLOCK_REALTIME) || (type == __CLOCK_REALTIME0)));
+ ASSERT((type == CLOCK_REALTIME) || (type == __CLOCK_REALTIME0));
getnstimeofday(tp);
return 0;
#include <sys/vnode.h>
#include "config.h"
+
+#ifdef DEBUG_SUBSYSTEM
+#undef DEBUG_SUBSYSTEM
+#endif
+
+#define DEBUG_SUBSYSTEM S_VNODE
+
void *rootdir = NULL;
EXPORT_SYMBOL(rootdir);
vn_alloc(int flag)
{
vnode_t *vp;
+ ENTRY;
vp = kmem_cache_alloc(vn_cache, flag);
if (vp != NULL) {
vp->v_type = 0;
}
- return vp;
+ RETURN(vp);
} /* vn_alloc() */
EXPORT_SYMBOL(vn_alloc);
void
vn_free(vnode_t *vp)
{
+ ENTRY;
kmem_cache_free(vn_cache, vp);
+ EXIT;
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
struct kstat stat;
int rc, saved_umask;
vnode_t *vp;
+ ENTRY;
- BUG_ON(!(flags & (FWRITE | FREAD)));
- BUG_ON(seg != UIO_SYSSPACE);
- BUG_ON(!vpp);
+ ASSERT(flags & (FWRITE | FREAD));
+ ASSERT(seg == UIO_SYSSPACE);
+ ASSERT(vpp);
*vpp = NULL;
if (!(flags & FCREAT) && (flags & FWRITE))
(void)xchg(¤t->fs->umask, saved_umask);
if (IS_ERR(fp))
- return -PTR_ERR(fp);
+ RETURN(-PTR_ERR(fp));
rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat);
if (rc) {
filp_close(fp, 0);
- return -rc;
+ RETURN(-rc);
}
vp = vn_alloc(KM_SLEEP);
if (!vp) {
filp_close(fp, 0);
- return ENOMEM;
+ RETURN(ENOMEM);
}
mutex_enter(&vp->v_lock);
*vpp = vp;
mutex_exit(&vp->v_lock);
- return 0;
+ RETURN(0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
{
char *realpath;
int rc;
+ ENTRY;
- BUG_ON(vp != rootdir);
+ ASSERT(vp == rootdir);
realpath = kmalloc(strlen(path) + 2, GFP_KERNEL);
if (!realpath)
- return ENOMEM;
+ RETURN(ENOMEM);
sprintf(realpath, "/%s", path);
rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
kfree(realpath);
- return rc;
+ RETURN(rc);
} /* vn_openat() */
EXPORT_SYMBOL(vn_openat);
mm_segment_t saved_fs;
struct file *fp;
int rc;
+ ENTRY;
- BUG_ON(!(uio == UIO_WRITE || uio == UIO_READ));
- BUG_ON(!vp);
- BUG_ON(!vp->v_file);
- BUG_ON(seg != UIO_SYSSPACE);
- BUG_ON(x1 != 0);
- BUG_ON(x2 != RLIM64_INFINITY);
+ ASSERT(uio == UIO_WRITE || uio == UIO_READ);
+ ASSERT(vp);
+ ASSERT(vp->v_file);
+ ASSERT(seg == UIO_SYSSPACE);
+ ASSERT(x1 == 0);
+ ASSERT(x2 == RLIM64_INFINITY);
offset = off;
fp = vp->v_file;
set_fs(saved_fs);
if (rc < 0)
- return -rc;
+ RETURN(-rc);
if (residp) {
*residp = len - rc;
} else {
if (rc != len)
- return EIO;
+ RETURN(EIO);
}
- return 0;
+ RETURN(0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
int rc;
+ ENTRY;
- BUG_ON(!vp);
- BUG_ON(!vp->v_file);
+ ASSERT(vp);
+ ASSERT(vp->v_file);
rc = filp_close(vp->v_file, 0);
vn_free(vp);
- return -rc;
+ RETURN(-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);
struct nameidata nd;
struct inode *inode = NULL;
int rc = 0;
+ ENTRY;
- BUG_ON(seg != UIO_SYSSPACE);
- BUG_ON(flags != RMFILE);
+ ASSERT(seg == UIO_SYSSPACE);
+ ASSERT(flags == RMFILE);
rc = path_lookup(path, LOOKUP_PARENT, &nd);
if (rc)
- goto exit;
+ GOTO(exit, rc);
rc = -EISDIR;
if (nd.last_type != LAST_NORM)
- goto exit1;
+ GOTO(exit1, rc);
mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_hash(&nd);
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct rc value */
if (nd.last.name[nd.last.len])
- goto slashes;
+ GOTO(slashes, rc);
+
inode = dentry->d_inode;
if (inode)
atomic_inc(&inode->i_count);
exit1:
path_release(&nd);
exit:
- return -rc;
+ RETURN(-rc);
slashes:
rc = !dentry->d_inode ? -ENOENT :
S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
- goto exit2;
+ GOTO(exit2, rc);
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
struct dentry * trap;
struct nameidata oldnd, newnd;
int rc = 0;
+ ENTRY;
rc = path_lookup(oldname, LOOKUP_PARENT, &oldnd);
if (rc)
- goto exit;
+ GOTO(exit, rc);
rc = path_lookup(newname, LOOKUP_PARENT, &newnd);
if (rc)
- goto exit1;
+ GOTO(exit1, rc);
rc = -EXDEV;
if (oldnd.mnt != newnd.mnt)
- goto exit2;
+ GOTO(exit2, rc);
old_dir = oldnd.dentry;
rc = -EBUSY;
if (oldnd.last_type != LAST_NORM)
- goto exit2;
+ GOTO(exit2, rc);
new_dir = newnd.dentry;
if (newnd.last_type != LAST_NORM)
- goto exit2;
+ GOTO(exit2, rc);
trap = lock_rename(new_dir, old_dir);
rc = PTR_ERR(old_dentry);
if (IS_ERR(old_dentry))
- goto exit3;
+ GOTO(exit3, rc);
/* source must exist */
rc = -ENOENT;
if (!old_dentry->d_inode)
- goto exit4;
+ GOTO(exit4, rc);
/* unless the source is a directory trailing slashes give -ENOTDIR */
if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
rc = -ENOTDIR;
if (oldnd.last.name[oldnd.last.len])
- goto exit4;
+ GOTO(exit4, rc);
if (newnd.last.name[newnd.last.len])
- goto exit4;
+ GOTO(exit4, rc);
}
/* source should not be ancestor of target */
rc = -EINVAL;
if (old_dentry == trap)
- goto exit4;
+ GOTO(exit4, rc);
new_dentry = lookup_hash(&newnd);
rc = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
- goto exit4;
+ GOTO(exit4, rc);
/* target should not be an ancestor of source */
rc = -ENOTEMPTY;
if (new_dentry == trap)
- goto exit5;
+ GOTO(exit5, rc);
rc = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry);
exit1:
path_release(&oldnd);
exit:
- return -rc;
+ RETURN(-rc);
}
EXPORT_SYMBOL(vn_rename);
struct file *fp;
struct kstat stat;
int rc;
+ ENTRY;
- BUG_ON(!vp);
- BUG_ON(!vp->v_file);
- BUG_ON(!vap);
+ ASSERT(vp);
+ ASSERT(vp->v_file);
+ ASSERT(vap);
fp = vp->v_file;
rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat);
if (rc)
- return -rc;
+ RETURN(-rc);
vap->va_type = vn_get_sol_type(stat.mode);
vap->va_mode = stat.mode;
vap->va_rdev = stat.rdev;
vap->va_blocks = stat.blocks;
- return 0;
+ RETURN(0);
}
EXPORT_SYMBOL(vn_getattr);
int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
int datasync = 0;
+ ENTRY;
- BUG_ON(!vp);
- BUG_ON(!vp->v_file);
+ ASSERT(vp);
+ ASSERT(vp->v_file);
if (flags & FDSYNC)
datasync = 1;
- return -file_fsync(vp->v_file, vp->v_file->f_dentry, datasync);
+ RETURN(-file_fsync(vp->v_file, vp->v_file->f_dentry, datasync));
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);
{
file_t *fp;
- BUG_ON(!spin_is_locked(&vn_file_lock));
+ ASSERT(spin_is_locked(&vn_file_lock));
list_for_each_entry(fp, &vn_file_list, f_list) {
if (fd == fp->f_fd) {
- BUG_ON(atomic_read(&fp->f_ref) == 0);
+ ASSERT(atomic_read(&fp->f_ref) != 0);
return fp;
}
}
struct file *lfp;
file_t *fp;
vnode_t *vp;
+ int rc = 0;
+ ENTRY;
/* Already open just take an extra reference */
spin_lock(&vn_file_lock);
if (fp) {
atomic_inc(&fp->f_ref);
spin_unlock(&vn_file_lock);
- return fp;
+ RETURN(fp);
}
spin_unlock(&vn_file_lock);
/* File was not yet opened create the object and setup */
fp = kmem_cache_alloc(vn_file_cache, 0);
if (fp == NULL)
- goto out;
+ GOTO(out, rc);
mutex_enter(&fp->f_lock);
lfp = fget(fd);
if (lfp == NULL)
- goto out_mutex;
+ GOTO(out_mutex, rc);
vp = vn_alloc(KM_SLEEP);
if (vp == NULL)
- goto out_fget;
+ GOTO(out_fget, rc);
if (vfs_getattr(lfp->f_vfsmnt, lfp->f_dentry, &stat))
- goto out_vnode;
+ GOTO(out_vnode, rc);
mutex_enter(&vp->v_lock);
vp->v_type = vn_get_sol_type(stat.mode);
spin_unlock(&vn_file_lock);
mutex_exit(&fp->f_lock);
- return fp;
+ RETURN(fp);
out_vnode:
vn_free(vp);
mutex_exit(&fp->f_lock);
kmem_cache_free(vn_file_cache, fp);
out:
- return NULL;
+ RETURN(NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
static void releasef_locked(file_t *fp)
{
- BUG_ON(fp->f_file == NULL);
- BUG_ON(fp->f_vnode == NULL);
+ ASSERT(fp->f_file);
+ ASSERT(fp->f_vnode);
/* Unlinked from list, no refs, safe to free outside mutex */
fput(fp->f_file);
vn_releasef(int fd)
{
file_t *fp;
+ ENTRY;
spin_lock(&vn_file_lock);
fp = file_find(fd);
atomic_dec(&fp->f_ref);
if (atomic_read(&fp->f_ref) > 0) {
spin_unlock(&vn_file_lock);
+ EXIT;
return;
}
}
spin_unlock(&vn_file_lock);
+ EXIT;
return;
} /* releasef() */
EXPORT_SYMBOL(releasef);
int
vn_init(void)
{
+ ENTRY;
vn_cache = kmem_cache_create("spl_vn_cache", sizeof(struct vnode), 64,
vn_cache_constructor,
vn_cache_destructor,
vn_file_cache_constructor,
vn_file_cache_destructor,
NULL, NULL, NULL, 0);
- return 0;
+ RETURN(0);
} /* vn_init() */
void
{
file_t *fp, *next_fp;
int rc, leaked = 0;
+ ENTRY;
spin_lock(&vn_file_lock);
rc = kmem_cache_destroy(vn_file_cache);
if (rc)
- printk("spl: Warning leaked vn_file_cache objects, %d\n", rc);
+ CWARN("Warning leaked vn_file_cache objects, %d\n", rc);
vn_file_cache = NULL;
spin_unlock(&vn_file_lock);
if (leaked > 0)
- printk("spl: Warning %d files leaked\n", leaked);
+ CWARN("Warning %d files leaked\n", leaked);
rc = kmem_cache_destroy(vn_cache);
if (rc)
- printk("spl: Warning leaked vn_cache objects, %d\n", rc);
+ CWARN("Warning leaked vn_cache objects, %d\n", rc);
+ EXIT;
return;
} /* vn_fini() */