git.proxmox.com Git - mirror_spl-debian.git/commitdiff
OK, everything builds now. My initial intent was to place all of
author: behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
Wed, 27 Feb 2008 20:52:44 +0000 (20:52 +0000)
committer: behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
Wed, 27 Feb 2008 20:52:44 +0000 (20:52 +0000)
the directories at the top level but that proved troublesome.  The
kernel buildsystem and autoconf were conflicting too much.  To
resolve the issue I moved the kernel bits in to a modules directory
which can then only use the kernel build system.  We just pass
along the likely make targets to the kernel build system.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@11 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c

37 files changed:
Makefile.am
cmd/Makefile.am
configure.ac
modules/Makefile.in [new file with mode: 0644]
modules/spl/Makefile.in [new file with mode: 0644]
modules/spl/linux-generic.c [new file with mode: 0644]
modules/spl/linux-kmem.c [new file with mode: 0644]
modules/spl/linux-rwlock.c [new file with mode: 0644]
modules/spl/linux-taskq.c [new file with mode: 0644]
modules/spl/linux-thread.c [new file with mode: 0644]
modules/splat/Makefile.in [new file with mode: 0644]
modules/splat/splat-condvar.c [new file with mode: 0644]
modules/splat/splat-ctl.c [new file with mode: 0644]
modules/splat/splat-kmem.c [new file with mode: 0644]
modules/splat/splat-mutex.c [new file with mode: 0644]
modules/splat/splat-random.c [new file with mode: 0644]
modules/splat/splat-rwlock.c [new file with mode: 0644]
modules/splat/splat-taskq.c [new file with mode: 0644]
modules/splat/splat-thread.c [new file with mode: 0644]
modules/splat/splat-time.c [new file with mode: 0644]
spl/Makefile.in [deleted file]
spl/linux-generic.c [deleted file]
spl/linux-kmem.c [deleted file]
spl/linux-rwlock.c [deleted file]
spl/linux-taskq.c [deleted file]
spl/linux-thread.c [deleted file]
splat/Makefile.in [deleted file]
splat/splat-condvar.c [deleted file]
splat/splat-ctl.c [deleted file]
splat/splat-kmem.c [deleted file]
splat/splat-mutex.c [deleted file]
splat/splat-random.c [deleted file]
splat/splat-rwlock.c [deleted file]
splat/splat-taskq.c [deleted file]
splat/splat-thread.c [deleted file]
splat/splat-time.c [deleted file]
src/Makefile.am [deleted file]

index 7abb6ee39b78e76e06a1c6fdefba983cbccaa867..1d219cd85a7c07880e12c06a452f40c0d6059633 100644 (file)
@@ -1,9 +1,10 @@
 AUTOMAKE_OPTIONS = foreign dist-zip
 
-SUBDIRS = src include scripts
+SUBDIRS = lib cmd modules include scripts
 CONFIG_CLEAN_FILES =  aclocal.m4 config.guess config.sub
 CONFIG_CLEAN_FILES += depcomp install-sh missing mkinstalldirs
 EXTRA_DIST = autogen.sh
 
 rpms: dist Makefile
        rpmbuild -ta $(distdir).tar.gz
+
index ae3961a9778e4535d9e0b07ffda3560f34e5eaf7..f211db18f2eb058c7a1d059eb9296738f7cc9201 100644 (file)
@@ -2,4 +2,4 @@ AM_CFLAGS = -g -O2 -W -Wall -Wstrict-prototypes -Wshadow
 INCLUDES = -I$(top_srcdir)/include
 sbin_PROGRAMS = splat
 splat_SOURCES = splat.c
-splat_LDFLAGS = $(top_builddir)/src/lib/libcommon.la
+splat_LDFLAGS = $(top_builddir)/lib/libcommon.la
index 215b5091c1796e519567db1a60152493e522f36a..3992520ac806b53aa6c55c34f5d9503ea0609e01 100644 (file)
@@ -109,11 +109,11 @@ AC_SUBST(KERNELCPPFLAGS)
 AC_SUBST(KERNELCFLAGS)
 
 AC_CONFIG_FILES([ Makefile
-                  src/Makefile
-                  src/lib/Makefile
-                  src/cmd/Makefile
-                  src/spl/Makefile
-                  src/splat/Makefile
+                  lib/Makefile
+                  cmd/Makefile
+                  modules/Makefile
+                  modules/spl/Makefile
+                  modules/splat/Makefile
                   include/Makefile
                   scripts/Makefile
                   scripts/spl.spec
diff --git a/modules/Makefile.in b/modules/Makefile.in
new file mode 100644 (file)
index 0000000..f1cb0ca
--- /dev/null
@@ -0,0 +1,12 @@
+subdir-m += spl
+subdir-m += splat
+
+all:
+       $(MAKE) -C @kernelsrc@ SUBDIRS=`pwd` @KERNELMAKE_PARAMS@ modules
+
+install uninstall clean distclean maintainer-clean distdir:
+       $(MAKE) -C @kernelsrc@ SUBDIRS=`pwd` @KERNELMAKE_PARAMS@ $@
+
+
+
+
diff --git a/modules/spl/Makefile.in b/modules/spl/Makefile.in
new file mode 100644 (file)
index 0000000..134d882
--- /dev/null
@@ -0,0 +1,40 @@
+# Makefile.in for spl kernel module
+
+MODULES := spl
+DISTFILES = Makefile.in \
+            linux-kmem.c linux-rwlock.c linux-taskq.c \
+            linux-thread.c linux-generic.c
+CPPFLAGS += @KERNELCPPFLAGS@
+
+# Solaris porting layer module
+obj-m := spl.o
+
+spl-objs += linux-kmem.o
+spl-objs += linux-thread.o
+spl-objs += linux-taskq.o
+spl-objs += linux-rwlock.o
+spl-objs += linux-generic.o
+
+splmodule := spl.ko
+splmoduledir := @kmoduledir@/kernel/lib/
+
+install:
+       mkdir -p $(DESTDIR)$(splmoduledir)
+       $(INSTALL) -m 644 $(splmodule) $(DESTDIR)$(splmoduledir)/$(splmodule)
+       -/sbin/depmod -a
+
+uninstall:
+       rm -f $(DESTDIR)$(splmoduledir)/$(splmodule)
+       -/sbin/depmod -a
+
+clean:
+       -rm -f $(splmodule) *.o .*.cmd *.mod.c *.ko *.s */*.o
+
+distclean: clean
+       rm -f Makefile
+       rm -rf .tmp_versions
+
+maintainer-clean: distclean
+
+distdir: $(DISTFILES)
+       cp -p $(DISTFILES) $(distdir)
diff --git a/modules/spl/linux-generic.c b/modules/spl/linux-generic.c
new file mode 100644 (file)
index 0000000..fa1ebab
--- /dev/null
@@ -0,0 +1,8 @@
+#include "linux-generic.h"
+
+/*
+ * Generic support
+ */
+
+int p0 = 0;
+EXPORT_SYMBOL(p0);
diff --git a/modules/spl/linux-kmem.c b/modules/spl/linux-kmem.c
new file mode 100644 (file)
index 0000000..4dc7c01
--- /dev/null
@@ -0,0 +1,251 @@
+#include "linux-kmem.h"
+
+/*
+ * Memory allocation interfaces
+ */
+#ifdef DEBUG_KMEM
+/* Shim layer memory accounting */
+atomic_t kmem_alloc_used;
+unsigned int kmem_alloc_max;
+#endif
+
+/*
+ * Slab allocation interfaces
+ *
+ * While the linux slab implementation was inspired by solaris they
+ * have made some changes to the API which complicates this shim
+ * layer.  For one thing the same symbol names are used with different
+ * arguments for the prototypes.  To deal with this we must use the
+ * preprocessor to re-order arguments.  Happily for us standard C says,
+ * "Macros appearing in their own expansion are not reexpanded" so
+ * this does not result in an infinite recursion.  Additionally the
+ * function pointers registered by solaris differ from those used
+ * by linux so a lookup and mapping from linux style callback to a
+ * solaris style callback is needed.  There is some overhead in this
+ * operation which isn't horrible but it needs to be kept in mind.
+ */
+typedef struct kmem_cache_cb {
+        struct list_head    kcc_list;
+        kmem_cache_t *      kcc_cache;
+        kmem_constructor_t  kcc_constructor;
+        kmem_destructor_t   kcc_destructor;
+        kmem_reclaim_t      kcc_reclaim;
+        void *              kcc_private;
+        void *              kcc_vmp;
+} kmem_cache_cb_t;
+
+
+static spinlock_t kmem_cache_cb_lock = SPIN_LOCK_UNLOCKED;
+//static spinlock_t kmem_cache_cb_lock = (spinlock_t) { 1 SPINLOCK_MAGIC_INIT };
+static LIST_HEAD(kmem_cache_cb_list);
+static struct shrinker *kmem_cache_shrinker;
+
+/* Function must be called while holding the kmem_cache_cb_lock
+ * Because kmem_cache_t is an opaque datatype we're forced to
+ * match pointers to identify specific cache entires.
+ */
+static kmem_cache_cb_t *
+kmem_cache_find_cache_cb(kmem_cache_t *cache)
+{
+        kmem_cache_cb_t *kcc;
+
+        list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list)
+               if (cache == kcc->kcc_cache)
+                        return kcc;
+
+        return NULL;
+}
+
+static kmem_cache_cb_t *
+kmem_cache_add_cache_cb(kmem_cache_t *cache,
+                       kmem_constructor_t constructor,
+                        kmem_destructor_t destructor,
+                       kmem_reclaim_t reclaim,
+                        void *priv, void *vmp)
+{
+        kmem_cache_cb_t *kcc;
+
+        kcc = (kmem_cache_cb_t *)kmalloc(sizeof(*kcc), GFP_KERNEL);
+        if (kcc) {
+               kcc->kcc_cache = cache;
+                kcc->kcc_constructor = constructor;
+                kcc->kcc_destructor = destructor;
+                kcc->kcc_reclaim = reclaim;
+                kcc->kcc_private = priv;
+                kcc->kcc_vmp = vmp;
+               spin_lock(&kmem_cache_cb_lock);
+                list_add(&kcc->kcc_list, &kmem_cache_cb_list);
+               spin_unlock(&kmem_cache_cb_lock);
+        }
+
+        return kcc;
+}
+
+static void
+kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
+{
+       spin_lock(&kmem_cache_cb_lock);
+        list_del(&kcc->kcc_list);
+       spin_unlock(&kmem_cache_cb_lock);
+
+       if (kcc)
+              kfree(kcc);
+}
+
+static void
+kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
+{
+        kmem_cache_cb_t *kcc;
+
+       spin_lock(&kmem_cache_cb_lock);
+
+        /* Callback list must be in sync with linux slab caches */
+        kcc = kmem_cache_find_cache_cb(cache);
+        BUG_ON(!kcc);
+
+       kcc->kcc_constructor(ptr, kcc->kcc_private, (int)flags);
+       spin_unlock(&kmem_cache_cb_lock);
+       /* Linux constructor has no return code, silently eat it */
+}
+
+static void
+kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
+{
+        kmem_cache_cb_t *kcc;
+
+       spin_lock(&kmem_cache_cb_lock);
+
+        /* Callback list must be in sync with linux slab caches */
+        kcc = kmem_cache_find_cache_cb(cache);
+        BUG_ON(!kcc);
+
+       /* Solaris destructor takes no flags, silently eat them */
+       kcc->kcc_destructor(ptr, kcc->kcc_private);
+       spin_unlock(&kmem_cache_cb_lock);
+}
+
+/* XXX - Arguments are ignored */
+static int
+kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
+{
+        kmem_cache_cb_t *kcc;
+        int total = 0;
+
+       /* Under linux a shrinker is not tightly coupled with a slab
+        * cache.  In fact linux always systematically tries calling all
+        * registered shrinker callbacks until its target reclamation level
+        * is reached.  Because of this we only register one shrinker
+        * function in the shim layer for all slab caches.  And we always
+        * attempt to shrink all caches when this generic shrinker is called.
+        */
+       spin_lock(&kmem_cache_cb_lock);
+
+        list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
+               /* Under linux the desired number and gfp type of objects
+                * is passed to the reclaiming function as a suggested reclaim
+                * target.  I do not pass these args on because reclaim
+                * policy is entirely up to the owner under solaris.  We only
+                * pass on the pre-registered private data.
+                 */
+               if (kcc->kcc_reclaim)
+                        kcc->kcc_reclaim(kcc->kcc_private);
+
+               total += 1;
+        }
+
+       /* Under linux we should return the remaining number of entries in
+        * the cache.  Unfortunately, I don't see an easy way to safely
+        * emulate this behavior so I'm returning one entry per cache which
+        * was registered with the generic shrinker.  This should fake out
+        * the linux VM when it attempts to shrink caches.
+        */
+       spin_unlock(&kmem_cache_cb_lock);
+       return total;
+}
+
+/* Ensure the __kmem_cache_create/__kmem_cache_destroy macros are
+ * removed here to prevent a recursive substitution, we want to call
+ * the native linux version.
+ */
+#undef kmem_cache_create
+#undef kmem_cache_destroy
+
+kmem_cache_t *
+__kmem_cache_create(char *name, size_t size, size_t align,
+        kmem_constructor_t constructor,
+       kmem_destructor_t destructor,
+       kmem_reclaim_t reclaim,
+        void *priv, void *vmp, int flags)
+{
+        kmem_cache_t *cache;
+        kmem_cache_cb_t *kcc;
+       int shrinker_flag = 0;
+
+        /* FIXME: - Option currently unsupported by shim layer */
+        BUG_ON(vmp);
+
+        cache = kmem_cache_create(name, size, align, flags,
+                                  kmem_cache_generic_constructor,
+                                  kmem_cache_generic_destructor);
+       if (cache == NULL)
+                return NULL;
+
+        /* Register shared shrinker function on initial cache create */
+       spin_lock(&kmem_cache_cb_lock);
+       if (list_empty(&kmem_cache_cb_list)) {
+                kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
+                                                 kmem_cache_generic_shrinker);
+                if (kmem_cache_shrinker == NULL) {
+                        kmem_cache_destroy(cache);
+                       spin_unlock(&kmem_cache_cb_lock);
+                        return NULL;
+                }
+
+                shrinker_flag = 1; /* must unregister shrinker on error below */
+        }
+       spin_unlock(&kmem_cache_cb_lock);
+
+        kcc = kmem_cache_add_cache_cb(cache, constructor, destructor,
+                                      reclaim, priv, vmp);
+        if (kcc == NULL) {
+               if (shrinker_flag) /* New shrinker registered must be removed */
+                       remove_shrinker(kmem_cache_shrinker);
+
+                kmem_cache_destroy(cache);
+                return NULL;
+        }
+
+        return cache;
+}
+EXPORT_SYMBOL(__kmem_cache_create);
+
+/* Return codes discarded because Solaris implementation has void return */
+void
+__kmem_cache_destroy(kmem_cache_t *cache)
+{
+        kmem_cache_cb_t *kcc;
+
+       spin_lock(&kmem_cache_cb_lock);
+        kcc = kmem_cache_find_cache_cb(cache);
+       spin_unlock(&kmem_cache_cb_lock);
+        if (kcc == NULL)
+                return;
+
+        kmem_cache_destroy(cache);
+        kmem_cache_remove_cache_cb(kcc);
+
+       /* Unregister generic shrinker on removal of all caches */
+       spin_lock(&kmem_cache_cb_lock);
+       if (list_empty(&kmem_cache_cb_list))
+                remove_shrinker(kmem_cache_shrinker);
+
+       spin_unlock(&kmem_cache_cb_lock);
+}
+EXPORT_SYMBOL(__kmem_cache_destroy);
+
+void
+__kmem_reap(void) {
+       /* Since there's no easy hook in to linux to force all the registered
+        * shrinkers to run we just run the ones registered for this shim */
+       kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
+}
+EXPORT_SYMBOL(__kmem_reap);
diff --git a/modules/spl/linux-rwlock.c b/modules/spl/linux-rwlock.c
new file mode 100644 (file)
index 0000000..24775c4
--- /dev/null
@@ -0,0 +1,41 @@
+#include <linux-rwlock.h>
+
+int
+rw_lock_held(krwlock_t *rwlp)
+{
+       BUG_ON(rwlp->rw_magic != RW_MAGIC);
+
+#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+       if (rwlp->rw_sem.activity != 0) {
+#else
+       if (rwlp->rw_sem.count != 0) {
+#endif
+               return 1;
+       }
+
+       return 0;
+}
+
+int
+rw_read_held(krwlock_t *rwlp)
+{
+       BUG_ON(rwlp->rw_magic != RW_MAGIC);
+
+       if (rw_lock_held(rwlp) && rwlp->rw_owner == NULL) {
+               return 1;
+       }
+
+       return 0;
+}
+
+int
+rw_write_held(krwlock_t *rwlp)
+{
+       BUG_ON(rwlp->rw_magic != RW_MAGIC);
+
+       if (rwlp->rw_owner == current) {
+               return 1;
+       }
+
+       return 0;
+}
diff --git a/modules/spl/linux-taskq.c b/modules/spl/linux-taskq.c
new file mode 100644 (file)
index 0000000..ddcf57c
--- /dev/null
@@ -0,0 +1,80 @@
+#include <linux-taskq.h>
+
+/*
+ * Task queue interface
+ *
+ * The taskq_work_wrapper functions are used to manage the work_structs
+ * which must be submitted to linux.  The shim layer allocates a wrapper
+ * structure for all items which contains a pointer to itself as well as
+ * the real work to be performed.  When the work item run the generic
+ * handle is called which calls the real work function and then using
+ * the self pointer frees the work_struct.
+ */
+typedef struct taskq_work_wrapper {
+        struct work_struct tww_work;
+        task_func_t        tww_func;
+        void *             tww_priv;
+} taskq_work_wrapper_t;
+
+static void
+taskq_work_handler(void *priv)
+{
+        taskq_work_wrapper_t *tww = priv;
+
+        BUG_ON(tww == NULL);
+        BUG_ON(tww->tww_func == NULL);
+
+        /* Call the real function and free the wrapper */
+        tww->tww_func(tww->tww_priv);
+        kfree(tww);
+}
+
+/* XXX - All flags currently ignored */
+taskqid_t
+__taskq_dispatch(taskq_t *tq, task_func_t func, void *priv, uint_t flags)
+{
+        struct workqueue_struct *wq = tq;
+        taskq_work_wrapper_t *tww;
+        int rc;
+
+
+        BUG_ON(in_interrupt());
+        BUG_ON(tq == NULL);
+        BUG_ON(func == NULL);
+
+        tww = (taskq_work_wrapper_t *)kmalloc(sizeof(*tww), GFP_KERNEL);
+        if (!tww)
+                return (taskqid_t)0;
+
+        INIT_WORK(&(tww->tww_work), taskq_work_handler, tww);
+        tww->tww_func = func;
+        tww->tww_priv = priv;
+
+        rc = queue_work(wq, &(tww->tww_work));
+        if (!rc) {
+                kfree(tww);
+                return (taskqid_t)0;
+        }
+
+        return (taskqid_t)wq;
+}
+EXPORT_SYMBOL(__taskq_dispatch);
+
+/* XXX - Most args ignored until we decide if it's worth the effort
+ *       to emulate the solaris notion of dynamic thread pools.  For
+ *       now we simply serialize everything through one thread which
+ *       may come back to bite us as a performance issue.
+ * pri   - Ignore priority
+ * min   - Ignored until this is a dynamic thread pool
+ * max   - Ignored until this is a dynamic thread pool
+ * flags - Ignored until this is a dynamic thread_pool
+ */
+taskq_t *
+__taskq_create(const char *name, int nthreads, pri_t pri,
+               int minalloc, int maxalloc, uint_t flags)
+{
+       /* NOTE: Linux workqueue names are limited to 10 chars */
+
+        return create_singlethread_workqueue(name);
+}
+EXPORT_SYMBOL(__taskq_create);
diff --git a/modules/spl/linux-thread.c b/modules/spl/linux-thread.c
new file mode 100644 (file)
index 0000000..9785d50
--- /dev/null
@@ -0,0 +1,114 @@
+#include <linux-thread.h>
+
+/*
+ * Thread interfaces
+ */
+typedef struct thread_priv_s {
+       unsigned long tp_magic;         /* Magic */
+       void (*tp_func)(void *);        /* Registered function */
+       void *tp_args;                  /* Args to be passed to function */
+       size_t tp_len;                  /* Len to be passed to function */
+       int tp_state;                   /* State to start thread at */
+       pri_t tp_pri;                   /* Priority to start thread at */
+       volatile kthread_t *tp_task;    /* Task pointer for new thread */
+       spinlock_t tp_lock;             /* Synchronization lock */
+        wait_queue_head_t tp_waitq;    /* Synchronization wait queue */
+} thread_priv_t;
+
+static int
+thread_generic_wrapper(void *arg)
+{
+       thread_priv_t *tp = (thread_priv_t *)arg;
+       void (*func)(void *);
+       void *args;
+       char name[16];
+
+       /* Use the truncated function name as thread name */
+       snprintf(name, sizeof(name), "%s", "kthread");
+       daemonize(name);
+
+        spin_lock(&tp->tp_lock);
+       BUG_ON(tp->tp_magic != TP_MAGIC);
+       func = tp->tp_func;
+       args = tp->tp_args;
+       tp->tp_task = get_current();
+       set_current_state(tp->tp_state);
+       set_user_nice((kthread_t *)tp->tp_task, PRIO_TO_NICE(tp->tp_pri));
+
+        spin_unlock(&tp->tp_lock);
+       wake_up(&tp->tp_waitq);
+
+       /* DO NOT USE 'ARG' AFTER THIS POINT, EVER, EVER, EVER!
+        * Local variables are used here because after the calling thread
+        * has been woken up it will exit and this memory will no longer
+        * be safe to access since it was declared on the callers stack. */
+       if (func)
+               func(args);
+
+       return 0;
+}
+
+void
+__thread_exit(void)
+{
+       return;
+}
+EXPORT_SYMBOL(__thread_exit);
+
+/* thread_create() may block forever if it cannot create a thread or
+ * allocate memory.  This is preferable to returning a NULL which Solaris
+ * style callers likely never check for... since it can't fail. */
+kthread_t *
+__thread_create(caddr_t stk, size_t  stksize, void (*proc)(void *),
+               void *args, size_t len, proc_t *pp, int state, pri_t pri)
+{
+       thread_priv_t tp;
+       DEFINE_WAIT(wait);
+       long pid;
+
+       /* Option pp is simply ignored */
+       /* Variable stack size unsupported */
+       BUG_ON(stk != NULL);
+       BUG_ON(stk != 0);
+
+       /* Variable tp is located on the stack and not the heap because I want
+        * to minimize any chance of a failure, since the Solaris code is designed
+        * such that this function cannot fail.  This is a little dangerous since
+        * we're passing a stack address to a new thread but correct locking was
+        * added to ensure the callee can use the data safely until wake_up(). */
+       tp.tp_magic = TP_MAGIC;
+       tp.tp_func  = proc;
+       tp.tp_args  = args;
+       tp.tp_len   = len;
+       tp.tp_state = state;
+       tp.tp_pri   = pri;
+       tp.tp_task  = NULL;
+       spin_lock_init(&tp.tp_lock);
+        init_waitqueue_head(&tp.tp_waitq);
+
+       spin_lock(&tp.tp_lock);
+
+       /* Solaris says this must never fail so we try forever */
+       while ((pid = kernel_thread(thread_generic_wrapper, (void *)&tp, 0)) < 0)
+               printk(KERN_ERR "linux-thread: Unable to create thread; "
+                      "pid = %ld\n", pid);
+
+       /* All signals are ignored due to sleeping TASK_UNINTERRUPTIBLE */
+       for (;;) {
+               prepare_to_wait(&tp.tp_waitq, &wait, TASK_UNINTERRUPTIBLE);
+               if (tp.tp_task != NULL)
+                       break;
+
+               spin_unlock(&tp.tp_lock);
+               schedule();
+               spin_lock(&tp.tp_lock);
+       }
+
+       /* Verify the pid returned matches the pid in the task struct */
+       BUG_ON(pid != (tp.tp_task)->pid);
+
+       spin_unlock(&tp.tp_lock);
+
+       return (kthread_t *)tp.tp_task;
+}
+EXPORT_SYMBOL(__thread_create);
diff --git a/modules/splat/Makefile.in b/modules/splat/Makefile.in
new file mode 100644 (file)
index 0000000..54155c8
--- /dev/null
@@ -0,0 +1,45 @@
+# Makefile.in for splat kernel module
+
+MODULES := splat
+DISTFILES = Makefile.in \
+            splat-kmem.c splat-random.c splat-taskq.c \
+            splat-time.c splat-condvar.c  splat-mutex.c \
+            splat-rwlock.c  splat-thread.c splat-ctl.c
+CPPFLAGS += @KERNELCPPFLAGS@
+
+# Solaris porting layer aggressive tests
+obj-m := splat.o
+
+splat-objs += splat-ctl.o
+splat-objs += splat-kmem.o
+splat-objs += splat-taskq.o
+splat-objs += splat-random.o
+splat-objs += splat-mutex.o
+splat-objs += splat-condvar.o
+splat-objs += splat-thread.o
+splat-objs += splat-rwlock.o
+splat-objs += splat-time.o
+
+splatmodule := splat.ko
+splatmoduledir := @kmoduledir@/kernel/lib/
+
+install:
+       mkdir -p $(DESTDIR)$(splatmoduledir)
+       $(INSTALL) -m 644 $(splatmodule) $(DESTDIR)$(splatmoduledir)/$(splatmodule)
+       -/sbin/depmod -a
+
+uninstall:
+       rm -f $(DESTDIR)$(splatmoduledir)/$(splatmodule)
+       -/sbin/depmod -a
+
+clean:
+       -rm -f $(splatmodule) *.o .*.cmd *.mod.c *.ko *.s */*.o
+
+distclean: clean
+       rm -f Makefile
+       rm -rf .tmp_versions
+
+maintainer-clean: distclean
+
+distdir: $(DISTFILES)
+       cp -p $(DISTFILES) $(distdir)
diff --git a/modules/splat/splat-condvar.c b/modules/splat/splat-condvar.c
new file mode 100644 (file)
index 0000000..7c9b557
--- /dev/null
@@ -0,0 +1,453 @@
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_CONDVAR          0x0500
+#define KZT_CONDVAR_NAME               "condvar"
+#define KZT_CONDVAR_DESC               "Kernel Condition Variable Tests"
+
+#define KZT_CONDVAR_TEST1_ID           0x0501
+#define KZT_CONDVAR_TEST1_NAME         "signal1"
+#define KZT_CONDVAR_TEST1_DESC         "Wake a single thread, cv_wait()/cv_signal()"
+
+#define KZT_CONDVAR_TEST2_ID           0x0502
+#define KZT_CONDVAR_TEST2_NAME         "broadcast1"
+#define KZT_CONDVAR_TEST2_DESC         "Wake all threads, cv_wait()/cv_broadcast()"
+
+#define KZT_CONDVAR_TEST3_ID           0x0503
+#define KZT_CONDVAR_TEST3_NAME         "signal2"
+#define KZT_CONDVAR_TEST3_DESC         "Wake a single thread, cv_wait_timeout()/cv_signal()"
+
+#define KZT_CONDVAR_TEST4_ID           0x0504
+#define KZT_CONDVAR_TEST4_NAME         "broadcast2"
+#define KZT_CONDVAR_TEST4_DESC         "Wake all threads, cv_wait_timeout()/cv_broadcast()"
+
+#define KZT_CONDVAR_TEST5_ID           0x0505
+#define KZT_CONDVAR_TEST5_NAME         "timeout"
+#define KZT_CONDVAR_TEST5_DESC         "Timeout thread, cv_wait_timeout()"
+
+#define KZT_CONDVAR_TEST_MAGIC         0x115599DDUL
+#define KZT_CONDVAR_TEST_NAME          "condvar_test"
+#define KZT_CONDVAR_TEST_COUNT         8
+
+typedef struct condvar_priv {
+        unsigned long cv_magic;
+        struct file *cv_file;
+       kcondvar_t cv_condvar;
+       kmutex_t cv_mtx;
+} condvar_priv_t;
+
+typedef struct condvar_thr {
+       int ct_id;
+       const char *ct_name;
+       condvar_priv_t *ct_cvp;
+       int ct_rc;
+} condvar_thr_t;
+
+int
+kzt_condvar_test12_thread(void *arg)
+{
+       condvar_thr_t *ct = (condvar_thr_t *)arg;
+       condvar_priv_t *cv = ct->ct_cvp;
+       char name[16];
+
+       ASSERT(cv->cv_magic == KZT_CONDVAR_TEST_MAGIC);
+        snprintf(name, sizeof(name), "%s%d", KZT_CONDVAR_TEST_NAME, ct->ct_id);
+       daemonize(name);
+
+       mutex_enter(&cv->cv_mtx);
+       kzt_vprint(cv->cv_file, ct->ct_name,
+                  "%s thread sleeping with %d waiters\n",
+                  name, atomic_read(&cv->cv_condvar.cv_waiters));
+       cv_wait(&cv->cv_condvar, &cv->cv_mtx);
+       kzt_vprint(cv->cv_file, ct->ct_name,
+                  "%s thread woken %d waiters remain\n",
+                  name, atomic_read(&cv->cv_condvar.cv_waiters));
+       mutex_exit(&cv->cv_mtx);
+
+       return 0;
+}
+
+static int
+kzt_condvar_test1(struct file *file, void *arg)
+{
+       int i, count = 0, rc = 0;
+       long pids[KZT_CONDVAR_TEST_COUNT];
+       condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+       condvar_priv_t cv;
+
+       cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+       cv.cv_file = file;
+       mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+       cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+       /* Create some threads, the exact number isn't important just as
+        * long as we know how many we managed to create and should expect. */
+       for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+               ct[i].ct_cvp = &cv;
+               ct[i].ct_id = i;
+               ct[i].ct_name = KZT_CONDVAR_TEST1_NAME;
+               ct[i].ct_rc = 0;
+
+               pids[i] = kernel_thread(kzt_condvar_test12_thread, &ct[i], 0);
+               if (pids[i] >= 0)
+                       count++;
+       }
+
+       /* Wait until all threads are waiting on the condition variable */
+       while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+               schedule();
+
+       /* Wake a single thread at a time, wait until it exits */
+       for (i = 1; i <= count; i++) {
+               cv_signal(&cv.cv_condvar);
+
+               while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
+                       schedule();
+
+               /* Correct behavior 1 thread woken */
+               if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
+                       continue;
+
+                kzt_vprint(file, KZT_CONDVAR_TEST1_NAME, "Attempted to "
+                          "wake %d thread but work %d threads woke\n",
+                          1, count - atomic_read(&cv.cv_condvar.cv_waiters));
+               rc = -EINVAL;
+               break;
+       }
+
+       if (!rc)
+                kzt_vprint(file, KZT_CONDVAR_TEST1_NAME, "Correctly woke "
+                          "%d sleeping threads %d at a time\n", count, 1);
+
+       /* Wait until the last mutex is dropped */
+       while (mutex_owner(&cv.cv_mtx))
+               schedule();
+
+       /* Wake everything for the failure case */
+       cv_broadcast(&cv.cv_condvar);
+       cv_destroy(&cv.cv_condvar);
+       mutex_destroy(&cv.cv_mtx);
+
+       return rc;
+}
+
+static int
+kzt_condvar_test2(struct file *file, void *arg)
+{
+       int i, count = 0, rc = 0;
+       long pids[KZT_CONDVAR_TEST_COUNT];
+       condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+       condvar_priv_t cv;
+
+       cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+       cv.cv_file = file;
+       mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+       cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+       /* Create some threads, the exact number isn't important just as
+        * long as we know how many we managed to create and should expect. */
+       for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+               ct[i].ct_cvp = &cv;
+               ct[i].ct_id = i;
+               ct[i].ct_name = KZT_CONDVAR_TEST2_NAME;
+               ct[i].ct_rc = 0;
+
+               pids[i] = kernel_thread(kzt_condvar_test12_thread, &ct[i], 0);
+               if (pids[i] > 0)
+                       count++;
+       }
+
+       /* Wait until all threads are waiting on the condition variable */
+       while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+               schedule();
+
+       /* Wake all threads waiting on the condition variable */
+       cv_broadcast(&cv.cv_condvar);
+
+       /* Wait until all threads have exited */
+       while ((atomic_read(&cv.cv_condvar.cv_waiters) > 0) || mutex_owner(&cv.cv_mtx))
+               schedule();
+
+        kzt_vprint(file, KZT_CONDVAR_TEST2_NAME, "Correctly woke all "
+                          "%d sleeping threads at once\n", count);
+
+       /* Wake everything for the failure case */
+       cv_destroy(&cv.cv_condvar);
+       mutex_destroy(&cv.cv_mtx);
+
+       return rc;
+}
+
+int
+kzt_condvar_test34_thread(void *arg)
+{
+       condvar_thr_t *ct = (condvar_thr_t *)arg;
+       condvar_priv_t *cv = ct->ct_cvp;
+       char name[16];
+       clock_t rc;
+
+       ASSERT(cv->cv_magic == KZT_CONDVAR_TEST_MAGIC);
+        snprintf(name, sizeof(name), "%s%d", KZT_CONDVAR_TEST_NAME, ct->ct_id);
+       daemonize(name);
+
+       mutex_enter(&cv->cv_mtx);
+       kzt_vprint(cv->cv_file, ct->ct_name,
+                  "%s thread sleeping with %d waiters\n",
+                  name, atomic_read(&cv->cv_condvar.cv_waiters));
+
+       /* Sleep no longer than 3 seconds, for this test we should
+        * actually never sleep that long without being woken up. */
+       rc = cv_timedwait(&cv->cv_condvar, &cv->cv_mtx, lbolt + HZ * 3);
+       if (rc == -1) {
+               ct->ct_rc = -ETIMEDOUT;
+               kzt_vprint(cv->cv_file, ct->ct_name, "%s thread timed out, "
+                          "should have been woken\n", name);
+       } else {
+               kzt_vprint(cv->cv_file, ct->ct_name,
+                          "%s thread woken %d waiters remain\n",
+                          name, atomic_read(&cv->cv_condvar.cv_waiters));
+       }
+
+       mutex_exit(&cv->cv_mtx);
+
+       return 0;
+}
+
+/* Test 3: park KZT_CONDVAR_TEST_COUNT threads in cv_timedwait() on one
+ * condition variable, then cv_signal() them awake one at a time,
+ * verifying exactly one waiter is released per signal.  Returns 0 on
+ * success, -EINVAL if the wrong number of threads woke, or the first
+ * recorded per-thread timeout error. */
+static int
+kzt_condvar_test3(struct file *file, void *arg)
+{
+       int i, count = 0, rc = 0;
+       long pids[KZT_CONDVAR_TEST_COUNT];
+       condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+       condvar_priv_t cv;
+
+       cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+       cv.cv_file = file;
+       mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+       cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+       /* Create some threads, the exact number isn't important just as
+        * long as we know how many we managed to create and should expect. */
+       for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+               ct[i].ct_cvp = &cv;
+               ct[i].ct_id = i;
+               ct[i].ct_name = KZT_CONDVAR_TEST3_NAME;
+               ct[i].ct_rc = 0;
+
+               pids[i] = kernel_thread(kzt_condvar_test34_thread, &ct[i], 0);
+               if (pids[i] >= 0)
+                       count++;
+       }
+
+       /* Wait until all threads are waiting on the condition variable */
+       while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+               schedule();
+
+       /* Wake a single thread at a time, wait until it releases the cv */
+       for (i = 1; i <= count; i++) {
+               cv_signal(&cv.cv_condvar);
+
+               while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
+                       schedule();
+
+               /* Correct behavior 1 thread woken */
+               if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
+                       continue;
+
+                kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Attempted to "
+                          "wake %d thread but %d threads woke\n",
+                          1, count - atomic_read(&cv.cv_condvar.cv_waiters));
+               rc = -EINVAL;
+               break;
+       }
+
+       /* Validate no waiting thread timed out early */
+       for (i = 0; i < count; i++)
+               if (ct[i].ct_rc)
+                       rc = ct[i].ct_rc;
+
+       if (!rc)
+                kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Correctly woke "
+                          "%d sleeping threads %d at a time\n", count, 1);
+
+       /* Wait until the last mutex holder has dropped it */
+       while (mutex_owner(&cv.cv_mtx))
+               schedule();
+
+       /* Wake everything for the failure case */
+       cv_broadcast(&cv.cv_condvar);
+       cv_destroy(&cv.cv_condvar);
+       mutex_destroy(&cv.cv_mtx);
+
+       return rc;
+}
+
+/* Test 4: identical flow to test 3 -- wake sleeping threads one at a
+ * time with cv_signal() -- but reported under the test 4 name.
+ * NOTE(review): the body duplicates kzt_condvar_test3(); a distinct
+ * variant (e.g. cv_broadcast based) may have been intended -- confirm.
+ * Returns 0 on success, -EINVAL or a per-thread timeout error. */
+static int
+kzt_condvar_test4(struct file *file, void *arg)
+{
+       int i, count = 0, rc = 0;
+       long pids[KZT_CONDVAR_TEST_COUNT];
+       condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+       condvar_priv_t cv;
+
+       cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+       cv.cv_file = file;
+       mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+       cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+       /* Create some threads, the exact number isn't important just as
+        * long as we know how many we managed to create and should expect. */
+       for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+               ct[i].ct_cvp = &cv;
+               ct[i].ct_id = i;
+               /* Report under this test's own name, not test 3's */
+               ct[i].ct_name = KZT_CONDVAR_TEST4_NAME;
+               ct[i].ct_rc = 0;
+
+               pids[i] = kernel_thread(kzt_condvar_test34_thread, &ct[i], 0);
+               if (pids[i] >= 0)
+                       count++;
+       }
+
+       /* Wait until all threads are waiting on the condition variable */
+       while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+               schedule();
+
+       /* Wake a single thread at a time, wait until it releases the cv */
+       for (i = 1; i <= count; i++) {
+               cv_signal(&cv.cv_condvar);
+
+               while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
+                       schedule();
+
+               /* Correct behavior 1 thread woken */
+               if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
+                       continue;
+
+                kzt_vprint(file, KZT_CONDVAR_TEST4_NAME, "Attempted to "
+                          "wake %d thread but %d threads woke\n",
+                          1, count - atomic_read(&cv.cv_condvar.cv_waiters));
+               rc = -EINVAL;
+               break;
+       }
+
+       /* Validate no waiting thread timed out early */
+       for (i = 0; i < count; i++)
+               if (ct[i].ct_rc)
+                       rc = ct[i].ct_rc;
+
+       if (!rc)
+                kzt_vprint(file, KZT_CONDVAR_TEST4_NAME, "Correctly woke "
+                          "%d sleeping threads %d at a time\n", count, 1);
+
+       /* Wait until the last mutex holder has dropped it */
+       while (mutex_owner(&cv.cv_mtx))
+               schedule();
+
+       /* Wake everything for the failure case */
+       cv_broadcast(&cv.cv_condvar);
+       cv_destroy(&cv.cv_condvar);
+       mutex_destroy(&cv.cv_mtx);
+
+       return rc;
+}
+
+/* Test 5: block in cv_timedwait() with a one second timeout and no
+ * signaler, then verify both that the wait timed out (rc == -1) and
+ * that at least HZ ticks actually elapsed.  Returns 0 on success or
+ * -ETIMEDOUT if the thread woke early or slept too briefly. */
+static int
+kzt_condvar_test5(struct file *file, void *arg)
+{
+        kcondvar_t condvar;
+        kmutex_t mtx;
+       clock_t time_left, time_before, time_after, time_delta;
+       int64_t whole_delta;
+       int32_t remain_delta;
+       int rc = 0;
+
+       mutex_init(&mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+       cv_init(&condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+        kzt_vprint(file, KZT_CONDVAR_TEST5_NAME, "Thread going to sleep for "
+                  "%d second and expecting to be woken by timeout\n", 1);
+
+       /* Allow a 1 second timeout, plenty long to validate correctness. */
+       time_before = lbolt;
+       mutex_enter(&mtx);
+       time_left = cv_timedwait(&condvar, &mtx, lbolt + HZ);
+       mutex_exit(&mtx);
+       time_after = lbolt;
+       time_delta = time_after - time_before; /* XXX - Handle jiffie wrap */
+       /* Split elapsed ticks into whole seconds (whole_delta) and the
+        * leftover ticks (remain_delta) for the log messages below. */
+       whole_delta  = time_delta;
+       remain_delta = do_div(whole_delta, HZ);
+
+       if (time_left == -1) {
+               if (time_delta >= HZ) {
+                       kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
+                                  "Thread correctly timed out and was asleep "
+                                  "for %d.%d seconds (%d second min)\n",
+                                  (int)whole_delta, remain_delta, 1);
+               } else {
+                       /* Timed out but woke before a full second passed */
+                       kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
+                                  "Thread correctly timed out but was only "
+                                  "asleep for %d.%d seconds (%d second "
+                                  "min)\n", (int)whole_delta, remain_delta, 1);
+                       rc = -ETIMEDOUT;
+               }
+       } else {
+               kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
+                          "Thread exited after only %d.%d seconds, it "
+                          "did not hit the %d second timeout\n",
+                          (int)whole_delta, remain_delta, 1);
+               rc = -ETIMEDOUT;
+       }
+
+       cv_destroy(&condvar);
+       mutex_destroy(&mtx);
+
+       return rc;
+}
+
+/* Register the condvar test subsystem and its five tests.  Returns a
+ * newly allocated kzt_subsystem_t (freed by kzt_condvar_fini()) or
+ * NULL on allocation failure. */
+kzt_subsystem_t *
+kzt_condvar_init(void)
+{
+        kzt_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        /* Zeroing first guarantees the names below stay NUL terminated
+         * even when truncated, since at most SIZE - 1 bytes are copied. */
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, KZT_CONDVAR_NAME, KZT_NAME_SIZE - 1);
+        strncpy(sub->desc.desc, KZT_CONDVAR_DESC, KZT_DESC_SIZE - 1);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+        INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = KZT_SUBSYSTEM_CONDVAR;
+
+        KZT_TEST_INIT(sub, KZT_CONDVAR_TEST1_NAME, KZT_CONDVAR_TEST1_DESC,
+                      KZT_CONDVAR_TEST1_ID, kzt_condvar_test1);
+        KZT_TEST_INIT(sub, KZT_CONDVAR_TEST2_NAME, KZT_CONDVAR_TEST2_DESC,
+                      KZT_CONDVAR_TEST2_ID, kzt_condvar_test2);
+        KZT_TEST_INIT(sub, KZT_CONDVAR_TEST3_NAME, KZT_CONDVAR_TEST3_DESC,
+                      KZT_CONDVAR_TEST3_ID, kzt_condvar_test3);
+        KZT_TEST_INIT(sub, KZT_CONDVAR_TEST4_NAME, KZT_CONDVAR_TEST4_DESC,
+                      KZT_CONDVAR_TEST4_ID, kzt_condvar_test4);
+        KZT_TEST_INIT(sub, KZT_CONDVAR_TEST5_NAME, KZT_CONDVAR_TEST5_DESC,
+                      KZT_CONDVAR_TEST5_ID, kzt_condvar_test5);
+
+        return sub;
+}
+
+/* Unregister the five condvar tests (in reverse of registration
+ * order) and free the subsystem allocated by kzt_condvar_init(). */
+void
+kzt_condvar_fini(kzt_subsystem_t *sub)
+{
+        ASSERT(sub);
+        KZT_TEST_FINI(sub, KZT_CONDVAR_TEST5_ID);
+        KZT_TEST_FINI(sub, KZT_CONDVAR_TEST4_ID);
+        KZT_TEST_FINI(sub, KZT_CONDVAR_TEST3_ID);
+        KZT_TEST_FINI(sub, KZT_CONDVAR_TEST2_ID);
+        KZT_TEST_FINI(sub, KZT_CONDVAR_TEST1_ID);
+
+        kfree(sub);
+}
+
+/* Report this subsystem's unique identifier */
+int
+kzt_condvar_id(void)
+{
+        return KZT_SUBSYSTEM_CONDVAR;
+}
diff --git a/modules/splat/splat-ctl.c b/modules/splat/splat-ctl.c
new file mode 100644 (file)
index 0000000..9bff58a
--- /dev/null
@@ -0,0 +1,677 @@
+/*
+ * My intent is to create a loadable kzt (kernel ZFS test) module
+ * which can be used as an access point to run in-kernel ZFS regression
+ * tests.  Why do we need this when we have ztest?  Well, ztest.c only
+ * exercises the ZFS code proper; it cannot be used to validate the
+ * Linux kernel shim primitives.  This also provides a nice hook for
+ * any other in-kernel regression tests we wish to run, such as direct
+ * in-kernel tests against the DMU.
+ *
+ * The basic design is that the kzt module is constructed of
+ * various kzt_* source files, each of which contains regression tests.
+ * For example the kzt_linux_kmem.c file contains tests for validating
+ * kmem correctness.  When the kzt module is loaded kzt_*_init()
+ * will be called for each subsystems tests, similarly kzt_*_fini() is
+ * called when the kzt module is removed.  Each test can then be
+ * run by making an ioctl() call from a userspace control application
+ * to pick the subsystem and test which should be run.
+ *
+ * Author: Brian Behlendorf
+ */
+
+#include <splat-ctl.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+#include <linux/devfs_fs_kernel.h>
+#endif
+
+#include <linux/cdev.h>
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+static struct class_simple *kzt_class;
+#else
+static struct class *kzt_class;
+#endif
+static struct list_head kzt_module_list;
+static spinlock_t kzt_module_lock;
+
+/* Open a kztctl device: allocate the per-open kzt_info_t plus its
+ * vmalloc'ed output buffer and stash it in file->private_data. */
+static int
+kzt_open(struct inode *inode, struct file *file)
+{
+       kzt_info_t *info;
+       unsigned int minor = iminor(inode);
+
+       if (minor >= KZT_MINORS)
+               return -ENXIO;
+
+       info = kmalloc(sizeof(*info), GFP_KERNEL);
+       if (info == NULL)
+               return -ENOMEM;
+
+       spin_lock_init(&info->info_lock);
+       info->info_size = KZT_INFO_BUFFER_SIZE;
+       info->info_buffer = vmalloc(KZT_INFO_BUFFER_SIZE);
+       if (info->info_buffer == NULL) {
+               kfree(info);
+               return -ENOMEM;
+       }
+
+       info->info_head = info->info_buffer;
+       file->private_data = info;
+
+       kzt_print(file, "Kernel ZFS Tests %s\n", KZT_VERSION);
+
+        return 0;
+}
+
+/* Release a kztctl device: free the info buffer and the per-open
+ * state allocated in kzt_open(). */
+static int
+kzt_release(struct inode *inode, struct file *file)
+{
+       kzt_info_t *info = file->private_data;
+
+       if (iminor(inode) >= KZT_MINORS)
+               return -ENXIO;
+
+       ASSERT(info);
+       ASSERT(info->info_buffer);
+
+       vfree(info->info_buffer);
+       kfree(info);
+
+       return 0;
+}
+
+/* KZT_CFG_BUFFER_CLEAR: zero the accumulated output buffer and reset
+ * the write head to its start.  Always succeeds. */
+static int
+kzt_buffer_clear(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
+{
+       kzt_info_t *info = file->private_data;
+
+       ASSERT(info);
+       ASSERT(info->info_buffer);
+
+       spin_lock(&info->info_lock);
+       info->info_head = info->info_buffer;
+       memset(info->info_buffer, 0, info->info_size);
+       spin_unlock(&info->info_lock);
+
+       return 0;
+}
+
+/* KZT_CFG_BUFFER_SIZE: query (cfg_arg1 == 0) or resize (cfg_arg1 > 0)
+ * the output buffer, returning the current size in cfg_rc1.  On a
+ * resize the old contents are copied (truncated when shrinking) and
+ * any remainder is zero filled.  Returns 0, -ENOMEM or -EFAULT. */
+static int
+kzt_buffer_size(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
+{
+       kzt_info_t *info = (kzt_info_t *)file->private_data;
+       char *buf = NULL;
+       int min, size, rc = 0;
+
+       ASSERT(info);
+       ASSERT(info->info_buffer);
+
+       /* vmalloc() may sleep so the allocation must be performed
+        * before taking the non-sleeping spin lock. */
+       size = kcfg->cfg_arg1;
+       if (size > 0) {
+               buf = (char *)vmalloc(size);
+               if (buf == NULL)
+                       return -ENOMEM;
+       }
+
+       spin_lock(&info->info_lock);
+       if (buf != NULL) {
+               /* Zero fill and truncate contents when copying buffer */
+               min = ((size < info->info_size) ? size : info->info_size);
+               memset(buf, 0, size);
+               memcpy(buf, info->info_buffer, min);
+               vfree(info->info_buffer);
+               info->info_size = size;
+               info->info_buffer = buf;
+               info->info_head = info->info_buffer;
+       }
+
+       kcfg->cfg_rc1 = info->info_size;
+       spin_unlock(&info->info_lock);
+
+       /* copy_to_user() may fault and sleep, so do it unlocked */
+       if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+               rc = -EFAULT;
+
+       return rc;
+}
+
+
+/* Look up a registered subsystem by id.  Returns the subsystem or
+ * NULL when no match exists.  NOTE(review): the pointer is used after
+ * the module lock is dropped; this presumably relies on subsystems
+ * only being unregistered at module unload -- confirm. */
+static kzt_subsystem_t *
+kzt_subsystem_find(int id) {
+       kzt_subsystem_t *sub;
+
+        spin_lock(&kzt_module_lock);
+        list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
+               if (id == sub->desc.id) {
+                       spin_unlock(&kzt_module_lock);
+                       return sub;
+               }
+        }
+        spin_unlock(&kzt_module_lock);
+
+       return NULL;
+}
+
+/* KZT_CFG_SUBSYSTEM_COUNT: place the number of registered subsystems
+ * in cfg_rc1 and copy the result back to user space. */
+static int
+kzt_subsystem_count(kzt_cfg_t *kcfg, unsigned long arg)
+{
+       kzt_subsystem_t *sub;
+       int count = 0;
+
+        spin_lock(&kzt_module_lock);
+        list_for_each_entry(sub, &kzt_module_list, subsystem_list)
+               count++;
+        spin_unlock(&kzt_module_lock);
+
+       kcfg->cfg_rc1 = count;
+
+       if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+               return -EFAULT;
+
+       return 0;
+}
+
+/* KZT_CFG_SUBSYSTEM_LIST: copy out up to cfg_data.kzt_subsystems.size
+ * subsystem descriptors.  The number actually filled in is returned
+ * in cfg_rc1; the list is truncated if the caller's array is too
+ * small.  Returns 0, -ENOMEM or -EFAULT. */
+static int
+kzt_subsystem_list(kzt_cfg_t *kcfg, unsigned long arg)
+{
+       kzt_subsystem_t *sub;
+       kzt_cfg_t *tmp;
+       int size, i = 0;
+
+       /* Structure will be sized large enough for N subsystem entries
+        * which is passed in by the caller.  On exit the number of
+        * entries filled in with valid subsystems will be stored in
+        * cfg_rc1.  If the caller does not provide enough entries
+        * for all subsystems we will truncate the list to avoid overrun.
+        */
+       size = sizeof(*tmp) + kcfg->cfg_data.kzt_subsystems.size *
+              sizeof(kzt_user_t);
+       tmp = kmalloc(size, GFP_KERNEL);
+       if (tmp == NULL)
+               return -ENOMEM;
+
+       /* Local 'tmp' is used as the structure copied back to user space */
+       memset(tmp, 0, size);
+       memcpy(tmp, kcfg, sizeof(*kcfg));
+
+        spin_lock(&kzt_module_lock);
+        list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
+               /* Truncate BEFORE writing; only descs[0 .. size-1] were
+                * allocated, so the bound must be checked first. */
+               if (i == kcfg->cfg_data.kzt_subsystems.size)
+                       break;
+
+               strncpy(tmp->cfg_data.kzt_subsystems.descs[i].name,
+                       sub->desc.name, KZT_NAME_SIZE);
+               strncpy(tmp->cfg_data.kzt_subsystems.descs[i].desc,
+                       sub->desc.desc, KZT_DESC_SIZE);
+               tmp->cfg_data.kzt_subsystems.descs[i].id = sub->desc.id;
+               i++;
+        }
+        spin_unlock(&kzt_module_lock);
+       tmp->cfg_rc1 = i;
+
+       if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
+               kfree(tmp);
+               return -EFAULT;
+       }
+
+       kfree(tmp);
+       return 0;
+}
+
+/* KZT_CFG_TEST_COUNT: place the number of tests registered for the
+ * subsystem named by cfg_arg1 into cfg_rc1 and copy the result back
+ * to user space.  Returns 0, -EINVAL or -EFAULT. */
+static int
+kzt_test_count(kzt_cfg_t *kcfg, unsigned long arg)
+{
+       kzt_subsystem_t *sub;
+       kzt_test_t *test;
+       int count = 0;
+
+       /* Subsystem ID passed as arg1 */
+       sub = kzt_subsystem_find(kcfg->cfg_arg1);
+       if (sub == NULL)
+               return -EINVAL;
+
+        spin_lock(&(sub->test_lock));
+        list_for_each_entry(test, &(sub->test_list), test_list)
+               count++;
+        spin_unlock(&(sub->test_lock));
+
+       kcfg->cfg_rc1 = count;
+
+       if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+               return -EFAULT;
+
+       return 0;
+}
+
+/* KZT_CFG_TEST_LIST: copy out up to cfg_data.kzt_tests.size test
+ * descriptors for the subsystem named by cfg_arg1.  The number filled
+ * in is returned in cfg_rc1; the list is truncated if the caller's
+ * array is too small.  Returns 0, -EINVAL, -ENOMEM or -EFAULT. */
+static int
+kzt_test_list(kzt_cfg_t *kcfg, unsigned long arg)
+{
+       kzt_subsystem_t *sub;
+       kzt_test_t *test;
+       kzt_cfg_t *tmp;
+       int size, i = 0;
+
+       /* Subsystem ID passed as arg1 */
+       sub = kzt_subsystem_find(kcfg->cfg_arg1);
+       if (sub == NULL)
+               return -EINVAL;
+
+       /* Structure will be sized large enough for N test entries
+        * which is passed in by the caller.  On exit the number of
+        * entries filled in with valid tests will be stored in
+        * cfg_rc1.  If the caller does not provide enough entries
+        * for all tests we will truncate the list to avoid overrun.
+        */
+       size = sizeof(*tmp)+kcfg->cfg_data.kzt_tests.size*sizeof(kzt_user_t);
+       tmp = kmalloc(size, GFP_KERNEL);
+       if (tmp == NULL)
+               return -ENOMEM;
+
+       /* Local 'tmp' is used as the structure copied back to user space */
+       memset(tmp, 0, size);
+       memcpy(tmp, kcfg, sizeof(*kcfg));
+
+        spin_lock(&(sub->test_lock));
+        list_for_each_entry(test, &(sub->test_list), test_list) {
+               /* Truncate BEFORE writing; only descs[0 .. size-1] were
+                * allocated, so the bound must be checked first. */
+               if (i == kcfg->cfg_data.kzt_tests.size)
+                       break;
+
+               strncpy(tmp->cfg_data.kzt_tests.descs[i].name,
+                       test->desc.name, KZT_NAME_SIZE);
+               strncpy(tmp->cfg_data.kzt_tests.descs[i].desc,
+                       test->desc.desc, KZT_DESC_SIZE);
+               tmp->cfg_data.kzt_tests.descs[i].id = test->desc.id;
+               i++;
+        }
+        spin_unlock(&(sub->test_lock));
+       tmp->cfg_rc1 = i;
+
+       if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
+               kfree(tmp);
+               return -EFAULT;
+       }
+
+       kfree(tmp);
+       return 0;
+}
+
+/* Locate test 'cmd' in subsystem 'sub' and run it with the per-test
+ * argument.  The callback is invoked after the test_lock is dropped,
+ * so tests may sleep.  Returns the test's result or -EINVAL for an
+ * unknown test id.  (Despite the name this runs the test, it does not
+ * merely validate the id.) */
+static int
+kzt_validate(struct file *file, kzt_subsystem_t *sub, int cmd, void *arg)
+{
+        kzt_test_t *test;
+
+        spin_lock(&(sub->test_lock));
+        list_for_each_entry(test, &(sub->test_list), test_list) {
+                if (test->desc.id == cmd) {
+                       spin_unlock(&(sub->test_lock));
+                        return test->test(file, arg);
+                }
+        }
+        spin_unlock(&(sub->test_lock));
+
+        return -EINVAL;
+}
+
+/* Handle a KZT_CFG ioctl: copy in the config request, verify its
+ * magic, and dispatch on cfg_cmd to the matching handler. */
+static int
+kzt_ioctl_cfg(struct file *file, unsigned long arg)
+{
+       kzt_cfg_t kcfg;
+       int rc = 0;
+
+       if (copy_from_user(&kcfg, (kzt_cfg_t *)arg, sizeof(kcfg)))
+               return -EFAULT;
+
+       if (kcfg.cfg_magic != KZT_CFG_MAGIC) {
+               kzt_print(file, "Bad config magic 0x%x != 0x%x\n",
+                         kcfg.cfg_magic, KZT_CFG_MAGIC);
+               return -EINVAL;
+       }
+
+       switch (kcfg.cfg_cmd) {
+       case KZT_CFG_BUFFER_CLEAR:
+               /* cfg_arg1 - Unused; cfg_rc1 - Unused */
+               rc = kzt_buffer_clear(file, &kcfg, arg);
+               break;
+       case KZT_CFG_BUFFER_SIZE:
+               /* cfg_arg1 - 0 query size, >0 resize;
+                * cfg_rc1  - Set to current buffer size */
+               rc = kzt_buffer_size(file, &kcfg, arg);
+               break;
+       case KZT_CFG_SUBSYSTEM_COUNT:
+               /* cfg_arg1 - Unused; cfg_rc1 - number of subsystems */
+               rc = kzt_subsystem_count(&kcfg, arg);
+               break;
+       case KZT_CFG_SUBSYSTEM_LIST:
+               /* cfg_rc1 - number of subsystems;
+                * cfg_data.kzt_subsystems - populated with subsystems */
+               rc = kzt_subsystem_list(&kcfg, arg);
+               break;
+       case KZT_CFG_TEST_COUNT:
+               /* cfg_arg1 - target subsystem; cfg_rc1 - number of tests */
+               rc = kzt_test_count(&kcfg, arg);
+               break;
+       case KZT_CFG_TEST_LIST:
+               /* cfg_arg1 - target subsystem; cfg_rc1 - number of tests;
+                * cfg_data.kzt_subsystems - populated with tests */
+               rc = kzt_test_list(&kcfg, arg);
+               break;
+       default:
+               kzt_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
+               rc = -EINVAL;
+               break;
+       }
+
+       return rc;
+}
+
+/* Handle a KZT_CMD ioctl: copy in the command, verify its magic,
+ * copy in any opaque per-test data, then locate and run the requested
+ * test.  Returns the test result, -EINVAL, -EFAULT or -ENOMEM. */
+static int
+kzt_ioctl_cmd(struct file *file, unsigned long arg)
+{
+       kzt_subsystem_t *sub;
+       kzt_cmd_t kcmd;
+       int rc = -EINVAL;
+       void *data = NULL;
+
+       /* The user pointer holds a kzt_cmd_t, not a kzt_cfg_t */
+       if (copy_from_user(&kcmd, (kzt_cmd_t *)arg, sizeof(kcmd)))
+               return -EFAULT;
+
+       if (kcmd.cmd_magic != KZT_CMD_MAGIC) {
+               /* Report the expected command magic (was KZT_CFG_MAGIC) */
+               kzt_print(file, "Bad command magic 0x%x != 0x%x\n",
+                         kcmd.cmd_magic, KZT_CMD_MAGIC);
+               return -EINVAL;
+       }
+
+       /* Allocate memory for any opaque data the caller needed to pass on */
+       if (kcmd.cmd_data_size > 0) {
+               data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
+               if (data == NULL)
+                       return -ENOMEM;
+
+               if (copy_from_user(data, (void *)(arg + offsetof(kzt_cmd_t,
+                                  cmd_data_str)), kcmd.cmd_data_size)) {
+                       kfree(data);
+                       return -EFAULT;
+               }
+       }
+
+       sub = kzt_subsystem_find(kcmd.cmd_subsystem);
+       if (sub != NULL)
+               rc = kzt_validate(file, sub, kcmd.cmd_test, data);
+       else
+               rc = -EINVAL;
+
+       if (data != NULL)
+               kfree(data);
+
+       return rc;
+}
+
+/* Top-level ioctl dispatcher for kztctl: routes KZT_CFG and KZT_CMD
+ * requests, rejecting tty ioctls and out-of-range minors. */
+static int
+kzt_ioctl(struct inode *inode, struct file *file,
+         unsigned int cmd, unsigned long arg)
+{
+        unsigned int minor = iminor(file->f_dentry->d_inode);
+       int rc = 0;
+
+       /* Ignore tty ioctls */
+       if ((cmd & 0xffffff00) == ((int)'T') << 8)
+               return -ENOTTY;
+
+       if (minor >= KZT_MINORS)
+               return -ENXIO;
+
+       switch (cmd) {
+               case KZT_CFG:
+                       rc = kzt_ioctl_cfg(file, arg);
+                       break;
+               case KZT_CMD:
+                       rc = kzt_ioctl_cmd(file, arg);
+                       break;
+               default:
+                       kzt_print(file, "Bad ioctl command %d\n", cmd);
+                       rc = -EINVAL;
+                       break;
+       }
+
+       return rc;
+}
+
+/* Write from user space into the in-kernel status buffer at *ppos.
+ * The buffer's principal use is passing test status back to user
+ * space, but there is no reason to prevent writes.  Returns bytes
+ * written or -ENXIO/-EFBIG/-EFAULT on error. */
+static ssize_t kzt_write(struct file *file, const char __user *buf,
+                         size_t count, loff_t *ppos)
+{
+        unsigned int minor = iminor(file->f_dentry->d_inode);
+       kzt_info_t *info = (kzt_info_t *)file->private_data;
+       int rc = 0;
+
+       if (minor >= KZT_MINORS)
+               return -ENXIO;
+
+       ASSERT(info);
+       ASSERT(info->info_buffer);
+
+       /* NOTE(review): copy_from_user() may fault and sleep while this
+        * spin lock is held; a sleeping lock may be needed -- confirm. */
+       spin_lock(&info->info_lock);
+
+       /* Write beyond EOF */
+       if (*ppos >= info->info_size) {
+               rc = -EFBIG;
+               goto out;
+       }
+
+       /* Resize count if beyond EOF */
+       if (*ppos + count > info->info_size)
+               count = info->info_size - *ppos;
+
+       /* Honor the file offset; previously the copy always landed at
+        * the start of the buffer regardless of *ppos. */
+       if (copy_from_user(info->info_buffer + *ppos, buf, count)) {
+               rc = -EFAULT;
+               goto out;
+       }
+
+       *ppos += count;
+       rc = count;
+out:
+       spin_unlock(&info->info_lock);
+       return rc;
+}
+
+/* Read from the in-kernel status buffer at *ppos into user space.
+ * Returns bytes read (0 at or past EOF) or -ENXIO/-EFAULT.
+ * NOTE(review): copy_to_user() may fault and sleep while the spin
+ * lock is held -- confirm whether a sleeping lock is required. */
+static ssize_t kzt_read(struct file *file, char __user *buf,
+                       size_t count, loff_t *ppos)
+{
+        unsigned int minor = iminor(file->f_dentry->d_inode);
+       kzt_info_t *info = (kzt_info_t *)file->private_data;
+       int rc = 0;
+
+       if (minor >= KZT_MINORS)
+               return -ENXIO;
+
+       ASSERT(info);
+       ASSERT(info->info_buffer);
+
+       spin_lock(&info->info_lock);
+
+       /* Read beyond EOF */
+       if (*ppos >= info->info_size)
+               goto out;
+
+       /* Resize count if beyond EOF */
+       if (*ppos + count > info->info_size)
+               count = info->info_size - *ppos;
+
+       if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
+               rc = -EFAULT;
+               goto out;
+       }
+
+       *ppos += count;
+       rc = count;
+out:
+       spin_unlock(&info->info_lock);
+       return rc;
+}
+
+/* Reposition the file offset within the status buffer.  Supports
+ * SEEK_SET, SEEK_CUR and SEEK_END.  Returns the new offset or a
+ * negative error code. */
+static loff_t kzt_seek(struct file *file, loff_t offset, int origin)
+{
+        unsigned int minor = iminor(file->f_dentry->d_inode);
+       kzt_info_t *info = (kzt_info_t *)file->private_data;
+       loff_t rc = -EINVAL; /* loff_t so large offsets are not truncated */
+
+       if (minor >= KZT_MINORS)
+               return -ENXIO;
+
+       ASSERT(info);
+       ASSERT(info->info_buffer);
+
+       spin_lock(&info->info_lock);
+
+       switch (origin) {
+       case 0: /* SEEK_SET - No-op just do it */
+               break;
+       case 1: /* SEEK_CUR - Seek from current */
+               offset = file->f_pos + offset;
+               break;
+       case 2: /* SEEK_END - Seek from end */
+               offset = info->info_size + offset;
+               break;
+       }
+
+       if (offset >= 0) {
+               file->f_pos = offset;
+               file->f_version = 0;
+               rc = offset;
+       }
+
+       spin_unlock(&info->info_lock);
+
+       return rc;
+}
+
+/* Character device entry points for the kztctl control device */
+static struct file_operations kzt_fops = {
+       .owner   = THIS_MODULE,
+       .open    = kzt_open,
+       .release = kzt_release,
+       .ioctl   = kzt_ioctl,
+       .read    = kzt_read,
+       .write   = kzt_write,
+       .llseek  = kzt_seek,
+};
+
+/* Static cdev; wired to kzt_fops via cdev_init() in kzt_init() */
+static struct cdev kzt_cdev = {
+       .owner  =       THIS_MODULE,
+       .kobj   =       { .name = "kztctl", },
+};
+
+/* Module load: register every test subsystem, then set up the kztctl
+ * character device (chrdev region, cdev, sysfs/udev class).  Each
+ * failure path unwinds exactly the steps completed before it. */
+static int __init
+kzt_init(void)
+{
+       dev_t dev;
+       int rc;
+
+       spin_lock_init(&kzt_module_lock);
+       INIT_LIST_HEAD(&kzt_module_list);
+
+       /* Register each subsystem's tests on kzt_module_list */
+       KZT_SUBSYSTEM_INIT(kmem);
+       KZT_SUBSYSTEM_INIT(taskq);
+       KZT_SUBSYSTEM_INIT(krng);
+       KZT_SUBSYSTEM_INIT(mutex);
+       KZT_SUBSYSTEM_INIT(condvar);
+       KZT_SUBSYSTEM_INIT(thread);
+       KZT_SUBSYSTEM_INIT(rwlock);
+       KZT_SUBSYSTEM_INIT(time);
+
+       dev = MKDEV(KZT_MAJOR, 0);
+        if ((rc = register_chrdev_region(dev, KZT_MINORS, "kztctl")))
+               goto error;
+
+       /* Support for registering a character driver */
+       cdev_init(&kzt_cdev, &kzt_fops);
+       if ((rc = cdev_add(&kzt_cdev, dev, KZT_MINORS))) {
+               printk(KERN_ERR "kzt: Error adding cdev, %d\n", rc);
+               kobject_put(&kzt_cdev.kobj);
+               unregister_chrdev_region(dev, KZT_MINORS);
+               goto error;
+       }
+
+       /* Support for udev make driver info available in sysfs */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+        kzt_class = class_simple_create(THIS_MODULE, "kzt");
+#else
+        kzt_class = class_create(THIS_MODULE, "kzt");
+#endif
+       if (IS_ERR(kzt_class)) {
+               rc = PTR_ERR(kzt_class);
+               printk(KERN_ERR "kzt: Error creating kzt class, %d\n", rc);
+               cdev_del(&kzt_cdev);
+               unregister_chrdev_region(dev, KZT_MINORS);
+               goto error;
+       }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+       class_simple_device_add(kzt_class, MKDEV(KZT_MAJOR, 0),
+                               NULL, "kztctl");
+#else
+       class_device_create(kzt_class, NULL, MKDEV(KZT_MAJOR, 0),
+                           NULL, "kztctl");
+#endif
+
+       printk(KERN_INFO "kzt: Kernel ZFS Tests %s Loaded\n", KZT_VERSION);
+       return 0;
+error:
+       printk(KERN_ERR "kzt: Error registering kzt device, %d\n", rc);
+       return rc;
+}
+
+/* Module unload: tear down the device node and class, then
+ * unregister the test subsystems in the reverse of the order they
+ * were registered in kzt_init(). */
+static void
+kzt_fini(void)
+{
+       dev_t dev = MKDEV(KZT_MAJOR, 0);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+        class_simple_device_remove(dev);
+        class_simple_destroy(kzt_class);
+        devfs_remove("kzt/kztctl");
+        devfs_remove("kzt");
+#else
+        class_device_destroy(kzt_class, dev);
+        class_destroy(kzt_class);
+#endif
+        cdev_del(&kzt_cdev);
+        unregister_chrdev_region(dev, KZT_MINORS);
+
+       KZT_SUBSYSTEM_FINI(time);
+       KZT_SUBSYSTEM_FINI(rwlock);
+       KZT_SUBSYSTEM_FINI(thread);
+       KZT_SUBSYSTEM_FINI(condvar);
+       KZT_SUBSYSTEM_FINI(mutex);
+       KZT_SUBSYSTEM_FINI(krng);
+       KZT_SUBSYSTEM_FINI(taskq);
+       KZT_SUBSYSTEM_FINI(kmem);
+
+       ASSERT(list_empty(&kzt_module_list));
+       printk(KERN_INFO "kzt: Kernel ZFS Tests %s Unloaded\n", KZT_VERSION);
+}
+
+module_init(kzt_init);
+module_exit(kzt_fini);
+
+MODULE_AUTHOR("Lawrence Livermore National Labs");
+MODULE_DESCRIPTION("Kernel ZFS Test");
+MODULE_LICENSE("GPL");
+
diff --git a/modules/splat/splat-kmem.c b/modules/splat/splat-kmem.c
new file mode 100644 (file)
index 0000000..d0af3fc
--- /dev/null
@@ -0,0 +1,364 @@
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_KMEM             0x0100
+#define KZT_KMEM_NAME                  "kmem"
+#define KZT_KMEM_DESC                  "Kernel Malloc/Slab Tests"
+
+#define KZT_KMEM_TEST1_ID              0x0101
+#define KZT_KMEM_TEST1_NAME            "kmem_alloc"
+#define KZT_KMEM_TEST1_DESC            "Memory allocation test (kmem_alloc)"
+
+#define KZT_KMEM_TEST2_ID              0x0102
+#define KZT_KMEM_TEST2_NAME            "kmem_zalloc"
+#define KZT_KMEM_TEST2_DESC            "Memory allocation test (kmem_zalloc)"
+
+#define KZT_KMEM_TEST3_ID              0x0103
+#define KZT_KMEM_TEST3_NAME            "slab_alloc"
+#define KZT_KMEM_TEST3_DESC            "Slab constructor/destructor test"
+
+#define KZT_KMEM_TEST4_ID              0x0104
+#define KZT_KMEM_TEST4_NAME            "slab_reap"
+#define KZT_KMEM_TEST4_DESC            "Slab reaping test"
+
+#define KZT_KMEM_ALLOC_COUNT           10
+/* XXX - This test may fail under tight memory conditions */
+/*
+ * kzt_kmem_test1: batch kmem_alloc() test.  For each power-of-two size
+ * from PAGE_SIZE up to 8 * PAGE_SIZE, allocate KZT_KMEM_ALLOC_COUNT
+ * buffers, free them, and log the success ratio.  Returns 0 on success
+ * or -ENOMEM if any allocation in a batch failed.
+ */
+static int
+kzt_kmem_test1(struct file *file, void *arg)
+{
+       void *ptr[KZT_KMEM_ALLOC_COUNT];
+       int size = PAGE_SIZE;
+       int i, count, rc = 0;
+
+       while ((!rc) && (size < (PAGE_SIZE * 16))) {
+               count = 0;
+
+               for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
+                       ptr[i] = kmem_alloc(size, KM_SLEEP);
+                       if (ptr[i])
+                               count++;
+               }
+
+               /* Free only the allocations which succeeded */
+               for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
+                       if (ptr[i])
+                               kmem_free(ptr[i], size);
+
+               kzt_vprint(file, KZT_KMEM_TEST1_NAME,
+                          "%d byte allocations, %d/%d successful\n",
+                          size, count, KZT_KMEM_ALLOC_COUNT);
+               if (count != KZT_KMEM_ALLOC_COUNT)
+                       rc = -ENOMEM;
+
+               size *= 2;
+       }
+
+       return rc;
+}
+
+/*
+ * kzt_kmem_test2: batch kmem_zalloc() test.  For each power-of-two size
+ * from PAGE_SIZE up to 8 * PAGE_SIZE, allocate KZT_KMEM_ALLOC_COUNT
+ * buffers and verify every byte of each returned buffer is zero.
+ * Returns 0 on success, -EFAULT if a buffer was not zero filled, or
+ * -ENOMEM if any allocation in a batch failed.
+ */
+static int
+kzt_kmem_test2(struct file *file, void *arg)
+{
+       void *ptr[KZT_KMEM_ALLOC_COUNT];
+       int size = PAGE_SIZE;
+       int i, j, count, rc = 0;
+
+       while ((!rc) && (size < (PAGE_SIZE * 16))) {
+               count = 0;
+
+               for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
+                       ptr[i] = kmem_zalloc(size, KM_SLEEP);
+                       if (ptr[i])
+                               count++;
+               }
+
+               /* Ensure buffer has been zero filled.  Skip any slot whose
+                * allocation failed; dereferencing a NULL slot would oops. */
+               for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
+                       if (!ptr[i])
+                               continue;
+
+                       for (j = 0; j < size; j++) {
+                               if (((char *)ptr[i])[j] != '\0') {
+                                       kzt_vprint(file, KZT_KMEM_TEST2_NAME,
+                                                 "%d-byte allocation was "
+                                                 "not zeroed\n", size);
+                                       rc = -EFAULT;
+                               }
+                       }
+               }
+
+               for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
+                       if (ptr[i])
+                               kmem_free(ptr[i], size);
+
+               kzt_vprint(file, KZT_KMEM_TEST2_NAME,
+                          "%d byte allocations, %d/%d successful\n",
+                          size, count, KZT_KMEM_ALLOC_COUNT);
+               if (count != KZT_KMEM_ALLOC_COUNT)
+                       rc = -ENOMEM;
+
+               size *= 2;
+       }
+
+       return rc;
+}
+
+#define KZT_KMEM_TEST_MAGIC            0x004488CCUL
+#define KZT_KMEM_CACHE_NAME            "kmem_test"
+#define KZT_KMEM_CACHE_SIZE            256
+#define KZT_KMEM_OBJ_COUNT             128
+#define KZT_KMEM_OBJ_RECLAIM           64
+
+/* Per-object payload used to verify constructor/destructor behavior */
+typedef struct kmem_cache_data {
+       char kcd_buf[KZT_KMEM_CACHE_SIZE];      /* pattern-filled by ctor/dtor */
+       unsigned long kcd_magic;                /* copied from private data by ctor */
+       int kcd_flag;                           /* 1 after ctor, 0 after dtor */
+} kmem_cache_data_t;
+
+/* Shared state passed as private data to the cache callbacks */
+typedef struct kmem_cache_priv {
+       unsigned long kcp_magic;                /* KZT_KMEM_TEST_MAGIC sanity value */
+       struct file *kcp_file;                  /* control file for log output */
+       kmem_cache_t *kcp_cache;                /* cache under test (test4) */
+       kmem_cache_data_t *kcp_kcd[KZT_KMEM_OBJ_COUNT]; /* outstanding objects */
+       int kcp_count;                          /* live objects (ctor++ / dtor--) */
+       int kcp_rc;                             /* result code shared with callbacks */
+} kmem_cache_priv_t;
+
+/*
+ * Slab constructor shared by tests 3 and 4: pattern-fills the object,
+ * marks it constructed, records the caller's magic, and bumps the shared
+ * live-object count so the tests can verify every ctor had a matching dtor.
+ */
+static int
+kzt_kmem_test34_constructor(void *ptr, void *priv, int flags)
+{
+       kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
+       kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+
+       if (kcd) {
+               memset(kcd->kcd_buf, 0xaa, KZT_KMEM_CACHE_SIZE);
+               kcd->kcd_flag = 1;
+
+               if (kcp) {
+                       kcd->kcd_magic = kcp->kcp_magic;
+                       kcp->kcp_count++;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Slab destructor shared by tests 3 and 4: overwrites the object with a
+ * distinct pattern, clears the constructed flag, and decrements the
+ * shared live-object count.
+ */
+static void
+kzt_kmem_test34_destructor(void *ptr, void *priv)
+{
+       kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
+       kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+
+       if (kcd) {
+               memset(kcd->kcd_buf, 0xbb, KZT_KMEM_CACHE_SIZE);
+               kcd->kcd_flag = 0;
+
+               if (kcp)
+                       kcp->kcp_count--;
+       }
+
+       return;
+}
+
+/*
+ * kzt_kmem_test3: slab constructor/destructor test.  Creates a cache,
+ * allocates one object, and verifies the constructor ran and received
+ * the private data.  Destroying the cache must then drive the live
+ * object count back to zero, proving a destructor ran per object.
+ * Returns 0 on success, -ENOMEM if the cache could not be created, or
+ * -EINVAL on any ctor/dtor misbehavior.
+ */
+static int
+kzt_kmem_test3(struct file *file, void *arg)
+{
+       kmem_cache_t *cache = NULL;
+       kmem_cache_data_t *kcd = NULL;
+       kmem_cache_priv_t kcp;
+       int rc = 0, max;
+
+       kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
+       kcp.kcp_file = file;
+       kcp.kcp_count = 0;
+       kcp.kcp_rc = 0;
+
+       cache = kmem_cache_create(KZT_KMEM_CACHE_NAME, sizeof(*kcd), 0,
+                                 kzt_kmem_test34_constructor,
+                                 kzt_kmem_test34_destructor,
+                                 NULL, &kcp, NULL, 0);
+       if (!cache) {
+               kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+                          "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
+               return -ENOMEM;
+       }
+
+       kcd = kmem_cache_alloc(cache, 0);
+       if (!kcd) {
+               kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+                          "Unable to allocate from '%s'\n",
+                          KZT_KMEM_CACHE_NAME);
+               rc = -EINVAL;
+               goto out_free;
+       }
+
+       if (!kcd->kcd_flag) {
+               kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+                          "Failed to run constructor for '%s'\n",
+                          KZT_KMEM_CACHE_NAME);
+               rc = -EINVAL;
+               goto out_free;
+       }
+
+       if (kcd->kcd_magic != kcp.kcp_magic) {
+               kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+                          "Failed to pass private data to constructor "
+                          "for '%s'\n", KZT_KMEM_CACHE_NAME);
+               rc = -EINVAL;
+               goto out_free;
+       }
+
+       max = kcp.kcp_count;
+
+       /* Destructors run lazily so it is hard to check correctness here.
+        * We assume if it doesn't crash the free worked properly */
+       kmem_cache_free(cache, kcd);
+
+       /* Destroy the entire cache which will force destructors to
+        * run and we can verify one was called for every object */
+       kmem_cache_destroy(cache);
+       if (kcp.kcp_count) {
+               kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+                          "Failed to run destructor on all slab objects "
+                          "for '%s'\n", KZT_KMEM_CACHE_NAME);
+               rc = -EINVAL;
+       }
+
+       kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+                  "%d allocated/destroyed objects for '%s'\n",
+                  max, KZT_KMEM_CACHE_NAME);
+
+       return rc;
+
+out_free:
+       if (kcd)
+               kmem_cache_free(cache, kcd);
+
+       kmem_cache_destroy(cache);
+       return rc;
+}
+
+/*
+ * Cache reclaim callback for test 4: frees the first
+ * KZT_KMEM_OBJ_RECLAIM outstanding objects back to the cache and NULLs
+ * their slots so the cleanup loop in kzt_kmem_test4() won't double-free.
+ */
+static void
+kzt_kmem_test4_reclaim(void *priv)
+{
+       kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+       int i;
+
+       kzt_vprint(kcp->kcp_file, KZT_KMEM_TEST4_NAME,
+                   "Reaping %d objects from '%s'\n",
+                  KZT_KMEM_OBJ_RECLAIM, KZT_KMEM_CACHE_NAME);
+       for (i = 0; i < KZT_KMEM_OBJ_RECLAIM; i++) {
+               if (kcp->kcp_kcd[i]) {
+                       kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
+                       kcp->kcp_kcd[i] = NULL;
+               }
+       }
+
+       return;
+}
+
+/*
+ * kzt_kmem_test4: slab reaping test.  Allocates up to KZT_KMEM_OBJ_COUNT
+ * objects, forces a reap, and checks that the number of surviving
+ * objects lands within +/-10% of the expected post-reclaim target.
+ * Returns 0 on success, -ENOMEM if the cache (or every object) could
+ * not be allocated, or -EINVAL if reclaim missed the target range.
+ */
+static int
+kzt_kmem_test4(struct file *file, void *arg)
+{
+       kmem_cache_t *cache;
+       kmem_cache_priv_t kcp;
+       int i, rc = 0, max, reclaim_percent, target_percent;
+
+       kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
+       kcp.kcp_file = file;
+       kcp.kcp_count = 0;
+       kcp.kcp_rc = 0;
+
+       cache = kmem_cache_create(KZT_KMEM_CACHE_NAME,
+                                 sizeof(kmem_cache_data_t), 0,
+                                 kzt_kmem_test34_constructor,
+                                 kzt_kmem_test34_destructor,
+                                 kzt_kmem_test4_reclaim, &kcp, NULL, 0);
+       if (!cache) {
+               kzt_vprint(file, KZT_KMEM_TEST4_NAME,
+                          "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
+               return -ENOMEM;
+       }
+
+       kcp.kcp_cache = cache;
+
+       for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++) {
+               /* All allocations need not succeed */
+               kcp.kcp_kcd[i] = kmem_cache_alloc(cache, 0);
+               if (!kcp.kcp_kcd[i]) {
+                       kzt_vprint(file, KZT_KMEM_TEST4_NAME,
+                                  "Unable to allocate from '%s'\n",
+                                  KZT_KMEM_CACHE_NAME);
+               }
+       }
+
+       max = kcp.kcp_count;
+       if (max == 0) {
+               /* Every allocation failed; bail out rather than divide
+                * by zero when computing reclaim_percent below. */
+               kmem_cache_destroy(cache);
+               return -ENOMEM;
+       }
+
+       /* Force shrinker to run */
+       kmem_reap();
+
+       /* Reclaim reclaimed objects, this ensures the destructors are run */
+       kmem_cache_reap_now(cache);
+
+       reclaim_percent = ((kcp.kcp_count * 100) / max);
+       target_percent = (((KZT_KMEM_OBJ_COUNT - KZT_KMEM_OBJ_RECLAIM) * 100) /
+                           KZT_KMEM_OBJ_COUNT);
+       kzt_vprint(file, KZT_KMEM_TEST4_NAME,
+                   "%d%% (%d/%d) of previous size, target of "
+                  "%d%%-%d%% for '%s'\n", reclaim_percent, kcp.kcp_count,
+                  max, target_percent - 10, target_percent + 10,
+                  KZT_KMEM_CACHE_NAME);
+       if ((reclaim_percent < target_percent - 10) ||
+           (reclaim_percent > target_percent + 10))
+               rc = -EINVAL;
+
+       /* Cleanup our mess */
+       for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++)
+               if (kcp.kcp_kcd[i])
+                       kmem_cache_free(cache, kcp.kcp_kcd[i]);
+
+       kmem_cache_destroy(cache);
+
+       return rc;
+}
+
+/*
+ * Allocate and register the kmem test subsystem descriptor and its four
+ * tests.  Returns the new subsystem, or NULL on allocation failure.
+ * The caller owns the returned memory and releases it via kzt_kmem_fini().
+ * NOTE(review): strncpy() will not NUL-terminate desc.name/desc.desc if
+ * the source fills the buffer — confirm KZT_NAME_SIZE/KZT_DESC_SIZE
+ * exceed the string lengths.
+ */
+kzt_subsystem_t *
+kzt_kmem_init(void)
+{
+        kzt_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, KZT_KMEM_NAME, KZT_NAME_SIZE);
+       strncpy(sub->desc.desc, KZT_KMEM_DESC, KZT_DESC_SIZE);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+       INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = KZT_SUBSYSTEM_KMEM;
+
+        KZT_TEST_INIT(sub, KZT_KMEM_TEST1_NAME, KZT_KMEM_TEST1_DESC,
+                     KZT_KMEM_TEST1_ID, kzt_kmem_test1);
+        KZT_TEST_INIT(sub, KZT_KMEM_TEST2_NAME, KZT_KMEM_TEST2_DESC,
+                     KZT_KMEM_TEST2_ID, kzt_kmem_test2);
+        KZT_TEST_INIT(sub, KZT_KMEM_TEST3_NAME, KZT_KMEM_TEST3_DESC,
+                     KZT_KMEM_TEST3_ID, kzt_kmem_test3);
+        KZT_TEST_INIT(sub, KZT_KMEM_TEST4_NAME, KZT_KMEM_TEST4_DESC,
+                     KZT_KMEM_TEST4_ID, kzt_kmem_test4);
+
+        return sub;
+}
+
+/* Unregister the kmem tests (reverse of registration order) and free
+ * the subsystem descriptor allocated by kzt_kmem_init(). */
+void
+kzt_kmem_fini(kzt_subsystem_t *sub)
+{
+        ASSERT(sub);
+        KZT_TEST_FINI(sub, KZT_KMEM_TEST4_ID);
+        KZT_TEST_FINI(sub, KZT_KMEM_TEST3_ID);
+        KZT_TEST_FINI(sub, KZT_KMEM_TEST2_ID);
+        KZT_TEST_FINI(sub, KZT_KMEM_TEST1_ID);
+
+        kfree(sub);
+}
+
+/* Return the numeric id of the kmem test subsystem */
+int
+kzt_kmem_id(void) {
+        return KZT_SUBSYSTEM_KMEM;
+}
diff --git a/modules/splat/splat-mutex.c b/modules/splat/splat-mutex.c
new file mode 100644 (file)
index 0000000..47a3630
--- /dev/null
@@ -0,0 +1,323 @@
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_MUTEX            0x0400
+#define KZT_MUTEX_NAME                 "mutex"
+#define KZT_MUTEX_DESC                 "Kernel Mutex Tests"
+
+#define KZT_MUTEX_TEST1_ID             0x0401
+#define KZT_MUTEX_TEST1_NAME           "tryenter"
+#define KZT_MUTEX_TEST1_DESC           "Validate mutex_tryenter() correctness"
+
+#define KZT_MUTEX_TEST2_ID             0x0402
+#define KZT_MUTEX_TEST2_NAME           "race"
+#define KZT_MUTEX_TEST2_DESC           "Many threads entering/exiting the mutex"
+
+#define KZT_MUTEX_TEST3_ID             0x0403
+#define KZT_MUTEX_TEST3_NAME           "owned"
+#define KZT_MUTEX_TEST3_DESC           "Validate mutex_owned() correctness"
+
+#define KZT_MUTEX_TEST4_ID             0x0404
+#define KZT_MUTEX_TEST4_NAME           "owner"
+#define KZT_MUTEX_TEST4_DESC           "Validate mutex_owner() correctness"
+
+#define KZT_MUTEX_TEST_MAGIC           0x115599DDUL
+#define KZT_MUTEX_TEST_NAME            "mutex_test"
+#define KZT_MUTEX_TEST_WORKQ           "mutex_wq"
+#define KZT_MUTEX_TEST_COUNT           128
+
+/* Shared state handed to the mutex test work items */
+typedef struct mutex_priv {
+        unsigned long mp_magic;        /* KZT_MUTEX_TEST_MAGIC sanity value */
+        struct file *mp_file;          /* control file for log output */
+       struct work_struct mp_work[KZT_MUTEX_TEST_COUNT]; /* one item per racer */
+       kmutex_t mp_mtx;                /* mutex under test */
+       int mp_rc;                      /* result / race counter */
+} mutex_priv_t;
+
+
+/*
+ * Work item for test 1: attempt a mutex_tryenter() on the shared mutex
+ * and record the outcome in mp_rc (0 on success, -EBUSY if held).
+ * Note: on success the mutex is deliberately left held by this work
+ * item; the test tears it down afterwards.
+ */
+static void
+kzt_mutex_test1_work(void *priv)
+{
+       mutex_priv_t *mp = (mutex_priv_t *)priv;
+
+       ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
+       mp->mp_rc = 0;
+
+       if (!mutex_tryenter(&mp->mp_mtx))
+               mp->mp_rc = -EBUSY;
+}
+
+/*
+ * kzt_mutex_test1: validate mutex_tryenter().  A single-threaded
+ * workqueue attempts tryenter twice: once while this thread holds the
+ * mutex (must fail with -EBUSY) and once while it is free (must
+ * succeed).  Returns 0 on success, -ENOMEM/-EINVAL on setup or
+ * verification failure.
+ * NOTE(review): the success messages say "mutex_trylock()" although the
+ * API exercised is mutex_tryenter() — message text left as-is.
+ */
+static int
+kzt_mutex_test1(struct file *file, void *arg)
+{
+       struct workqueue_struct *wq;
+       struct work_struct work;
+       mutex_priv_t *mp;
+       int rc = 0;
+
+       mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
+       if (mp == NULL)
+               return -ENOMEM;
+
+       wq = create_singlethread_workqueue(KZT_MUTEX_TEST_WORKQ);
+       if (wq == NULL) {
+               rc = -ENOMEM;
+               goto out2;
+       }
+
+       mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+       mutex_enter(&(mp->mp_mtx));
+
+       mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
+       mp->mp_file = file;
+       INIT_WORK(&work, kzt_mutex_test1_work, mp);
+
+       /* Schedule a work item which will try and acquire the mutex via
+        * mutex_tryenter() while it is held.  This should fail and the work
+        * item will indicate this status in the passed private data. */
+       if (!queue_work(wq, &work)) {
+               mutex_exit(&(mp->mp_mtx));
+               rc = -EINVAL;
+               goto out;
+       }
+
+       flush_workqueue(wq);
+       mutex_exit(&(mp->mp_mtx));
+
+       /* Work item successfully acquired mutex, very bad! */
+       if (mp->mp_rc != -EBUSY) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+        kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
+                   "mutex_trylock() correctly failed when mutex held\n");
+
+       /* Schedule a work item which will try and acquire the mutex via
+        * mutex_tryenter() while it is not held.  This should work and
+        * the item will indicate this status in the passed private data. */
+       if (!queue_work(wq, &work)) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       flush_workqueue(wq);
+
+       /* Work item failed to acquire mutex, very bad! */
+       if (mp->mp_rc != 0) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+        kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
+                   "mutex_trylock() correctly succeeded when mutex unheld\n");
+out:
+       mutex_destroy(&(mp->mp_mtx));
+       destroy_workqueue(wq);
+out2:
+       kfree(mp);
+
+       return rc;
+}
+
+/*
+ * Work item for test 2: inside the mutex, read the shared counter,
+ * sleep, then write back counter + 1.  If mutual exclusion fails, two
+ * racers read the same value and an increment is lost, which the test
+ * detects as a final count below KZT_MUTEX_TEST_COUNT.
+ */
+static void
+kzt_mutex_test2_work(void *priv)
+{
+       mutex_priv_t *mp = (mutex_priv_t *)priv;
+       int rc;
+
+       ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
+
+       /* Read the value before sleeping and write it after we wake up to
+        * maximize the chance of a race if mutexes are not working properly */
+       mutex_enter(&mp->mp_mtx);
+       rc = mp->mp_rc;
+       set_current_state(TASK_INTERRUPTIBLE);
+       schedule_timeout(HZ / 100);  /* 1/100 of a second */
+       mp->mp_rc = rc + 1;
+       mutex_exit(&mp->mp_mtx);
+}
+
+/*
+ * kzt_mutex_test2: mutex race test.  Queues KZT_MUTEX_TEST_COUNT work
+ * items on a multi-threaded workqueue; each enters the mutex, sleeps,
+ * and increments a shared counter.  The test passes if every increment
+ * survives (counter == KZT_MUTEX_TEST_COUNT).  Returns 0 on success,
+ * -ENOMEM on setup failure, or -EINVAL on a lost update or queue error.
+ */
+static int
+kzt_mutex_test2(struct file *file, void *arg)
+{
+       struct workqueue_struct *wq;
+       mutex_priv_t *mp;
+       int i, rc = 0;
+
+       mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
+       if (mp == NULL)
+               return -ENOMEM;
+
+       /* Create a thread per CPU items on queue will race */
+       wq = create_workqueue(KZT_MUTEX_TEST_WORKQ);
+       if (wq == NULL) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+       mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
+       mp->mp_file = file;
+       mp->mp_rc = 0;
+
+       /* Schedule N work items to the work queue each of which enters the
+        * mutex, sleeps briefly, then exits the mutex.  On a multiprocessor
+        * box these work items will be handled by all available CPUs.  The
+        * mutex is instrumented such that if any two processors are in the
+        * critical region at the same time the system will panic.  If the
+        * mutex is implemented right this will never happen, that's a pass. */
+       for (i = 0; i < KZT_MUTEX_TEST_COUNT; i++) {
+               INIT_WORK(&(mp->mp_work[i]), kzt_mutex_test2_work, mp);
+
+               if (!queue_work(wq, &(mp->mp_work[i]))) {
+                       kzt_vprint(file, KZT_MUTEX_TEST2_NAME,
+                                  "Failed to queue work id %d\n", i);
+                       rc = -EINVAL;
+               }
+       }
+
+       flush_workqueue(wq);
+
+       if (mp->mp_rc == KZT_MUTEX_TEST_COUNT) {
+               kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
+                          "correctly entered/exited the mutex %d times\n",
+                          num_online_cpus(), mp->mp_rc);
+       } else {
+               kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
+                          "only processed %d/%d mutex work items\n",
+                          num_online_cpus(), mp->mp_rc, KZT_MUTEX_TEST_COUNT);
+               rc = -EINVAL;
+       }
+
+       mutex_destroy(&(mp->mp_mtx));
+       destroy_workqueue(wq);
+out:
+       kfree(mp);
+
+       return rc;
+}
+
+/*
+ * kzt_mutex_test3: validate mutex_owned().  The predicate must be true
+ * while the current task holds the mutex and false after it is dropped.
+ * Returns 0 on success or -EINVAL on any mismatch.
+ */
+static int
+kzt_mutex_test3(struct file *file, void *arg)
+{
+        kmutex_t mtx;
+       int rc = 0;
+
+       mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+       mutex_enter(&mtx);
+
+       /* Mutex should be owned by current */
+       if (!mutex_owned(&mtx)) {
+               kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
+                          "be owned by pid %d but is owned by pid %d\n",
+                          current->pid, mtx.km_owner ?  mtx.km_owner->pid : -1);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       mutex_exit(&mtx);
+
+       /* Mutex should not be owned by any task */
+       if (mutex_owned(&mtx)) {
+               kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
+                          "not be owned but is owned by pid %d\n",
+                          mtx.km_owner ?  mtx.km_owner->pid : -1);
+               rc = -EINVAL;
+               goto out;
+       }
+
+        kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "%s",
+                  "Correct mutex_owned() behavior\n");
+out:
+       mutex_destroy(&mtx);
+
+       return rc;
+}
+
+/*
+ * kzt_mutex_test4: validate mutex_owner().  The owner must be the
+ * current task while the mutex is held and NULL once it is dropped.
+ * Returns 0 on success or -EINVAL on any ownership mismatch.
+ * (Fixed: messages previously logged under KZT_MUTEX_TEST3_NAME.)
+ */
+static int
+kzt_mutex_test4(struct file *file, void *arg)
+{
+        kmutex_t mtx;
+       kthread_t *owner;
+       int rc = 0;
+
+       mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+       mutex_enter(&mtx);
+
+       /* Mutex should be owned by current */
+       owner = mutex_owner(&mtx);
+       if (current != owner) {
+               kzt_vprint(file, KZT_MUTEX_TEST4_NAME, "Mutex should "
+                          "be owned by pid %d but is owned by pid %d\n",
+                          current->pid, owner ? owner->pid : -1);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       mutex_exit(&mtx);
+
+       /* Mutex should not be owned by any task */
+       owner = mutex_owner(&mtx);
+       if (owner) {
+               kzt_vprint(file, KZT_MUTEX_TEST4_NAME, "Mutex should not "
+                          "be owned but is owned by pid %d\n", owner->pid);
+               rc = -EINVAL;
+               goto out;
+       }
+
+        kzt_vprint(file, KZT_MUTEX_TEST4_NAME, "%s",
+                  "Correct mutex_owner() behavior\n");
+out:
+       mutex_destroy(&mtx);
+
+       return rc;
+}
+
+/*
+ * Allocate and register the mutex test subsystem descriptor and its
+ * four tests.  Returns the new subsystem, or NULL on allocation
+ * failure; the caller releases it via kzt_mutex_fini().
+ */
+kzt_subsystem_t *
+kzt_mutex_init(void)
+{
+        kzt_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, KZT_MUTEX_NAME, KZT_NAME_SIZE);
+        strncpy(sub->desc.desc, KZT_MUTEX_DESC, KZT_DESC_SIZE);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+        INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = KZT_SUBSYSTEM_MUTEX;
+
+        KZT_TEST_INIT(sub, KZT_MUTEX_TEST1_NAME, KZT_MUTEX_TEST1_DESC,
+                      KZT_MUTEX_TEST1_ID, kzt_mutex_test1);
+        KZT_TEST_INIT(sub, KZT_MUTEX_TEST2_NAME, KZT_MUTEX_TEST2_DESC,
+                      KZT_MUTEX_TEST2_ID, kzt_mutex_test2);
+        KZT_TEST_INIT(sub, KZT_MUTEX_TEST3_NAME, KZT_MUTEX_TEST3_DESC,
+                      KZT_MUTEX_TEST3_ID, kzt_mutex_test3);
+        KZT_TEST_INIT(sub, KZT_MUTEX_TEST4_NAME, KZT_MUTEX_TEST4_DESC,
+                      KZT_MUTEX_TEST4_ID, kzt_mutex_test4);
+
+        return sub;
+}
+
+/* Unregister the mutex tests (reverse of registration order) and free
+ * the subsystem descriptor allocated by kzt_mutex_init(). */
+void
+kzt_mutex_fini(kzt_subsystem_t *sub)
+{
+        ASSERT(sub);
+        KZT_TEST_FINI(sub, KZT_MUTEX_TEST4_ID);
+        KZT_TEST_FINI(sub, KZT_MUTEX_TEST3_ID);
+        KZT_TEST_FINI(sub, KZT_MUTEX_TEST2_ID);
+        KZT_TEST_FINI(sub, KZT_MUTEX_TEST1_ID);
+
+        kfree(sub);
+}
+
+/* Return the numeric id of the mutex test subsystem */
+int
+kzt_mutex_id(void) {
+        return KZT_SUBSYSTEM_MUTEX;
+}
diff --git a/modules/splat/splat-random.c b/modules/splat/splat-random.c
new file mode 100644 (file)
index 0000000..412c1d6
--- /dev/null
@@ -0,0 +1,103 @@
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_KRNG             0x0300
+#define KZT_KRNG_NAME                  "krng"
+#define KZT_KRNG_DESC                  "Kernel Random Number Generator Tests"
+
+#define KZT_KRNG_TEST1_ID              0x0301
+#define KZT_KRNG_TEST1_NAME            "freq"
+#define KZT_KRNG_TEST1_DESC            "Frequency Test"
+
+#define KRNG_NUM_BITS                  1048576
+#define KRNG_NUM_BYTES                 (KRNG_NUM_BITS >> 3)
+#define KRNG_NUM_BITS_DIV2             (KRNG_NUM_BITS >> 1)
+#define KRNG_ERROR_RANGE               2097
+
+/* Random Number Generator Tests
+   There can be many more tests on the quality of the
+   random number generator.  For now we are only
+   testing the frequency of particular bits.
+   We could also test consecutive sequences,
+   randomness within a particular block, etc.
+   but that is probably not necessary for our purposes */
+
+/*
+ * kzt_krng_test1: frequency (monobit) test.  Draws KRNG_NUM_BITS random
+ * bits, counts the ones, and requires the count to fall within
+ * KRNG_ERROR_RANGE of the expected KRNG_NUM_BITS / 2.  Returns 0 on
+ * success, -ENOMEM on allocation failure, or -ERANGE if the bit bias
+ * exceeds the allowed range.
+ */
+static int
+kzt_krng_test1(struct file *file, void *arg)
+{
+       uint8_t *buf;
+       int i, j, diff, num = 0, rc = 0;
+
+       buf = kmalloc(sizeof(*buf) * KRNG_NUM_BYTES, GFP_KERNEL);
+       if (buf == NULL) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       memset(buf, 0, sizeof(*buf) * KRNG_NUM_BYTES);
+
+       /* Always succeeds */
+       random_get_pseudo_bytes(buf, sizeof(uint8_t) * KRNG_NUM_BYTES);
+
+       /* Count the set bits across the whole buffer */
+       for (i = 0; i < KRNG_NUM_BYTES; i++) {
+               uint8_t tmp = buf[i];
+               for (j = 0; j < 8; j++) {
+                       uint8_t tmp2 = ((tmp >> j) & 0x01);
+                       if (tmp2 == 1) {
+                               num++;
+                       }
+               }
+       }
+
+       kfree(buf);
+
+       /* Absolute deviation from the expected 50% ones */
+       diff = KRNG_NUM_BITS_DIV2 - num;
+       if (diff < 0)
+               diff *= -1;
+
+       kzt_print(file, "Test 1 Number of ones: %d\n", num);
+       kzt_print(file, "Test 1 Difference from expected: %d Allowed: %d\n",
+                  diff, KRNG_ERROR_RANGE);
+
+       if (diff > KRNG_ERROR_RANGE)
+               rc = -ERANGE;
+out:
+       return rc;
+}
+
+/*
+ * Allocate and register the krng test subsystem descriptor and its
+ * single frequency test.  Returns the new subsystem, or NULL on
+ * allocation failure; the caller releases it via kzt_krng_fini().
+ */
+kzt_subsystem_t *
+kzt_krng_init(void)
+{
+        kzt_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, KZT_KRNG_NAME, KZT_NAME_SIZE);
+       strncpy(sub->desc.desc, KZT_KRNG_DESC, KZT_DESC_SIZE);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+       INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = KZT_SUBSYSTEM_KRNG;
+
+        KZT_TEST_INIT(sub, KZT_KRNG_TEST1_NAME, KZT_KRNG_TEST1_DESC,
+                     KZT_KRNG_TEST1_ID, kzt_krng_test1);
+
+        return sub;
+}
+
+/* Unregister the krng test and free the subsystem descriptor
+ * allocated by kzt_krng_init(). */
+void
+kzt_krng_fini(kzt_subsystem_t *sub)
+{
+        ASSERT(sub);
+
+        KZT_TEST_FINI(sub, KZT_KRNG_TEST1_ID);
+
+        kfree(sub);
+}
+
+/* Return the numeric id of the krng test subsystem */
+int
+kzt_krng_id(void) {
+        return KZT_SUBSYSTEM_KRNG;
+}
diff --git a/modules/splat/splat-rwlock.c b/modules/splat/splat-rwlock.c
new file mode 100644 (file)
index 0000000..df4585e
--- /dev/null
@@ -0,0 +1,763 @@
+#include <splat-ctl.h>
+
+/* Subsystem identity for the kernel rwlock test suite */
+#define KZT_SUBSYSTEM_RWLOCK           0x0700
+#define KZT_RWLOCK_NAME                        "rwlock"
+#define KZT_RWLOCK_DESC                        "Kernel RW Lock Tests"
+
+/* Per-test ids, names, and descriptions */
+#define KZT_RWLOCK_TEST1_ID            0x0701
+#define KZT_RWLOCK_TEST1_NAME          "rwtest1"
+#define KZT_RWLOCK_TEST1_DESC          "Multiple Readers One Writer"
+
+#define KZT_RWLOCK_TEST2_ID            0x0702
+#define KZT_RWLOCK_TEST2_NAME          "rwtest2"
+#define KZT_RWLOCK_TEST2_DESC          "Multiple Writers"
+
+#define KZT_RWLOCK_TEST3_ID            0x0703
+#define KZT_RWLOCK_TEST3_NAME          "rwtest3"
+#define KZT_RWLOCK_TEST3_DESC          "Owner Verification"
+
+#define KZT_RWLOCK_TEST4_ID            0x0704
+#define KZT_RWLOCK_TEST4_NAME          "rwtest4"
+#define KZT_RWLOCK_TEST4_DESC          "Trylock Test"
+
+#define KZT_RWLOCK_TEST5_ID            0x0705
+#define KZT_RWLOCK_TEST5_NAME          "rwtest5"
+#define KZT_RWLOCK_TEST5_DESC          "Write Downgrade Test"
+
+#define KZT_RWLOCK_TEST6_ID            0x0706
+#define KZT_RWLOCK_TEST6_NAME          "rwtest6"
+#define KZT_RWLOCK_TEST6_DESC          "Read Upgrade Test"
+
+/* Shared test parameters: magic for sanity checks, thread-name stem,
+ * and the number of threads spawned by the multi-thread tests */
+#define KZT_RWLOCK_TEST_MAGIC          0x115599DDUL
+#define KZT_RWLOCK_TEST_NAME           "rwlock_test"
+#define KZT_RWLOCK_TEST_COUNT          8
+
+/* Values of rw_release used by the control thread to stage releases */
+#define KZT_RWLOCK_RELEASE_INIT                0
+#define KZT_RWLOCK_RELEASE_WRITERS     1
+#define KZT_RWLOCK_RELEASE_READERS     2
+
+/* State shared between the control thread and all test threads.
+ * rw_priv_lock guards the print/update sequences; the atomics track
+ * thread progress through the test. */
+typedef struct rw_priv {
+        unsigned long rw_magic;        /* set to KZT_RWLOCK_TEST_MAGIC */
+        struct file *rw_file;          /* output file for kzt_vprint() */
+       krwlock_t rwl;                  /* the rwlock under test */
+       spinlock_t rw_priv_lock;        /* guards counter updates + printing */
+       wait_queue_head_t rw_waitq;     /* threads sleep here between stages */
+       atomic_t rw_completed;          /* threads finished with the test */
+       atomic_t rw_acquired;           /* threads currently holding rwl */
+       atomic_t rw_waiters;            /* threads waiting to acquire rwl */
+       atomic_t rw_release;            /* stage flag: KZT_RWLOCK_RELEASE_* */
+} rw_priv_t;
+
+/* Per-thread argument: identity plus a result slot the control thread
+ * inspects after the threads finish. */
+typedef struct rw_thr {
+       int rwt_id;             /* thread index, also used in its name */
+       const char *rwt_name;   /* test name for logging */
+       rw_priv_t *rwt_rwp;     /* shared test state */
+       int rwt_rc;             /* per-thread result, 0 = success */
+} rw_thr_t;
+
+/* Sleep interruptibly for 'delay' jiffies.  The state must be set
+ * before schedule_timeout() or the task would not actually sleep. */
+static inline void
+kzt_rwlock_sleep(signed long delay)
+{
+       set_current_state(TASK_INTERRUPTIBLE);
+       schedule_timeout(delay);
+}
+
+/* Evaluate 'test' while holding the spinlock 'lock'; yields 1 if the
+ * condition held, 0 otherwise.  Used as the wait_event_interruptible()
+ * condition so the counters are read under the private lock. */
+#define kzt_rwlock_lock_and_test(lock,test)    \
+({                                             \
+       int ret = 0;                            \
+                                               \
+       spin_lock(lock);                        \
+       ret = (test) ? 1 : 0;                   \
+       spin_unlock(lock);                      \
+       ret;                                    \
+})
+
+/* Initialize the shared test state: magic, output file, private
+ * spinlock, wait queue, all progress counters, and the rwlock itself.
+ * The caller is responsible for the matching rw_destroy(). */
+void kzt_init_rw_priv(rw_priv_t *rwv, struct file *file)
+{
+       rwv->rw_magic = KZT_RWLOCK_TEST_MAGIC;
+       rwv->rw_file = file;
+       spin_lock_init(&rwv->rw_priv_lock);
+       init_waitqueue_head(&rwv->rw_waitq);
+       atomic_set(&rwv->rw_completed, 0);
+       atomic_set(&rwv->rw_acquired, 0);
+       atomic_set(&rwv->rw_waiters, 0);
+       atomic_set(&rwv->rw_release, KZT_RWLOCK_RELEASE_INIT);
+       
+       /* Initialize the read/write lock */
+       rw_init(&rwv->rwl, KZT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
+}
+
+/* Test 1 writer thread: after a random (0-255 ms) startup delay it
+ * announces itself as a waiter, takes the rwlock as writer, then holds
+ * it until the control thread sets rw_release to RELEASE_WRITERS.
+ * All counter updates are done under rw_priv_lock so the control
+ * thread observes consistent acquired/waiter counts. */
+int
+kzt_rwlock_test1_writer_thread(void *arg)
+{
+       rw_thr_t *rwt = (rw_thr_t *)arg;
+       rw_priv_t *rwv = rwt->rwt_rwp;
+       uint8_t rnd = 0;
+       char name[16];
+
+       ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+        snprintf(name, sizeof(name), "%s%d", 
+                KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+       daemonize(name);
+       /* Random delay to shuffle the order threads reach the lock */
+       get_random_bytes((void *)&rnd, 1);
+       kzt_rwlock_sleep(rnd * HZ / 1000);
+
+       spin_lock(&rwv->rw_priv_lock);
+       kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                  "%s writer thread trying to acquire rwlock with "
+                  "%d holding lock and %d waiting\n",
+                  name, atomic_read(&rwv->rw_acquired),
+                  atomic_read(&rwv->rw_waiters));
+       atomic_inc(&rwv->rw_waiters);
+       spin_unlock(&rwv->rw_priv_lock);
+
+       /* Take the rwlock for writing and
+        * release it when we are told to */
+       rw_enter(&rwv->rwl, RW_WRITER);
+
+       spin_lock(&rwv->rw_priv_lock);
+       atomic_dec(&rwv->rw_waiters);
+       atomic_inc(&rwv->rw_acquired);
+       kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                  "%s writer thread acquired rwlock with "
+                  "%d holding lock and %d waiting\n",
+                  name, atomic_read(&rwv->rw_acquired),
+                  atomic_read(&rwv->rw_waiters));
+       spin_unlock(&rwv->rw_priv_lock);
+
+       /* Wait here until the control thread
+        * says we can release the write lock */
+       wait_event_interruptible(rwv->rw_waitq,
+                                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+                                        atomic_read(&rwv->rw_release) ==
+                                        KZT_RWLOCK_RELEASE_WRITERS));
+       spin_lock(&rwv->rw_priv_lock);
+       atomic_inc(&rwv->rw_completed);
+       atomic_dec(&rwv->rw_acquired);
+       kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                  "%s writer thread dropped rwlock with "
+                  "%d holding lock and %d waiting\n",
+                  name, atomic_read(&rwv->rw_acquired),
+                  atomic_read(&rwv->rw_waiters));
+       spin_unlock(&rwv->rw_priv_lock);
+
+       /* Release the rwlock */
+       rw_exit(&rwv->rwl);
+       return 0;
+}
+
+/* Test 1 reader thread: waits until some thread (the writer) already
+ * holds the lock, then registers as a waiter and blocks in
+ * rw_enter(RW_READER).  Once in, it holds the lock until the control
+ * thread sets rw_release to RELEASE_READERS. */
+int
+kzt_rwlock_test1_reader_thread(void *arg)
+{
+       rw_thr_t *rwt = (rw_thr_t *)arg;
+       rw_priv_t *rwv = rwt->rwt_rwp;
+       uint8_t rnd = 0;
+       char name[16];
+
+       ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+        snprintf(name, sizeof(name), "%s%d",
+                KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+       daemonize(name);
+       /* Random delay to shuffle the order threads reach the lock */
+       get_random_bytes((void *)&rnd, 1);
+        kzt_rwlock_sleep(rnd * HZ / 1000);
+
+       /* Don't try and take the rwlock until
+        * someone else has already acquired it */
+        wait_event_interruptible(rwv->rw_waitq,
+                                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+                                        atomic_read(&rwv->rw_acquired) > 0));
+
+       spin_lock(&rwv->rw_priv_lock);
+       kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                  "%s reader thread trying to acquire rwlock with "
+                  "%d holding lock and %d waiting\n",
+                  name, atomic_read(&rwv->rw_acquired),
+                  atomic_read(&rwv->rw_waiters));
+       atomic_inc(&rwv->rw_waiters);
+       spin_unlock(&rwv->rw_priv_lock);
+
+       /* Take the rwlock for reading,
+        * release it when we are told to */
+       rw_enter(&rwv->rwl, RW_READER);
+
+       spin_lock(&rwv->rw_priv_lock);
+       atomic_dec(&rwv->rw_waiters);
+       atomic_inc(&rwv->rw_acquired);
+       kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                  "%s reader thread acquired rwlock with "
+                  "%d holding lock and %d waiting\n",
+                  name, atomic_read(&rwv->rw_acquired),
+                  atomic_read(&rwv->rw_waiters));
+       spin_unlock(&rwv->rw_priv_lock);
+
+       /* Wait here until the control thread
+         * says we can release the read lock */
+       wait_event_interruptible(rwv->rw_waitq,
+                                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+                                atomic_read(&rwv->rw_release) ==
+                                KZT_RWLOCK_RELEASE_READERS));
+
+       spin_lock(&rwv->rw_priv_lock);
+       atomic_inc(&rwv->rw_completed);
+       atomic_dec(&rwv->rw_acquired);
+       kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                  "%s reader thread dropped rwlock with "
+                  "%d holding lock and %d waiting\n",
+                  name, atomic_read(&rwv->rw_acquired),
+                  atomic_read(&rwv->rw_waiters));
+       spin_unlock(&rwv->rw_priv_lock);
+
+       /* Release the rwlock */
+       rw_exit(&rwv->rwl);
+       return 0;
+}
+
+/* Test 1 control thread: spawn one writer and N-1 readers, then stage
+ * the test -- wait for the writer to hold the lock, verify all readers
+ * are queued behind it, release the writer, verify multiple readers
+ * then hold the lock concurrently, release the readers, and wait for
+ * everything to drain.  Returns 0 on success.
+ * NOTE(review): 'count' is tallied but never compared against the
+ * completion counters -- confirm whether a thread-creation failure
+ * should fail the test. */
+static int
+kzt_rwlock_test1(struct file *file, void *arg)
+{
+       int i, count = 0, rc = 0;
+       long pids[KZT_RWLOCK_TEST_COUNT];
+       rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
+       rw_priv_t rwv;
+
+       /* Initialize private data 
+        * including the rwlock */
+       kzt_init_rw_priv(&rwv, file);
+
+       /* Create some threads, the exact number isn't important just as
+        * long as we know how many we managed to create and should expect. */
+       for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+               rwt[i].rwt_rwp = &rwv;
+               rwt[i].rwt_id = i;
+               rwt[i].rwt_name = KZT_RWLOCK_TEST1_NAME;
+               rwt[i].rwt_rc = 0;
+
+               /* The first thread will be a writer */
+               if (i == 0) {
+                       pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
+                                               &rwt[i], 0);
+               } else {
+                       pids[i] = kernel_thread(kzt_rwlock_test1_reader_thread,
+                                               &rwt[i], 0);
+               }
+               
+               if (pids[i] >= 0) {
+                       count++;
+               }
+       }
+
+       /* Once the writer has the lock, release the readers */
+       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock, atomic_read(&rwv.rw_acquired) <= 0)) {
+               kzt_rwlock_sleep(1 * HZ);
+       }
+       wake_up_interruptible(&rwv.rw_waitq);
+
+       /* Ensure that there is only 1 writer and all readers are waiting */
+       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock, 
+                                       atomic_read(&rwv.rw_acquired) != 1 ||
+                                       atomic_read(&rwv.rw_waiters) !=
+                                       KZT_RWLOCK_TEST_COUNT - 1)) {
+
+               kzt_rwlock_sleep(1 * HZ);
+       }
+       /* Release the writer */
+       spin_lock(&rwv.rw_priv_lock);
+       atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
+       spin_unlock(&rwv.rw_priv_lock);
+       wake_up_interruptible(&rwv.rw_waitq);
+
+       /* Now ensure that there are multiple reader threads holding the lock */
+       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+              atomic_read(&rwv.rw_acquired) <= 1)) {
+               kzt_rwlock_sleep(1 * HZ);
+       }
+       /* Release the readers */
+       spin_lock(&rwv.rw_priv_lock);
+       atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_READERS);
+       spin_unlock(&rwv.rw_priv_lock);
+       wake_up_interruptible(&rwv.rw_waitq);
+
+       /* Wait for the test to complete */
+       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+              atomic_read(&rwv.rw_acquired) != 0 ||
+              atomic_read(&rwv.rw_waiters) != 0)) {
+               kzt_rwlock_sleep(1 * HZ);
+
+       }
+
+       rw_destroy(&rwv.rwl);
+       return rc;
+}
+
+/* Test 2 writer thread: registers as a waiter, blocks until the
+ * control thread flips rw_release to RELEASE_WRITERS, then contends
+ * for the write lock.  While holding it, it sleeps briefly to simulate
+ * work and records a failure in rwt_rc if any other thread also shows
+ * as holding the lock -- writers must be exclusive. */
+int
+kzt_rwlock_test2_writer_thread(void *arg)
+{
+       rw_thr_t *rwt = (rw_thr_t *)arg;
+       rw_priv_t *rwv = rwt->rwt_rwp;
+       uint8_t rnd = 0;
+       char name[16];
+       
+       ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+       snprintf(name, sizeof(name), "%s%d",
+                KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+       daemonize(name);
+       get_random_bytes((void *)&rnd, 1);
+       kzt_rwlock_sleep(rnd * HZ / 1000);
+
+       /* Here just increment the waiters count even if we are not
+        * exactly about to call rw_enter().  Not really a big deal
+        * since more than likely will be true when we simulate work
+        * later on */
+       spin_lock(&rwv->rw_priv_lock);
+       kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                  "%s writer thread trying to acquire rwlock with "
+                  "%d holding lock and %d waiting\n",
+                  name, atomic_read(&rwv->rw_acquired),
+                  atomic_read(&rwv->rw_waiters));
+       atomic_inc(&rwv->rw_waiters);
+       spin_unlock(&rwv->rw_priv_lock);
+
+       /* Wait here until the control thread
+        * says we can acquire the write lock */
+       wait_event_interruptible(rwv->rw_waitq,
+                                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+                                atomic_read(&rwv->rw_release) ==
+                                KZT_RWLOCK_RELEASE_WRITERS));
+       
+       /* Take the rwlock for writing */
+       rw_enter(&rwv->rwl, RW_WRITER);
+
+       spin_lock(&rwv->rw_priv_lock);
+       atomic_dec(&rwv->rw_waiters);
+       atomic_inc(&rwv->rw_acquired);
+       kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                  "%s writer thread acquired rwlock with "
+                  "%d holding lock and %d waiting\n",
+                  name, atomic_read(&rwv->rw_acquired),
+                  atomic_read(&rwv->rw_waiters));
+       spin_unlock(&rwv->rw_priv_lock);
+
+       /* Give up the processor for a bit to simulate
+        * doing some work while taking the write lock */
+       kzt_rwlock_sleep(rnd * HZ / 1000);
+
+       /* Ensure that we are the only one writing; any other holder
+        * means mutual exclusion was violated */
+       if (atomic_read(&rwv->rw_acquired) > 1) {
+               rwt->rwt_rc = 1;
+       } else {
+               rwt->rwt_rc = 0;
+       }
+
+       spin_lock(&rwv->rw_priv_lock);
+       atomic_inc(&rwv->rw_completed);
+       atomic_dec(&rwv->rw_acquired);
+       kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                  "%s writer thread dropped rwlock with "
+                  "%d holding lock and %d waiting\n",
+                  name, atomic_read(&rwv->rw_acquired),
+                  atomic_read(&rwv->rw_waiters));
+       spin_unlock(&rwv->rw_priv_lock);
+
+       rw_exit(&rwv->rwl);
+       
+
+       return 0;
+}
+
+/* Test 2 control thread: spawn N writer threads, wait for all of them
+ * to queue up, release them simultaneously, and wait for the test to
+ * drain.  Fails (non-zero) if any writer observed another holder while
+ * it held the write lock. */
+static int
+kzt_rwlock_test2(struct file *file, void *arg)
+{
+       int i, count = 0, rc = 0;
+       long pids[KZT_RWLOCK_TEST_COUNT];
+       rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
+       rw_priv_t rwv;
+
+       /* Initialize private data 
+        * including the rwlock */
+       kzt_init_rw_priv(&rwv, file);
+
+       /* Create some threads, the exact number isn't important just as
+        * long as we know how many we managed to create and should expect. */
+       for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+               rwt[i].rwt_rwp = &rwv;
+               rwt[i].rwt_id = i;
+               rwt[i].rwt_name = KZT_RWLOCK_TEST2_NAME;
+               rwt[i].rwt_rc = 0;
+
+               /* Every thread in this test is a writer */
+               pids[i] = kernel_thread(kzt_rwlock_test2_writer_thread,
+                                       &rwt[i], 0);
+
+               if (pids[i] >= 0) {
+                       count++;
+               }
+       }
+
+       /* Wait for writers to get queued up */
+       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+              atomic_read(&rwv.rw_waiters) < KZT_RWLOCK_TEST_COUNT)) {
+               kzt_rwlock_sleep(1 * HZ);
+       }
+       /* Release the writers */
+       spin_lock(&rwv.rw_priv_lock);
+       atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
+       spin_unlock(&rwv.rw_priv_lock);
+       wake_up_interruptible(&rwv.rw_waitq);
+
+       /* Wait for the test to complete */
+       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+              atomic_read(&rwv.rw_acquired) != 0 ||
+              atomic_read(&rwv.rw_waiters) != 0)) {
+               kzt_rwlock_sleep(1 * HZ);
+       }
+
+       /* If any of the write threads ever acquired the lock
+        * while another thread had it, make sure we return
+        * an error */
+       for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+               if (rwt[i].rwt_rc) {
+                       rc++;
+               }
+       }
+
+       rw_destroy(&rwv.rwl);
+       return rc;
+}
+
+/* Test 3: owner verification.  rw_owner() must report the current
+ * task while the lock is write-held, and NULL both when the lock is
+ * free and when it is only read-held. */
+static int
+kzt_rwlock_test3(struct file *file, void *arg)
+{
+       kthread_t *owner;
+       rw_priv_t rwv;
+       int rc = 0;
+
+       /* Initialize private data 
+        * including the rwlock */
+       kzt_init_rw_priv(&rwv, file);
+
+       /* Take the rwlock for writing */
+       rw_enter(&rwv.rwl, RW_WRITER);
+       owner = rw_owner(&rwv.rwl);
+       if (current != owner) {
+               kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should "
+                          "be owned by pid %d but is owned by pid %d\n",
+                          current->pid, owner ? owner->pid : -1);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Release the rwlock */
+       rw_exit(&rwv.rwl);
+       owner = rw_owner(&rwv.rwl);
+       if (owner) {
+               kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
+                          "be owned but is owned by pid %d\n", owner->pid);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Take the rwlock for reading.
+        * Should not have an owner */
+       rw_enter(&rwv.rwl, RW_READER);
+       owner = rw_owner(&rwv.rwl);
+       if (owner) {
+               kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
+                          "be owned but is owned by pid %d\n", owner->pid);
+               /* Release the rwlock */
+               rw_exit(&rwv.rwl);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Release the rwlock */
+       rw_exit(&rwv.rwl);
+
+out:
+       rw_destroy(&rwv.rwl);
+       return rc;
+}
+
+/* Test 4 reader thread: once a writer holds the lock, attempt a
+ * non-blocking rw_tryenter(RW_READER).  It is expected to FAIL; a
+ * successful tryenter while the writer holds the lock is recorded in
+ * rwt_rc as a test failure for the control thread to count. */
+int
+kzt_rwlock_test4_reader_thread(void *arg)
+{
+       rw_thr_t *rwt = (rw_thr_t *)arg;
+       rw_priv_t *rwv = rwt->rwt_rwp;
+       uint8_t rnd = 0;
+       char name[16];
+
+       ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+        snprintf(name, sizeof(name), "%s%d",
+                KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+       daemonize(name);
+       get_random_bytes((void *)&rnd, 1);
+        kzt_rwlock_sleep(rnd * HZ / 1000);
+
+       /* Don't try and take the rwlock until
+        * someone else has already acquired it */
+        wait_event_interruptible(rwv->rw_waitq,
+                                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+                                atomic_read(&rwv->rw_acquired) > 0));
+
+       spin_lock(&rwv->rw_priv_lock);
+       kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                  "%s reader thread trying to acquire rwlock with "
+                  "%d holding lock and %d waiting\n",
+                  name, atomic_read(&rwv->rw_acquired),
+                  atomic_read(&rwv->rw_waiters));
+       spin_unlock(&rwv->rw_priv_lock);
+
+       /* Attempt to take the rwlock for reading
+        * without blocking; 1 means it was acquired */
+       rwt->rwt_rc = rw_tryenter(&rwv->rwl, RW_READER);
+
+       /* Here we acquired the lock this is a
+        * failure since the writer should be
+        * holding the lock */
+       if (rwt->rwt_rc == 1) {
+               spin_lock(&rwv->rw_priv_lock);
+               atomic_inc(&rwv->rw_acquired);
+               kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                          "%s reader thread acquired rwlock with "
+                          "%d holding lock and %d waiting\n",
+                          name, atomic_read(&rwv->rw_acquired),
+                          atomic_read(&rwv->rw_waiters));
+               spin_unlock(&rwv->rw_priv_lock);
+               
+               spin_lock(&rwv->rw_priv_lock);
+               atomic_dec(&rwv->rw_acquired);
+               kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                          "%s reader thread dropped rwlock with "
+                          "%d holding lock and %d waiting\n",
+                          name, atomic_read(&rwv->rw_acquired),
+                          atomic_read(&rwv->rw_waiters));
+               spin_unlock(&rwv->rw_priv_lock);
+               
+               /* Release the rwlock */
+               rw_exit(&rwv->rwl);
+       }
+       /* Here we know we didn't block and didn't
+        * acquire the rwlock for reading */
+       else {
+               spin_lock(&rwv->rw_priv_lock);
+               atomic_inc(&rwv->rw_completed);
+               kzt_vprint(rwv->rw_file, rwt->rwt_name,
+                          "%s reader thread could not acquire rwlock with "
+                          "%d holding lock and %d waiting\n",
+                          name, atomic_read(&rwv->rw_acquired),
+                          atomic_read(&rwv->rw_waiters));
+               spin_unlock(&rwv->rw_priv_lock);
+       }
+
+       return 0;
+}
+
+/* Test 4 control thread: one writer (reusing the test 1 writer, which
+ * holds the lock until released) plus N-1 tryenter readers.  Each
+ * reader's tryenter must fail while the writer holds the lock; any
+ * reader that succeeded contributes to the non-zero return. */
+static int
+kzt_rwlock_test4(struct file *file, void *arg)
+{
+       int i, count = 0, rc = 0;
+       long pids[KZT_RWLOCK_TEST_COUNT];
+       rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
+       rw_priv_t rwv;
+
+       /* Initialize private data 
+        * including the rwlock */
+       kzt_init_rw_priv(&rwv, file);
+
+       /* Create some threads, the exact number isn't important just as
+        * long as we know how many we managed to create and should expect. */
+       for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+               rwt[i].rwt_rwp = &rwv;
+               rwt[i].rwt_id = i;
+               rwt[i].rwt_name = KZT_RWLOCK_TEST4_NAME;
+               rwt[i].rwt_rc = 0;
+
+               /* The first thread will be a writer */
+               if (i == 0) {
+                       /* We can reuse the test1 writer thread here */
+                       pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
+                                               &rwt[i], 0);
+               } else {
+                        pids[i] = kernel_thread(kzt_rwlock_test4_reader_thread,
+                                               &rwt[i], 0);
+               }
+
+               if (pids[i] >= 0) {
+                       count++;
+               }
+       }
+
+       /* Once the writer has the lock, release the readers */
+       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+              atomic_read(&rwv.rw_acquired) <= 0)) {
+               kzt_rwlock_sleep(1 * HZ);
+       }
+       wake_up_interruptible(&rwv.rw_waitq);
+
+       /* Make sure that the reader threads complete */
+       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+              atomic_read(&rwv.rw_completed) != KZT_RWLOCK_TEST_COUNT - 1)) {
+               kzt_rwlock_sleep(1 * HZ);
+       }
+       /* Release the writer */
+       spin_lock(&rwv.rw_priv_lock);
+       atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
+       spin_unlock(&rwv.rw_priv_lock);
+       wake_up_interruptible(&rwv.rw_waitq);
+
+       /* Wait for the test to complete */
+       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+              atomic_read(&rwv.rw_acquired) != 0 ||
+              atomic_read(&rwv.rw_waiters) != 0)) {
+               kzt_rwlock_sleep(1 * HZ);
+       }
+
+       /* If any of the reader threads ever acquired the lock
+        * while another thread had it, make sure we return
+        * an error since the rw_tryenter() should have failed */
+       for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+               if (rwt[i].rwt_rc) {
+                       rc++;
+               }
+       }
+
+       rw_destroy(&rwv.rwl);
+       return rc;
+}
+
+/* Test 5: write downgrade.  Take the lock as writer (owner must be
+ * us), rw_downgrade() to a read lock, and verify rw_owner() then
+ * reports no owner, since read locks are ownerless. */
+static int
+kzt_rwlock_test5(struct file *file, void *arg)
+{
+       kthread_t *owner;
+       rw_priv_t rwv;
+       int rc = 0;
+
+       /* Initialize private data 
+        * including the rwlock */
+       kzt_init_rw_priv(&rwv, file);
+
+       /* Take the rwlock for writing */
+       rw_enter(&rwv.rwl, RW_WRITER);
+       owner = rw_owner(&rwv.rwl);
+       if (current != owner) {
+               kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should "
+                          "be owned by pid %d but is owned by pid %d\n",
+                          current->pid, owner ? owner->pid : -1);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Make sure that the downgrade
+        * worked properly */
+       rw_downgrade(&rwv.rwl);
+
+       owner = rw_owner(&rwv.rwl);
+       if (owner) {
+               kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should not "
+                          "be owned but is owned by pid %d\n", owner->pid);
+               /* Release the rwlock */
+               rw_exit(&rwv.rwl);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Release the rwlock */
+       rw_exit(&rwv.rwl);
+
+out:
+       rw_destroy(&rwv.rwl);
+       return rc;
+}
+
+/* Test 6: read upgrade.  Take the lock as reader (no owner expected),
+ * rw_tryupgrade() to a write lock, and verify the upgrade succeeded
+ * and rw_owner() now reports the current task. */
+static int
+kzt_rwlock_test6(struct file *file, void *arg)
+{
+       kthread_t *owner;
+       rw_priv_t rwv;
+       int rc = 0;
+
+       /* Initialize private data 
+        * including the rwlock */
+       kzt_init_rw_priv(&rwv, file);
+
+       /* Take the rwlock for reading */
+       rw_enter(&rwv.rwl, RW_READER);
+       owner = rw_owner(&rwv.rwl);
+       if (owner) {
+               kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should not "
+                          "be owned but is owned by pid %d\n", owner->pid);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Make sure that the upgrade
+        * worked properly; rw_tryupgrade() returns
+        * non-zero on success so invert for rc */
+       rc = !rw_tryupgrade(&rwv.rwl);
+
+       owner = rw_owner(&rwv.rwl);
+       if (rc || current != owner) {
+               kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should "
+                          "be owned by pid %d but is owned by pid %d "
+                          "trylock rc %d\n",
+                          current->pid, owner ? owner->pid : -1, rc);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Release the rwlock */
+       rw_exit(&rwv.rwl);
+
+out:
+       rw_destroy(&rwv.rwl);
+       return rc;
+}
+
+/* Register the rwlock test subsystem.  Allocates a zeroed subsystem
+ * descriptor, fills in its identity and list/lock state, and registers
+ * tests 1-6.  Returns the descriptor or NULL on allocation failure;
+ * the caller owns the memory and must release it with
+ * kzt_rwlock_fini(). */
+kzt_subsystem_t *
+kzt_rwlock_init(void)
+{
+        kzt_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        /* strncpy() does not NUL-terminate when the source fills the
+         * buffer, so terminate both strings explicitly. */
+        strncpy(sub->desc.name, KZT_RWLOCK_NAME, KZT_NAME_SIZE);
+        sub->desc.name[KZT_NAME_SIZE - 1] = '\0';
+        strncpy(sub->desc.desc, KZT_RWLOCK_DESC, KZT_DESC_SIZE);
+        sub->desc.desc[KZT_DESC_SIZE - 1] = '\0';
+        INIT_LIST_HEAD(&sub->subsystem_list);
+        INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = KZT_SUBSYSTEM_RWLOCK;
+
+        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST1_NAME, KZT_RWLOCK_TEST1_DESC,
+                      KZT_RWLOCK_TEST1_ID, kzt_rwlock_test1);
+        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST2_NAME, KZT_RWLOCK_TEST2_DESC,
+                      KZT_RWLOCK_TEST2_ID, kzt_rwlock_test2);
+        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST3_NAME, KZT_RWLOCK_TEST3_DESC,
+                      KZT_RWLOCK_TEST3_ID, kzt_rwlock_test3);
+        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST4_NAME, KZT_RWLOCK_TEST4_DESC,
+                      KZT_RWLOCK_TEST4_ID, kzt_rwlock_test4);
+        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST5_NAME, KZT_RWLOCK_TEST5_DESC,
+                      KZT_RWLOCK_TEST5_ID, kzt_rwlock_test5);
+        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST6_NAME, KZT_RWLOCK_TEST6_DESC,
+                      KZT_RWLOCK_TEST6_ID, kzt_rwlock_test6);
+
+        return sub;
+}
+
+/* Unregister the rwlock tests (in reverse registration order) and
+ * free the descriptor allocated by kzt_rwlock_init(). */
+void
+kzt_rwlock_fini(kzt_subsystem_t *sub)
+{
+        ASSERT(sub);
+        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST6_ID);
+        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST5_ID);
+        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST4_ID);
+        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST3_ID);
+        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST2_ID);
+        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST1_ID);
+        kfree(sub);
+}
+
+/* Return the numeric id identifying the rwlock test subsystem. */
+int
+kzt_rwlock_id(void) {
+        return KZT_SUBSYSTEM_RWLOCK;
+}
diff --git a/modules/splat/splat-taskq.c b/modules/splat/splat-taskq.c
new file mode 100644 (file)
index 0000000..3d5c075
--- /dev/null
@@ -0,0 +1,237 @@
+#include <splat-ctl.h>
+
+/* Subsystem identity for the task queue test suite */
+#define KZT_SUBSYSTEM_TASKQ            0x0200
+#define KZT_TASKQ_NAME                 "taskq"
+#define KZT_TASKQ_DESC                 "Kernel Task Queue Tests"
+
+#define KZT_TASKQ_TEST1_ID             0x0201
+#define KZT_TASKQ_TEST1_NAME           "single"
+#define KZT_TASKQ_TEST1_DESC           "Single task queue, single task"
+
+#define KZT_TASKQ_TEST2_ID              0x0202
+#define KZT_TASKQ_TEST2_NAME           "multiple"
+#define KZT_TASKQ_TEST2_DESC           "Multiple task queues, multiple tasks"
+
+/* Argument passed to each dispatched task function; 'flag' is both
+ * input and output so the dispatcher can verify the task ran. */
+typedef struct kzt_taskq_arg {
+       int flag;               /* set/transformed by the task function */
+       int id;                 /* taskq index for logging */
+       struct file *file;      /* output file for kzt_vprint() */
+       const char *name;       /* test name for logging */
+} kzt_taskq_arg_t;
+
+/* Validation Test 1 - Create a taskq, queue a task, wait until
+ * task completes, ensure task ran properly, cleanup taskq,
+ */
+/* Task function for test 1: simply sets arg->flag so the dispatcher
+ * can confirm the task executed. */
+static void
+kzt_taskq_test1_func(void *arg)
+{
+       kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
+
+       ASSERT(tq_arg);
+       kzt_vprint(tq_arg->file, KZT_TASKQ_TEST1_NAME,
+                  "Taskq '%s' function '%s' setting flag\n",
+                  tq_arg->name, sym2str(kzt_taskq_test1_func));
+       tq_arg->flag = 1;
+}
+
+/* Test 1: create a single-thread taskq, dispatch one task, wait for
+ * it to run, destroy the taskq, and verify the task set the flag.
+ * Returns 0 on success, -EINVAL on any failure.
+ * NOTE(review): 'taskq_destory' (sic) -- confirm this matches the
+ * SPL API spelling of the destroy function. */
+static int
+kzt_taskq_test1(struct file *file, void *arg)
+{
+       taskq_t *tq;
+       taskqid_t id;
+       kzt_taskq_arg_t tq_arg;
+
+       kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' creating\n",
+                  KZT_TASKQ_TEST1_NAME);
+       if ((tq = taskq_create(KZT_TASKQ_TEST1_NAME, 1, 0, 0, 0, 0)) == NULL) {
+               kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
+                          "Taskq '%s' create failed\n",
+                          KZT_TASKQ_TEST1_NAME);
+               return -EINVAL;
+       }
+
+       tq_arg.flag = 0;
+       tq_arg.id   = 0;
+       tq_arg.file = file;
+       tq_arg.name = KZT_TASKQ_TEST1_NAME;
+
+       kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
+                  "Taskq '%s' function '%s' dispatching\n",
+                  tq_arg.name, sym2str(kzt_taskq_test1_func));
+       /* A taskqid of 0 indicates the dispatch failed */
+       if ((id = taskq_dispatch(tq, kzt_taskq_test1_func, &tq_arg, 0)) == 0) {
+               kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
+                          "Taskq '%s' function '%s' dispatch failed\n",
+                          tq_arg.name, sym2str(kzt_taskq_test1_func));
+               taskq_destory(tq);
+               return -EINVAL;
+       }
+
+       kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
+                  tq_arg.name);
+       taskq_wait(tq);
+       kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
+                  tq_arg.name);
+       taskq_destory(tq);
+
+       return (tq_arg.flag) ? 0 : -EINVAL;
+}
+
+/* Validation Test 2 - Create multiple taskq's, each with multiple tasks,
+ * wait until all tasks complete, ensure all tasks ran properly and in the
+ * the correct order, cleanup taskq's
+ */
+/* Test 2, first task: doubles arg->flag.  Combined with func2's +1,
+ * the final flag value proves the two tasks ran in dispatch order. */
+static void
+kzt_taskq_test2_func1(void *arg)
+{
+       kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
+
+       ASSERT(tq_arg);
+       kzt_vprint(tq_arg->file, KZT_TASKQ_TEST2_NAME,
+                  "Taskq '%s/%d' function '%s' flag = %d = %d * 2\n",
+                  tq_arg->name, tq_arg->id,
+                  sym2str(kzt_taskq_test2_func1),
+                  tq_arg->flag * 2, tq_arg->flag);
+       tq_arg->flag *= 2;
+}
+
+/* Test 2, second task: increments arg->flag.  Dispatched after func1;
+ * the *2-then-+1 result is order-sensitive by design. */
+static void
+kzt_taskq_test2_func2(void *arg)
+{
+       kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
+
+       ASSERT(tq_arg);
+       kzt_vprint(tq_arg->file, KZT_TASKQ_TEST2_NAME,
+                  "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
+                  tq_arg->name, tq_arg->id,
+                  sym2str(kzt_taskq_test2_func2),
+                  tq_arg->flag + 1, tq_arg->flag);
+       tq_arg->flag += 1;
+}
+
+#define TEST2_TASKQS                    8
+static int
+kzt_taskq_test2(struct file *file, void *arg) {
+       taskq_t *tq[TEST2_TASKQS] = { NULL };
+       taskqid_t id;
+       kzt_taskq_arg_t tq_args[TEST2_TASKQS];
+       int i, rc = 0;
+
+       for (i = 0; i < TEST2_TASKQS; i++) {
+
+               kzt_vprint(file, KZT_TASKQ_TEST2_NAME, "Taskq '%s/%d' "
+                          "creating\n", KZT_TASKQ_TEST2_NAME, i);
+               if ((tq[i] = taskq_create(KZT_TASKQ_TEST2_NAME,
+                                         1, 0, 0, 0, 0)) == NULL) {
+                       kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+                                  "Taskq '%s/%d' create failed\n",
+                                  KZT_TASKQ_TEST2_NAME, i);
+                       rc = -EINVAL;
+                       break;
+               }
+
+               tq_args[i].flag = i;
+               tq_args[i].id   = i;
+               tq_args[i].file = file;
+               tq_args[i].name = KZT_TASKQ_TEST2_NAME;
+
+               kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+                          "Taskq '%s/%d' function '%s' dispatching\n",
+                          tq_args[i].name, tq_args[i].id,
+                          sym2str(kzt_taskq_test2_func1));
+               if ((id = taskq_dispatch(
+                    tq[i], kzt_taskq_test2_func1, &tq_args[i], 0)) == 0) {
+                       kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+                                  "Taskq '%s/%d' function '%s' dispatch "
+                                  "failed\n", tq_args[i].name, tq_args[i].id,
+                                  sym2str(kzt_taskq_test2_func1));
+                       rc = -EINVAL;
+                       break;
+               }
+
+               kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+                          "Taskq '%s/%d' function '%s' dispatching\n",
+                          tq_args[i].name, tq_args[i].id,
+                          sym2str(kzt_taskq_test2_func2));
+               if ((id = taskq_dispatch(
+                    tq[i], kzt_taskq_test2_func2, &tq_args[i], 0)) == 0) {
+                       kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+                                  "Taskq '%s/%d' function '%s' dispatch failed\n",
+                                  tq_args[i].name, tq_args[i].id,
+                                  sym2str(kzt_taskq_test2_func2));
+                       rc = -EINVAL;
+                       break;
+               }
+       }
+
+       /* When rc is set we're effectively just doing cleanup here, so
+        * ignore new errors in that case.  They just cause noise. */
+       for (i = 0; i < TEST2_TASKQS; i++) {
+               if (tq[i] != NULL) {
+                       kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+                                  "Taskq '%s/%d' waiting\n",
+                                  tq_args[i].name, tq_args[i].id);
+                       taskq_wait(tq[i]);
+                       kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+                                  "Taskq '%s/%d; destroying\n",
+                                 tq_args[i].name, tq_args[i].id);
+                       taskq_destory(tq[i]);
+
+                       if (!rc && tq_args[i].flag != ((i * 2) + 1)) {
+                               kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+                                          "Taskq '%s/%d' processed tasks "
+                                          "out of order; %d != %d\n",
+                                          tq_args[i].name, tq_args[i].id,
+                                          tq_args[i].flag, i * 2 + 1);
+                               rc = -EINVAL;
+                       } else {
+                               kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+                                          "Taskq '%s/%d' processed tasks "
+                                          "in the correct order; %d == %d\n",
+                                          tq_args[i].name, tq_args[i].id,
+                                          tq_args[i].flag, i * 2 + 1);
+                       }
+               }
+       }
+
+       return rc;
+}
+
+kzt_subsystem_t *
+kzt_taskq_init(void)
+{
+        kzt_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, KZT_TASKQ_NAME, KZT_NAME_SIZE);
+        strncpy(sub->desc.desc, KZT_TASKQ_DESC, KZT_DESC_SIZE);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+       INIT_LIST_HEAD(&sub->test_list);
+       spin_lock_init(&sub->test_lock);
+        sub->desc.id = KZT_SUBSYSTEM_TASKQ;
+
+       KZT_TEST_INIT(sub, KZT_TASKQ_TEST1_NAME, KZT_TASKQ_TEST1_DESC,
+                     KZT_TASKQ_TEST1_ID, kzt_taskq_test1);
+       KZT_TEST_INIT(sub, KZT_TASKQ_TEST2_NAME, KZT_TASKQ_TEST2_DESC,
+                     KZT_TASKQ_TEST2_ID, kzt_taskq_test2);
+
+        return sub;
+}
+
+void
+kzt_taskq_fini(kzt_subsystem_t *sub)
+{
+        ASSERT(sub);
+       KZT_TEST_FINI(sub, KZT_TASKQ_TEST2_ID);
+       KZT_TEST_FINI(sub, KZT_TASKQ_TEST1_ID);
+
+        kfree(sub);
+}
+
+int
+kzt_taskq_id(void) {
+        return KZT_SUBSYSTEM_TASKQ;
+}
diff --git a/modules/splat/splat-thread.c b/modules/splat/splat-thread.c
new file mode 100644 (file)
index 0000000..34260c2
--- /dev/null
@@ -0,0 +1,115 @@
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_THREAD           0x0600
+#define KZT_THREAD_NAME                        "thread"
+#define KZT_THREAD_DESC                        "Kernel Thread Tests"
+
+#define KZT_THREAD_TEST1_ID            0x0601
+#define KZT_THREAD_TEST1_NAME          "create"
+#define KZT_THREAD_TEST1_DESC          "Validate thread creation and destruction"
+
+#define KZT_THREAD_TEST_MAGIC            0x4488CC00UL
+
+typedef struct thread_priv {
+        unsigned long tp_magic;
+        struct file *tp_file;
+        spinlock_t tp_lock;
+        wait_queue_head_t tp_waitq;
+       int tp_rc;
+} thread_priv_t;
+
+
+static void
+kzt_thread_work(void *priv)
+{
+       thread_priv_t *tp = (thread_priv_t *)priv;
+
+       spin_lock(&tp->tp_lock);
+       ASSERT(tp->tp_magic == KZT_THREAD_TEST_MAGIC);
+       tp->tp_rc = 1;
+
+       spin_unlock(&tp->tp_lock);
+       wake_up(&tp->tp_waitq);
+
+       thread_exit();
+}
+
+static int
+kzt_thread_test1(struct file *file, void *arg)
+{
+       thread_priv_t tp;
+        DEFINE_WAIT(wait);
+       kthread_t *thr;
+       int rc = 0;
+
+       tp.tp_magic = KZT_THREAD_TEST_MAGIC;
+       tp.tp_file = file;
+        spin_lock_init(&tp.tp_lock);
+       init_waitqueue_head(&tp.tp_waitq);
+       tp.tp_rc = 0;
+
+       spin_lock(&tp.tp_lock);
+
+       thr = (kthread_t *)thread_create(NULL, 0, kzt_thread_work, &tp, 0,
+                                        (proc_t *) &p0, TS_RUN, minclsyspri);
+       /* Must never fail under Solaris, but we check anyway so we can
+        * report an error when this impossible thing happens */
+       if (thr == NULL) {
+               rc = -ESRCH;
+               goto out;
+       }
+
+        for (;;) {
+                prepare_to_wait(&tp.tp_waitq, &wait, TASK_UNINTERRUPTIBLE);
+                if (tp.tp_rc)
+                        break;
+
+                spin_unlock(&tp.tp_lock);
+                schedule();
+                spin_lock(&tp.tp_lock);
+        }
+
+        kzt_vprint(file, KZT_THREAD_TEST1_NAME, "%s",
+                  "Thread successfully started and exited cleanly\n");
+out:
+       spin_unlock(&tp.tp_lock);
+
+       return rc;
+}
+
+kzt_subsystem_t *
+kzt_thread_init(void)
+{
+        kzt_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, KZT_THREAD_NAME, KZT_NAME_SIZE);
+        strncpy(sub->desc.desc, KZT_THREAD_DESC, KZT_DESC_SIZE);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+        INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = KZT_SUBSYSTEM_THREAD;
+
+        KZT_TEST_INIT(sub, KZT_THREAD_TEST1_NAME, KZT_THREAD_TEST1_DESC,
+                      KZT_THREAD_TEST1_ID, kzt_thread_test1);
+
+        return sub;
+}
+
+void
+kzt_thread_fini(kzt_subsystem_t *sub)
+{
+        ASSERT(sub);
+        KZT_TEST_FINI(sub, KZT_THREAD_TEST1_ID);
+
+        kfree(sub);
+}
+
+int
+kzt_thread_id(void) {
+        return KZT_SUBSYSTEM_THREAD;
+}
diff --git a/modules/splat/splat-time.c b/modules/splat/splat-time.c
new file mode 100644 (file)
index 0000000..3e8007a
--- /dev/null
@@ -0,0 +1,89 @@
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_TIME             0x0800
+#define KZT_TIME_NAME                  "time"
+#define KZT_TIME_DESC                  "Kernel Time Tests"
+
+#define KZT_TIME_TEST1_ID              0x0801
+#define KZT_TIME_TEST1_NAME            "time1"
+#define KZT_TIME_TEST1_DESC            "HZ Test"
+
+#define KZT_TIME_TEST2_ID              0x0802
+#define KZT_TIME_TEST2_NAME            "time2"
+#define KZT_TIME_TEST2_DESC            "Monotonic Test"
+
+static int
+kzt_time_test1(struct file *file, void *arg)
+{
+       int myhz = hz;
+       kzt_vprint(file, KZT_TIME_TEST1_NAME, "hz is %d\n", myhz);
+        return 0;
+}
+
+static int
+kzt_time_test2(struct file *file, void *arg)
+{
+        hrtime_t tm1, tm2;
+       int i;
+
+        tm1 = gethrtime();
+        kzt_vprint(file, KZT_TIME_TEST2_NAME, "time is %lld\n", tm1);
+
+        for(i = 0; i < 100; i++) {
+                tm2 = gethrtime();
+                kzt_vprint(file, KZT_TIME_TEST2_NAME, "time is %lld\n", tm2);
+
+                if(tm1 > tm2) {
+                        kzt_print(file, "%s: gethrtime() is not giving monotonically increasing values\n", KZT_TIME_TEST2_NAME);
+                        return 1;
+                }
+                tm1 = tm2;
+
+                set_current_state(TASK_INTERRUPTIBLE);
+                schedule_timeout(10);
+        }
+
+        return 0;
+}
+
+kzt_subsystem_t *
+kzt_time_init(void)
+{
+        kzt_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, KZT_TIME_NAME, KZT_NAME_SIZE);
+       strncpy(sub->desc.desc, KZT_TIME_DESC, KZT_DESC_SIZE);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+       INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = KZT_SUBSYSTEM_TIME;
+
+        KZT_TEST_INIT(sub, KZT_TIME_TEST1_NAME, KZT_TIME_TEST1_DESC,
+                     KZT_TIME_TEST1_ID, kzt_time_test1);
+        KZT_TEST_INIT(sub, KZT_TIME_TEST2_NAME, KZT_TIME_TEST2_DESC,
+                     KZT_TIME_TEST2_ID, kzt_time_test2);
+
+        return sub;
+}
+
+void
+kzt_time_fini(kzt_subsystem_t *sub)
+{
+        ASSERT(sub);
+
+        KZT_TEST_FINI(sub, KZT_TIME_TEST2_ID);
+        KZT_TEST_FINI(sub, KZT_TIME_TEST1_ID);
+
+        kfree(sub);
+}
+
+int
+kzt_time_id(void)
+{
+        return KZT_SUBSYSTEM_TIME;
+}
diff --git a/spl/Makefile.in b/spl/Makefile.in
deleted file mode 100644 (file)
index 82b45a2..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# Makefile.in for spl kernel module
-
-MODULES := spl
-DISTFILES = Makefile.in \
-            linux-kmem.c linux-rwlock.c linux-taskq.c \
-            linux-thread.c linux-generic.c
-CPPFLAGS += @KERNELCPPFLAGS@
-
-# Solaris porting layer module
-obj-m := spl.o
-
-spl-objs += linux-kmem.o
-spl-objs += linux-thread.o
-spl-objs += linux-taskq.o
-spl-objs += linux-rwlock.o
-
-splmodule := spl.ko
-splmoduledir := @kmoduledir@/kernel/lib/
-
-all: all-spec
-
-install: all
-       mkdir -p $(DESTDIR)$(splmoduledir)
-       $(INSTALL) -m 644 $(splmodule) $(DESTDIR)$(splmoduledir)/$(splmodule)
-       -/sbin/depmod -a
-
-uninstall:
-       rm -f $(DESTDIR)$(splmoduledir)/$(splmodule)
-       -/sbin/depmod -a
-
-clean:
-       -rm -f $(splmodule) *.o .*.cmd *.mod.c *.ko *.s */*.o
-
-distclean: clean
-       rm -f Makefile
-       rm -rf .tmp_versions
-
-maintainer-clean: distclean
-
-distdir: $(DISTFILES)
-       cp -p $(DISTFILES) $(distdir)
-
-all-spec:
-       $(MAKE) -C @kernelsrc@ SUBDIRS=`pwd` @KERNELMAKE_PARAMS@ modules
diff --git a/spl/linux-generic.c b/spl/linux-generic.c
deleted file mode 100644 (file)
index fa1ebab..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#include "linux-generic.h"
-
-/*
- * Generic support
- */
-
-int p0 = 0;
-EXPORT_SYMBOL(p0);
diff --git a/spl/linux-kmem.c b/spl/linux-kmem.c
deleted file mode 100644 (file)
index 4dc7c01..0000000
+++ /dev/null
@@ -1,251 +0,0 @@
-#include "linux-kmem.h"
-
-/*
- * Memory allocation interfaces
- */
-#ifdef DEBUG_KMEM
-/* Shim layer memory accounting */
-atomic_t kmem_alloc_used;
-unsigned int kmem_alloc_max;
-#endif
-
-/*
- * Slab allocation interfaces
- *
- * While the linux slab implementation was inspired by solaris they
- * have made some changes to the API which complicates this shim
- * layer.  For one thing the same symbol names are used with different
- * arguments for the prototypes.  To deal with this we must use the
- * preprocessor to re-order arguments.  Happily for us standard C says,
- * "Macro's appearing in their own expansion are not reexpanded" so
- * this does not result in an infinite recursion.  Additionally the
- * function pointers registered by solarias differ from those used
- * by linux so a lookup and mapping from linux style callback to a
- * solaris style callback is needed.  There is some overhead in this
- * operation which isn't horibile but it needs to be kept in mind.
- */
-typedef struct kmem_cache_cb {
-        struct list_head    kcc_list;
-        kmem_cache_t *      kcc_cache;
-        kmem_constructor_t  kcc_constructor;
-        kmem_destructor_t   kcc_destructor;
-        kmem_reclaim_t      kcc_reclaim;
-        void *              kcc_private;
-        void *              kcc_vmp;
-} kmem_cache_cb_t;
-
-
-static spinlock_t kmem_cache_cb_lock = SPIN_LOCK_UNLOCKED;
-//static spinlock_t kmem_cache_cb_lock = (spinlock_t) { 1 SPINLOCK_MAGIC_INIT };
-static LIST_HEAD(kmem_cache_cb_list);
-static struct shrinker *kmem_cache_shrinker;
-
-/* Function must be called while holding the kmem_cache_cb_lock
- * Because kmem_cache_t is an opaque datatype we're forced to
- * match pointers to identify specific cache entires.
- */
-static kmem_cache_cb_t *
-kmem_cache_find_cache_cb(kmem_cache_t *cache)
-{
-        kmem_cache_cb_t *kcc;
-
-        list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list)
-               if (cache == kcc->kcc_cache)
-                        return kcc;
-
-        return NULL;
-}
-
-static kmem_cache_cb_t *
-kmem_cache_add_cache_cb(kmem_cache_t *cache,
-                       kmem_constructor_t constructor,
-                        kmem_destructor_t destructor,
-                       kmem_reclaim_t reclaim,
-                        void *priv, void *vmp)
-{
-        kmem_cache_cb_t *kcc;
-
-        kcc = (kmem_cache_cb_t *)kmalloc(sizeof(*kcc), GFP_KERNEL);
-        if (kcc) {
-               kcc->kcc_cache = cache;
-                kcc->kcc_constructor = constructor;
-                kcc->kcc_destructor = destructor;
-                kcc->kcc_reclaim = reclaim;
-                kcc->kcc_private = priv;
-                kcc->kcc_vmp = vmp;
-               spin_lock(&kmem_cache_cb_lock);
-                list_add(&kcc->kcc_list, &kmem_cache_cb_list);
-               spin_unlock(&kmem_cache_cb_lock);
-        }
-
-        return kcc;
-}
-
-static void
-kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
-{
-       spin_lock(&kmem_cache_cb_lock);
-        list_del(&kcc->kcc_list);
-       spin_unlock(&kmem_cache_cb_lock);
-
-       if (kcc)
-              kfree(kcc);
-}
-
-static void
-kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
-{
-        kmem_cache_cb_t *kcc;
-
-       spin_lock(&kmem_cache_cb_lock);
-
-        /* Callback list must be in sync with linux slab caches */
-        kcc = kmem_cache_find_cache_cb(cache);
-        BUG_ON(!kcc);
-
-       kcc->kcc_constructor(ptr, kcc->kcc_private, (int)flags);
-       spin_unlock(&kmem_cache_cb_lock);
-       /* Linux constructor has no return code, silently eat it */
-}
-
-static void
-kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
-{
-        kmem_cache_cb_t *kcc;
-
-       spin_lock(&kmem_cache_cb_lock);
-
-        /* Callback list must be in sync with linux slab caches */
-        kcc = kmem_cache_find_cache_cb(cache);
-        BUG_ON(!kcc);
-
-       /* Solaris destructor takes no flags, silently eat them */
-       kcc->kcc_destructor(ptr, kcc->kcc_private);
-       spin_unlock(&kmem_cache_cb_lock);
-}
-
-/* XXX - Arguments are ignored */
-static int
-kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
-{
-        kmem_cache_cb_t *kcc;
-        int total = 0;
-
-       /* Under linux a shrinker is not tightly coupled with a slab
-        * cache.  In fact linux always systematically trys calling all
-        * registered shrinker callbacks until its target reclamation level
-        * is reached.  Because of this we only register one shrinker
-        * function in the shim layer for all slab caches.  And we always
-        * attempt to shrink all caches when this generic shrinker is called.
-        */
-       spin_lock(&kmem_cache_cb_lock);
-
-        list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
-               /* Under linux the desired number and gfp type of objects
-                * is passed to the reclaiming function as a sugested reclaim
-                * target.  I do not pass these args on because reclaim
-                * policy is entirely up to the owner under solaris.  We only
-                * pass on the pre-registered private data.
-                 */
-               if (kcc->kcc_reclaim)
-                        kcc->kcc_reclaim(kcc->kcc_private);
-
-               total += 1;
-        }
-
-       /* Under linux we should return the remaining number of entires in
-        * the cache.  Unfortunately, I don't see an easy way to safely
-        * emulate this behavior so I'm returning one entry per cache which
-        * was registered with the generic shrinker.  This should fake out
-        * the linux VM when it attempts to shrink caches.
-        */
-       spin_unlock(&kmem_cache_cb_lock);
-       return total;
-}
-
-/* Ensure the __kmem_cache_create/__kmem_cache_destroy macros are
- * removed here to prevent a recursive substitution, we want to call
- * the native linux version.
- */
-#undef kmem_cache_create
-#undef kmem_cache_destroy
-
-kmem_cache_t *
-__kmem_cache_create(char *name, size_t size, size_t align,
-        kmem_constructor_t constructor,
-       kmem_destructor_t destructor,
-       kmem_reclaim_t reclaim,
-        void *priv, void *vmp, int flags)
-{
-        kmem_cache_t *cache;
-        kmem_cache_cb_t *kcc;
-       int shrinker_flag = 0;
-
-        /* FIXME: - Option currently unsupported by shim layer */
-        BUG_ON(vmp);
-
-        cache = kmem_cache_create(name, size, align, flags,
-                                  kmem_cache_generic_constructor,
-                                  kmem_cache_generic_destructor);
-       if (cache == NULL)
-                return NULL;
-
-        /* Register shared shrinker function on initial cache create */
-       spin_lock(&kmem_cache_cb_lock);
-       if (list_empty(&kmem_cache_cb_list)) {
-                kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
-                                                 kmem_cache_generic_shrinker);
-                if (kmem_cache_shrinker == NULL) {
-                        kmem_cache_destroy(cache);
-                       spin_unlock(&kmem_cache_cb_lock);
-                        return NULL;
-                }
-
-        }
-       spin_unlock(&kmem_cache_cb_lock);
-
-        kcc = kmem_cache_add_cache_cb(cache, constructor, destructor,
-                                      reclaim, priv, vmp);
-        if (kcc == NULL) {
-               if (shrinker_flag) /* New shrinker registered must be removed */
-                       remove_shrinker(kmem_cache_shrinker);
-
-                kmem_cache_destroy(cache);
-                return NULL;
-        }
-
-        return cache;
-}
-EXPORT_SYMBOL(__kmem_cache_create);
-
-/* Return codes discarded because Solaris implementation has void return */
-void
-__kmem_cache_destroy(kmem_cache_t *cache)
-{
-        kmem_cache_cb_t *kcc;
-
-       spin_lock(&kmem_cache_cb_lock);
-        kcc = kmem_cache_find_cache_cb(cache);
-       spin_unlock(&kmem_cache_cb_lock);
-        if (kcc == NULL)
-                return;
-
-        kmem_cache_destroy(cache);
-        kmem_cache_remove_cache_cb(kcc);
-
-       /* Unregister generic shrinker on removal of all caches */
-       spin_lock(&kmem_cache_cb_lock);
-       if (list_empty(&kmem_cache_cb_list))
-                remove_shrinker(kmem_cache_shrinker);
-
-       spin_unlock(&kmem_cache_cb_lock);
-}
-EXPORT_SYMBOL(__kmem_cache_destroy);
-
-void
-__kmem_reap(void) {
-       /* Since there's no easy hook in to linux to force all the registered
-        * shrinkers to run we just run the ones registered for this shim */
-       kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
-}
-EXPORT_SYMBOL(__kmem_reap);
diff --git a/spl/linux-rwlock.c b/spl/linux-rwlock.c
deleted file mode 100644 (file)
index 24775c4..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-#include <linux-rwlock.h>
-
-int
-rw_lock_held(krwlock_t *rwlp)
-{
-       BUG_ON(rwlp->rw_magic != RW_MAGIC);
-
-#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-       if (rwlp->rw_sem.activity != 0) {
-#else
-       if (rwlp->rw_sem.count != 0) {
-#endif
-               return 1;
-       }
-
-       return 0;
-}
-
-int
-rw_read_held(krwlock_t *rwlp)
-{
-       BUG_ON(rwlp->rw_magic != RW_MAGIC);
-
-       if (rw_lock_held(rwlp) && rwlp->rw_owner == NULL) {
-               return 1;
-       }
-
-       return 0;
-}
-
-int
-rw_write_held(krwlock_t *rwlp)
-{
-       BUG_ON(rwlp->rw_magic != RW_MAGIC);
-
-       if (rwlp->rw_owner == current) {
-               return 1;
-       }
-
-       return 0;
-}
diff --git a/spl/linux-taskq.c b/spl/linux-taskq.c
deleted file mode 100644 (file)
index ddcf57c..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-#include <linux-taskq.h>
-
-/*
- * Task queue interface
- *
- * The taskq_work_wrapper functions are used to manage the work_structs
- * which must be submitted to linux.  The shim layer allocates a wrapper
- * structure for all items which contains a pointer to itself as well as
- * the real work to be performed.  When the work item run the generic
- * handle is called which calls the real work function and then using
- * the self pointer frees the work_struct.
- */
-typedef struct taskq_work_wrapper {
-        struct work_struct tww_work;
-        task_func_t        tww_func;
-        void *             tww_priv;
-} taskq_work_wrapper_t;
-
-static void
-taskq_work_handler(void *priv)
-{
-        taskq_work_wrapper_t *tww = priv;
-
-        BUG_ON(tww == NULL);
-        BUG_ON(tww->tww_func == NULL);
-
-        /* Call the real function and free the wrapper */
-        tww->tww_func(tww->tww_priv);
-        kfree(tww);
-}
-
-/* XXX - All flags currently ignored */
-taskqid_t
-__taskq_dispatch(taskq_t *tq, task_func_t func, void *priv, uint_t flags)
-{
-        struct workqueue_struct *wq = tq;
-        taskq_work_wrapper_t *tww;
-        int rc;
-
-
-        BUG_ON(in_interrupt());
-        BUG_ON(tq == NULL);
-        BUG_ON(func == NULL);
-
-        tww = (taskq_work_wrapper_t *)kmalloc(sizeof(*tww), GFP_KERNEL);
-        if (!tww)
-                return (taskqid_t)0;
-
-        INIT_WORK(&(tww->tww_work), taskq_work_handler, tww);
-        tww->tww_func = func;
-        tww->tww_priv = priv;
-
-        rc = queue_work(wq, &(tww->tww_work));
-        if (!rc) {
-                kfree(tww);
-                return (taskqid_t)0;
-        }
-
-        return (taskqid_t)wq;
-}
-EXPORT_SYMBOL(__taskq_dispatch);
-
-/* XXX - Most args ignored until we decide if it's worth the effort
- *       to emulate the solaris notion of dynamic thread pools.  For
- *       now we simply serialize everything through one thread which
- *       may come back to bite us as a performance issue.
- * pri   - Ignore priority
- * min   - Ignored until this is a dynamic thread pool
- * max   - Ignored until this is a dynamic thread pool
- * flags - Ignored until this is a dynamic thread_pool
- */
-taskq_t *
-__taskq_create(const char *name, int nthreads, pri_t pri,
-               int minalloc, int maxalloc, uint_t flags)
-{
-       /* NOTE: Linux workqueue names are limited to 10 chars */
-
-        return create_singlethread_workqueue(name);
-}
-EXPORT_SYMBOL(__taskq_create);
diff --git a/spl/linux-thread.c b/spl/linux-thread.c
deleted file mode 100644 (file)
index 9785d50..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-#include <linux-thread.h>
-
-/*
- * Thread interfaces
- */
-typedef struct thread_priv_s {
-       unsigned long tp_magic;         /* Magic */
-       void (*tp_func)(void *);        /* Registered function */
-       void *tp_args;                  /* Args to be passed to function */
-       size_t tp_len;                  /* Len to be passed to function */
-       int tp_state;                   /* State to start thread at */
-       pri_t tp_pri;                   /* Priority to start threat at */
-       volatile kthread_t *tp_task;    /* Task pointer for new thread */
-       spinlock_t tp_lock;             /* Syncronization lock */
-        wait_queue_head_t tp_waitq;    /* Syncronization wait queue */
-} thread_priv_t;
-
-static int
-thread_generic_wrapper(void *arg)
-{
-       thread_priv_t *tp = (thread_priv_t *)arg;
-       void (*func)(void *);
-       void *args;
-       char name[16];
-
-       /* Use the truncated function name as thread name */
-       snprintf(name, sizeof(name), "%s", "kthread");
-       daemonize(name);
-
-        spin_lock(&tp->tp_lock);
-       BUG_ON(tp->tp_magic != TP_MAGIC);
-       func = tp->tp_func;
-       args = tp->tp_args;
-       tp->tp_task = get_current();
-       set_current_state(tp->tp_state);
-       set_user_nice((kthread_t *)tp->tp_task, PRIO_TO_NICE(tp->tp_pri));
-
-        spin_unlock(&tp->tp_lock);
-       wake_up(&tp->tp_waitq);
-
-       /* DO NOT USE 'ARG' AFTER THIS POINT, EVER, EVER, EVER!
-        * Local variables are used here because after the calling thread
-        * has been woken up it will exit and this memory will no longer
-        * be safe to access since it was declared on the callers stack. */
-       if (func)
-               func(args);
-
-       return 0;
-}
-
-void
-__thread_exit(void)
-{
-       return;
-}
-EXPORT_SYMBOL(__thread_exit);
-
-/* thread_create() may block forever if it cannot create a thread or
- * allocate memory.  This is preferable to returning a NULL which Solaris
- * style callers likely never check for... since it can't fail. */
-kthread_t *
-__thread_create(caddr_t stk, size_t  stksize, void (*proc)(void *),
-               void *args, size_t len, proc_t *pp, int state, pri_t pri)
-{
-       thread_priv_t tp;
-       DEFINE_WAIT(wait);
-       long pid;
-
-       /* Option pp is simply ignored */
-       /* Variable stack size unsupported */
-       BUG_ON(stk != NULL);
-       BUG_ON(stk != 0);
-
-       /* Variable tp is located on the stack and not the heap because I want
-        * to minimize any chance of a failure, since the Solaris code is designed
-        * such that this function cannot fail.  This is a little dangerous since
-        * we're passing a stack address to a new thread but correct locking was
-        * added to ensure the callee can use the data safely until wake_up(). */
-       tp.tp_magic = TP_MAGIC;
-       tp.tp_func  = proc;
-       tp.tp_args  = args;
-       tp.tp_len   = len;
-       tp.tp_state = state;
-       tp.tp_pri   = pri;
-       tp.tp_task  = NULL;
-       spin_lock_init(&tp.tp_lock);
-        init_waitqueue_head(&tp.tp_waitq);
-
-       spin_lock(&tp.tp_lock);
-
-       /* Solaris says this must never fail so we try forever */
-       while ((pid = kernel_thread(thread_generic_wrapper, (void *)&tp, 0)) < 0)
-               printk(KERN_ERR "linux-thread: Unable to create thread; "
-                      "pid = %ld\n", pid);
-
-       /* All signals are ignored due to sleeping TASK_UNINTERRUPTIBLE */
-       for (;;) {
-               prepare_to_wait(&tp.tp_waitq, &wait, TASK_UNINTERRUPTIBLE);
-               if (tp.tp_task != NULL)
-                       break;
-
-               spin_unlock(&tp.tp_lock);
-               schedule();
-               spin_lock(&tp.tp_lock);
-       }
-
-       /* Verify the pid retunred matches the pid in the task struct */
-       BUG_ON(pid != (tp.tp_task)->pid);
-
-       spin_unlock(&tp.tp_lock);
-
-       return (kthread_t *)tp.tp_task;
-}
-EXPORT_SYMBOL(__thread_create);
diff --git a/splat/Makefile.in b/splat/Makefile.in
deleted file mode 100644 (file)
index ebc0fb6..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-# Makefile.in for splat kernel module
-
-MODULES := splat
-DISTFILES = Makefile.in \
-            splat-kmem.c splat-random.c splat-taskq.c \
-            splat-time.c splat-condvar.c  splat-mutex.c \
-            splat-rwlock.c  splat-thread.c splat-ctl.c
-CPPFLAGS += @KERNELCPPFLAGS@
-
-# Solaris porting layer aggressive tests
-obj-m := splat.o
-
-splat-objs += splat-ctl.o
-splat-objs += splat-kmem.o
-splat-objs += splat-taskq.o
-splat-objs += splat-random.o
-splat-objs += splat-mutex.o
-splat-objs += splat-condvar.o
-splat-objs += splat-thread.o
-splat-objs += splat-rwlock.o
-splat-objs += splat-time.o
-
-splatmodule := splat.ko
-splatmoduledir := @kmoduledir@/kernel/lib/
-
-all: all-spec
-
-install: all
-       mkdir -p $(DESTDIR)$(splatmoduledir)
-       $(INSTALL) -m 644 $(splatmodule) $(DESTDIR)$(splatmoduledir)/$(splatmodule)
-       -/sbin/depmod -a
-
-uninstall:
-       rm -f $(DESTDIR)$(splatmoduledir)/$(splatmodule)
-       -/sbin/depmod -a
-
-clean:
-       -rm -f $(splmodule) *.o .*.cmd *.mod.c *.ko *.s */*.o
-
-distclean: clean
-       rm -f Makefile
-       rm -rf .tmp_versions
-
-maintainer-clean: distclean
-
-distdir: $(DISTFILES)
-       cp -p $(DISTFILES) $(distdir)
-
-all-spec:
-       $(MAKE) -C @kernelsrc@ SUBDIRS=`pwd` @KERNELMAKE_PARAMS@ modules
diff --git a/splat/splat-condvar.c b/splat/splat-condvar.c
deleted file mode 100644 (file)
index 7c9b557..0000000
+++ /dev/null
@@ -1,453 +0,0 @@
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_CONDVAR          0x0500
-#define KZT_CONDVAR_NAME               "condvar"
-#define KZT_CONDVAR_DESC               "Kernel Condition Variable Tests"
-
-#define KZT_CONDVAR_TEST1_ID           0x0501
-#define KZT_CONDVAR_TEST1_NAME         "signal1"
-#define KZT_CONDVAR_TEST1_DESC         "Wake a single thread, cv_wait()/cv_signal()"
-
-#define KZT_CONDVAR_TEST2_ID           0x0502
-#define KZT_CONDVAR_TEST2_NAME         "broadcast1"
-#define KZT_CONDVAR_TEST2_DESC         "Wake all threads, cv_wait()/cv_broadcast()"
-
-#define KZT_CONDVAR_TEST3_ID           0x0503
-#define KZT_CONDVAR_TEST3_NAME         "signal2"
-#define KZT_CONDVAR_TEST3_DESC         "Wake a single thread, cv_wait_timeout()/cv_signal()"
-
-#define KZT_CONDVAR_TEST4_ID           0x0504
-#define KZT_CONDVAR_TEST4_NAME         "broadcast2"
-#define KZT_CONDVAR_TEST4_DESC         "Wake all threads, cv_wait_timeout()/cv_broadcast()"
-
-#define KZT_CONDVAR_TEST5_ID           0x0505
-#define KZT_CONDVAR_TEST5_NAME         "timeout"
-#define KZT_CONDVAR_TEST5_DESC         "Timeout thread, cv_wait_timeout()"
-
-#define KZT_CONDVAR_TEST_MAGIC         0x115599DDUL
-#define KZT_CONDVAR_TEST_NAME          "condvar_test"
-#define KZT_CONDVAR_TEST_COUNT         8
-
-typedef struct condvar_priv {
-        unsigned long cv_magic;
-        struct file *cv_file;
-       kcondvar_t cv_condvar;
-       kmutex_t cv_mtx;
-} condvar_priv_t;
-
-typedef struct condvar_thr {
-       int ct_id;
-       const char *ct_name;
-       condvar_priv_t *ct_cvp;
-       int ct_rc;
-} condvar_thr_t;
-
-int
-kzt_condvar_test12_thread(void *arg)
-{
-       condvar_thr_t *ct = (condvar_thr_t *)arg;
-       condvar_priv_t *cv = ct->ct_cvp;
-       char name[16];
-
-       ASSERT(cv->cv_magic == KZT_CONDVAR_TEST_MAGIC);
-        snprintf(name, sizeof(name), "%s%d", KZT_CONDVAR_TEST_NAME, ct->ct_id);
-       daemonize(name);
-
-       mutex_enter(&cv->cv_mtx);
-       kzt_vprint(cv->cv_file, ct->ct_name,
-                  "%s thread sleeping with %d waiters\n",
-                  name, atomic_read(&cv->cv_condvar.cv_waiters));
-       cv_wait(&cv->cv_condvar, &cv->cv_mtx);
-       kzt_vprint(cv->cv_file, ct->ct_name,
-                  "%s thread woken %d waiters remain\n",
-                  name, atomic_read(&cv->cv_condvar.cv_waiters));
-       mutex_exit(&cv->cv_mtx);
-
-       return 0;
-}
-
-static int
-kzt_condvar_test1(struct file *file, void *arg)
-{
-       int i, count = 0, rc = 0;
-       long pids[KZT_CONDVAR_TEST_COUNT];
-       condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
-       condvar_priv_t cv;
-
-       cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
-       cv.cv_file = file;
-       mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
-       cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
-
-       /* Create some threads, the exact number isn't important just as
-        * long as we know how many we managed to create and should expect. */
-       for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
-               ct[i].ct_cvp = &cv;
-               ct[i].ct_id = i;
-               ct[i].ct_name = KZT_CONDVAR_TEST1_NAME;
-               ct[i].ct_rc = 0;
-
-               pids[i] = kernel_thread(kzt_condvar_test12_thread, &ct[i], 0);
-               if (pids[i] >= 0)
-                       count++;
-       }
-
-       /* Wait until all threads are waiting on the condition variable */
-       while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
-               schedule();
-
-       /* Wake a single thread at a time, wait until it exits */
-       for (i = 1; i <= count; i++) {
-               cv_signal(&cv.cv_condvar);
-
-               while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
-                       schedule();
-
-               /* Correct behavior 1 thread woken */
-               if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
-                       continue;
-
-                kzt_vprint(file, KZT_CONDVAR_TEST1_NAME, "Attempted to "
-                          "wake %d thread but work %d threads woke\n",
-                          1, count - atomic_read(&cv.cv_condvar.cv_waiters));
-               rc = -EINVAL;
-               break;
-       }
-
-       if (!rc)
-                kzt_vprint(file, KZT_CONDVAR_TEST1_NAME, "Correctly woke "
-                          "%d sleeping threads %d at a time\n", count, 1);
-
-       /* Wait until that last nutex is dropped */
-       while (mutex_owner(&cv.cv_mtx))
-               schedule();
-
-       /* Wake everything for the failure case */
-       cv_broadcast(&cv.cv_condvar);
-       cv_destroy(&cv.cv_condvar);
-       mutex_destroy(&cv.cv_mtx);
-
-       return rc;
-}
-
-static int
-kzt_condvar_test2(struct file *file, void *arg)
-{
-       int i, count = 0, rc = 0;
-       long pids[KZT_CONDVAR_TEST_COUNT];
-       condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
-       condvar_priv_t cv;
-
-       cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
-       cv.cv_file = file;
-       mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
-       cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
-
-       /* Create some threads, the exact number isn't important just as
-        * long as we know how many we managed to create and should expect. */
-       for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
-               ct[i].ct_cvp = &cv;
-               ct[i].ct_id = i;
-               ct[i].ct_name = KZT_CONDVAR_TEST2_NAME;
-               ct[i].ct_rc = 0;
-
-               pids[i] = kernel_thread(kzt_condvar_test12_thread, &ct[i], 0);
-               if (pids[i] > 0)
-                       count++;
-       }
-
-       /* Wait until all threads are waiting on the condition variable */
-       while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
-               schedule();
-
-       /* Wake all threads waiting on the condition variable */
-       cv_broadcast(&cv.cv_condvar);
-
-       /* Wait until all threads have exited */
-       while ((atomic_read(&cv.cv_condvar.cv_waiters) > 0) || mutex_owner(&cv.cv_mtx))
-               schedule();
-
-        kzt_vprint(file, KZT_CONDVAR_TEST2_NAME, "Correctly woke all "
-                          "%d sleeping threads at once\n", count);
-
-       /* Wake everything for the failure case */
-       cv_destroy(&cv.cv_condvar);
-       mutex_destroy(&cv.cv_mtx);
-
-       return rc;
-}
-
-int
-kzt_condvar_test34_thread(void *arg)
-{
-       condvar_thr_t *ct = (condvar_thr_t *)arg;
-       condvar_priv_t *cv = ct->ct_cvp;
-       char name[16];
-       clock_t rc;
-
-       ASSERT(cv->cv_magic == KZT_CONDVAR_TEST_MAGIC);
-        snprintf(name, sizeof(name), "%s%d", KZT_CONDVAR_TEST_NAME, ct->ct_id);
-       daemonize(name);
-
-       mutex_enter(&cv->cv_mtx);
-       kzt_vprint(cv->cv_file, ct->ct_name,
-                  "%s thread sleeping with %d waiters\n",
-                  name, atomic_read(&cv->cv_condvar.cv_waiters));
-
-       /* Sleep no longer than 3 seconds, for this test we should
-        * actually never sleep that long without being woken up. */
-       rc = cv_timedwait(&cv->cv_condvar, &cv->cv_mtx, lbolt + HZ * 3);
-       if (rc == -1) {
-               ct->ct_rc = -ETIMEDOUT;
-               kzt_vprint(cv->cv_file, ct->ct_name, "%s thread timed out, "
-                          "should have been woken\n", name);
-       } else {
-               kzt_vprint(cv->cv_file, ct->ct_name,
-                          "%s thread woken %d waiters remain\n",
-                          name, atomic_read(&cv->cv_condvar.cv_waiters));
-       }
-
-       mutex_exit(&cv->cv_mtx);
-
-       return 0;
-}
-
-static int
-kzt_condvar_test3(struct file *file, void *arg)
-{
-       int i, count = 0, rc = 0;
-       long pids[KZT_CONDVAR_TEST_COUNT];
-       condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
-       condvar_priv_t cv;
-
-       cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
-       cv.cv_file = file;
-       mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
-       cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
-
-       /* Create some threads, the exact number isn't important just as
-        * long as we know how many we managed to create and should expect. */
-       for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
-               ct[i].ct_cvp = &cv;
-               ct[i].ct_id = i;
-               ct[i].ct_name = KZT_CONDVAR_TEST3_NAME;
-               ct[i].ct_rc = 0;
-
-               pids[i] = kernel_thread(kzt_condvar_test34_thread, &ct[i], 0);
-               if (pids[i] >= 0)
-                       count++;
-       }
-
-       /* Wait until all threads are waiting on the condition variable */
-       while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
-               schedule();
-
-       /* Wake a single thread at a time, wait until it exits */
-       for (i = 1; i <= count; i++) {
-               cv_signal(&cv.cv_condvar);
-
-               while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
-                       schedule();
-
-               /* Correct behavior 1 thread woken */
-               if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
-                       continue;
-
-                kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Attempted to "
-                          "wake %d thread but work %d threads woke\n",
-                          1, count - atomic_read(&cv.cv_condvar.cv_waiters));
-               rc = -EINVAL;
-               break;
-       }
-
-       /* Validate no waiting thread timed out early */
-       for (i = 0; i < count; i++)
-               if (ct[i].ct_rc)
-                       rc = ct[i].ct_rc;
-
-       if (!rc)
-                kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Correctly woke "
-                          "%d sleeping threads %d at a time\n", count, 1);
-
-       /* Wait until that last nutex is dropped */
-       while (mutex_owner(&cv.cv_mtx))
-               schedule();
-
-       /* Wake everything for the failure case */
-       cv_broadcast(&cv.cv_condvar);
-       cv_destroy(&cv.cv_condvar);
-       mutex_destroy(&cv.cv_mtx);
-
-       return rc;
-}
-
-static int
-kzt_condvar_test4(struct file *file, void *arg)
-{
-       int i, count = 0, rc = 0;
-       long pids[KZT_CONDVAR_TEST_COUNT];
-       condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
-       condvar_priv_t cv;
-
-       cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
-       cv.cv_file = file;
-       mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
-       cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
-
-       /* Create some threads, the exact number isn't important just as
-        * long as we know how many we managed to create and should expect. */
-       for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
-               ct[i].ct_cvp = &cv;
-               ct[i].ct_id = i;
-               ct[i].ct_name = KZT_CONDVAR_TEST3_NAME;
-               ct[i].ct_rc = 0;
-
-               pids[i] = kernel_thread(kzt_condvar_test34_thread, &ct[i], 0);
-               if (pids[i] >= 0)
-                       count++;
-       }
-
-       /* Wait until all threads are waiting on the condition variable */
-       while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
-               schedule();
-
-       /* Wake a single thread at a time, wait until it exits */
-       for (i = 1; i <= count; i++) {
-               cv_signal(&cv.cv_condvar);
-
-               while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
-                       schedule();
-
-               /* Correct behavior 1 thread woken */
-               if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
-                       continue;
-
-                kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Attempted to "
-                          "wake %d thread but work %d threads woke\n",
-                          1, count - atomic_read(&cv.cv_condvar.cv_waiters));
-               rc = -EINVAL;
-               break;
-       }
-
-       /* Validate no waiting thread timed out early */
-       for (i = 0; i < count; i++)
-               if (ct[i].ct_rc)
-                       rc = ct[i].ct_rc;
-
-       if (!rc)
-                kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Correctly woke "
-                          "%d sleeping threads %d at a time\n", count, 1);
-
-       /* Wait until that last nutex is dropped */
-       while (mutex_owner(&cv.cv_mtx))
-               schedule();
-
-       /* Wake everything for the failure case */
-       cv_broadcast(&cv.cv_condvar);
-       cv_destroy(&cv.cv_condvar);
-       mutex_destroy(&cv.cv_mtx);
-
-       return rc;
-}
-
-static int
-kzt_condvar_test5(struct file *file, void *arg)
-{
-        kcondvar_t condvar;
-        kmutex_t mtx;
-       clock_t time_left, time_before, time_after, time_delta;
-       int64_t whole_delta;
-       int32_t remain_delta;
-       int rc = 0;
-
-       mutex_init(&mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
-       cv_init(&condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
-
-        kzt_vprint(file, KZT_CONDVAR_TEST5_NAME, "Thread going to sleep for "
-                  "%d second and expecting to be woken by timeout\n", 1);
-
-       /* Allow a 1 second timeout, plenty long to validate correctness. */
-       time_before = lbolt;
-       mutex_enter(&mtx);
-       time_left = cv_timedwait(&condvar, &mtx, lbolt + HZ);
-       mutex_exit(&mtx);
-       time_after = lbolt;
-       time_delta = time_after - time_before; /* XXX - Handle jiffie wrap */
-       whole_delta  = time_delta;
-       remain_delta = do_div(whole_delta, HZ);
-
-       if (time_left == -1) {
-               if (time_delta >= HZ) {
-                       kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
-                                  "Thread correctly timed out and was asleep "
-                                  "for %d.%d seconds (%d second min)\n",
-                                  (int)whole_delta, remain_delta, 1);
-               } else {
-                       kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
-                                  "Thread correctly timed out but was only "
-                                  "asleep for %d.%d seconds (%d second "
-                                  "min)\n", (int)whole_delta, remain_delta, 1);
-                       rc = -ETIMEDOUT;
-               }
-       } else {
-               kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
-                          "Thread exited after only %d.%d seconds, it "
-                          "did not hit the %d second timeout\n",
-                          (int)whole_delta, remain_delta, 1);
-               rc = -ETIMEDOUT;
-       }
-
-       cv_destroy(&condvar);
-       mutex_destroy(&mtx);
-
-       return rc;
-}
-
-kzt_subsystem_t *
-kzt_condvar_init(void)
-{
-        kzt_subsystem_t *sub;
-
-        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
-        if (sub == NULL)
-                return NULL;
-
-        memset(sub, 0, sizeof(*sub));
-        strncpy(sub->desc.name, KZT_CONDVAR_NAME, KZT_NAME_SIZE);
-        strncpy(sub->desc.desc, KZT_CONDVAR_DESC, KZT_DESC_SIZE);
-        INIT_LIST_HEAD(&sub->subsystem_list);
-        INIT_LIST_HEAD(&sub->test_list);
-        spin_lock_init(&sub->test_lock);
-        sub->desc.id = KZT_SUBSYSTEM_CONDVAR;
-
-        KZT_TEST_INIT(sub, KZT_CONDVAR_TEST1_NAME, KZT_CONDVAR_TEST1_DESC,
-                      KZT_CONDVAR_TEST1_ID, kzt_condvar_test1);
-        KZT_TEST_INIT(sub, KZT_CONDVAR_TEST2_NAME, KZT_CONDVAR_TEST2_DESC,
-                      KZT_CONDVAR_TEST2_ID, kzt_condvar_test2);
-        KZT_TEST_INIT(sub, KZT_CONDVAR_TEST3_NAME, KZT_CONDVAR_TEST3_DESC,
-                      KZT_CONDVAR_TEST3_ID, kzt_condvar_test3);
-        KZT_TEST_INIT(sub, KZT_CONDVAR_TEST4_NAME, KZT_CONDVAR_TEST4_DESC,
-                      KZT_CONDVAR_TEST4_ID, kzt_condvar_test4);
-        KZT_TEST_INIT(sub, KZT_CONDVAR_TEST5_NAME, KZT_CONDVAR_TEST5_DESC,
-                      KZT_CONDVAR_TEST5_ID, kzt_condvar_test5);
-
-        return sub;
-}
-
-void
-kzt_condvar_fini(kzt_subsystem_t *sub)
-{
-        ASSERT(sub);
-        KZT_TEST_FINI(sub, KZT_CONDVAR_TEST5_ID);
-        KZT_TEST_FINI(sub, KZT_CONDVAR_TEST4_ID);
-        KZT_TEST_FINI(sub, KZT_CONDVAR_TEST3_ID);
-        KZT_TEST_FINI(sub, KZT_CONDVAR_TEST2_ID);
-        KZT_TEST_FINI(sub, KZT_CONDVAR_TEST1_ID);
-
-        kfree(sub);
-}
-
-int
-kzt_condvar_id(void) {
-        return KZT_SUBSYSTEM_CONDVAR;
-}
diff --git a/splat/splat-ctl.c b/splat/splat-ctl.c
deleted file mode 100644 (file)
index 9bff58a..0000000
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * My intent is the create a loadable kzt (kernel ZFS test) module
- * which can be used as an access point to run in kernel ZFS regression
- * tests.  Why do we need this when we have ztest?  Well ztest.c only
- * excersises the ZFS code proper, it cannot be used to validate the
- * linux kernel shim primatives.  This also provides a nice hook for
- * any other in kernel regression tests we wish to run such as direct
- * in-kernel tests against the DMU.
- *
- * The basic design is the kzt module is that it is constructed of
- * various kzt_* source files each of which contains regression tests.
- * For example the kzt_linux_kmem.c file contains tests for validating
- * kmem correctness.  When the kzt module is loaded kzt_*_init()
- * will be called for each subsystems tests, similarly kzt_*_fini() is
- * called when the kzt module is removed.  Each test can then be
- * run by making an ioctl() call from a userspace control application
- * to pick the subsystem and test which should be run.
- *
- * Author: Brian Behlendorf
- */
-
-#include <splat-ctl.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
-#include <linux/devfs_fs_kernel.h>
-#endif
-
-#include <linux/cdev.h>
-
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
-static struct class_simple *kzt_class;
-#else
-static struct class *kzt_class;
-#endif
-static struct list_head kzt_module_list;
-static spinlock_t kzt_module_lock;
-
-static int
-kzt_open(struct inode *inode, struct file *file)
-{
-       unsigned int minor = iminor(inode);
-       kzt_info_t *info;
-
-       if (minor >= KZT_MINORS)
-               return -ENXIO;
-
-       info = (kzt_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
-       if (info == NULL)
-               return -ENOMEM;
-
-       spin_lock_init(&info->info_lock);
-       info->info_size = KZT_INFO_BUFFER_SIZE;
-       info->info_buffer = (char *)vmalloc(KZT_INFO_BUFFER_SIZE);
-       if (info->info_buffer == NULL) {
-               kfree(info);
-               return -ENOMEM;
-       }
-
-       info->info_head = info->info_buffer;
-       file->private_data = (void *)info;
-
-       kzt_print(file, "Kernel ZFS Tests %s\n", KZT_VERSION);
-
-        return 0;
-}
-
-static int
-kzt_release(struct inode *inode, struct file *file)
-{
-       unsigned int minor = iminor(inode);
-       kzt_info_t *info = (kzt_info_t *)file->private_data;
-
-       if (minor >= KZT_MINORS)
-               return -ENXIO;
-
-       ASSERT(info);
-       ASSERT(info->info_buffer);
-
-       vfree(info->info_buffer);
-       kfree(info);
-
-       return 0;
-}
-
-static int
-kzt_buffer_clear(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
-{
-       kzt_info_t *info = (kzt_info_t *)file->private_data;
-
-       ASSERT(info);
-       ASSERT(info->info_buffer);
-
-       spin_lock(&info->info_lock);
-       memset(info->info_buffer, 0, info->info_size);
-       info->info_head = info->info_buffer;
-       spin_unlock(&info->info_lock);
-
-       return 0;
-}
-
-static int
-kzt_buffer_size(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
-{
-       kzt_info_t *info = (kzt_info_t *)file->private_data;
-       char *buf;
-       int min, size, rc = 0;
-
-       ASSERT(info);
-       ASSERT(info->info_buffer);
-
-       spin_lock(&info->info_lock);
-       if (kcfg->cfg_arg1 > 0) {
-
-               size = kcfg->cfg_arg1;
-               buf = (char *)vmalloc(size);
-               if (buf == NULL) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
-
-               /* Zero fill and truncate contents when coping buffer */
-               min = ((size < info->info_size) ? size : info->info_size);
-               memset(buf, 0, size);
-               memcpy(buf, info->info_buffer, min);
-               vfree(info->info_buffer);
-               info->info_size = size;
-               info->info_buffer = buf;
-               info->info_head = info->info_buffer;
-       }
-
-       kcfg->cfg_rc1 = info->info_size;
-
-       if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
-               rc = -EFAULT;
-out:
-       spin_unlock(&info->info_lock);
-
-       return rc;
-}
-
-
-static kzt_subsystem_t *
-kzt_subsystem_find(int id) {
-       kzt_subsystem_t *sub;
-
-        spin_lock(&kzt_module_lock);
-        list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
-               if (id == sub->desc.id) {
-                       spin_unlock(&kzt_module_lock);
-                       return sub;
-               }
-        }
-        spin_unlock(&kzt_module_lock);
-
-       return NULL;
-}
-
-static int
-kzt_subsystem_count(kzt_cfg_t *kcfg, unsigned long arg)
-{
-       kzt_subsystem_t *sub;
-       int i = 0;
-
-        spin_lock(&kzt_module_lock);
-        list_for_each_entry(sub, &kzt_module_list, subsystem_list)
-               i++;
-
-        spin_unlock(&kzt_module_lock);
-       kcfg->cfg_rc1 = i;
-
-       if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int
-kzt_subsystem_list(kzt_cfg_t *kcfg, unsigned long arg)
-{
-       kzt_subsystem_t *sub;
-       kzt_cfg_t *tmp;
-       int size, i = 0;
-
-       /* Structure will be sized large enough for N subsystem entries
-        * which is passed in by the caller.  On exit the number of
-        * entries filled in with valid subsystems will be stored in
-        * cfg_rc1.  If the caller does not provide enough entries
-        * for all subsystems we will truncate the list to avoid overrun.
-        */
-       size = sizeof(*tmp) + kcfg->cfg_data.kzt_subsystems.size *
-              sizeof(kzt_user_t);
-       tmp = kmalloc(size, GFP_KERNEL);
-       if (tmp == NULL)
-               return -ENOMEM;
-
-       /* Local 'tmp' is used as the structure copied back to user space */
-       memset(tmp, 0, size);
-       memcpy(tmp, kcfg, sizeof(*kcfg));
-
-        spin_lock(&kzt_module_lock);
-        list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
-               strncpy(tmp->cfg_data.kzt_subsystems.descs[i].name,
-                       sub->desc.name, KZT_NAME_SIZE);
-               strncpy(tmp->cfg_data.kzt_subsystems.descs[i].desc,
-                       sub->desc.desc, KZT_DESC_SIZE);
-               tmp->cfg_data.kzt_subsystems.descs[i].id = sub->desc.id;
-
-               /* Truncate list if we are about to overrun alloc'ed memory */
-               if ((i++) == kcfg->cfg_data.kzt_subsystems.size)
-                       break;
-        }
-        spin_unlock(&kzt_module_lock);
-       tmp->cfg_rc1 = i;
-
-       if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
-               kfree(tmp);
-               return -EFAULT;
-       }
-
-       kfree(tmp);
-       return 0;
-}
-
-static int
-kzt_test_count(kzt_cfg_t *kcfg, unsigned long arg)
-{
-       kzt_subsystem_t *sub;
-       kzt_test_t *test;
-       int i = 0;
-
-       /* Subsystem ID passed as arg1 */
-       sub = kzt_subsystem_find(kcfg->cfg_arg1);
-       if (sub == NULL)
-               return -EINVAL;
-
-        spin_lock(&(sub->test_lock));
-        list_for_each_entry(test, &(sub->test_list), test_list)
-               i++;
-
-        spin_unlock(&(sub->test_lock));
-       kcfg->cfg_rc1 = i;
-
-       if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int
-kzt_test_list(kzt_cfg_t *kcfg, unsigned long arg)
-{
-       kzt_subsystem_t *sub;
-       kzt_test_t *test;
-       kzt_cfg_t *tmp;
-       int size, i = 0;
-
-       /* Subsystem ID passed as arg1 */
-       sub = kzt_subsystem_find(kcfg->cfg_arg1);
-       if (sub == NULL)
-               return -EINVAL;
-
-       /* Structure will be sized large enough for N test entries
-        * which is passed in by the caller.  On exit the number of
-        * entries filled in with valid tests will be stored in
-        * cfg_rc1.  If the caller does not provide enough entries
-        * for all tests we will truncate the list to avoid overrun.
-        */
-       size = sizeof(*tmp)+kcfg->cfg_data.kzt_tests.size*sizeof(kzt_user_t);
-       tmp = kmalloc(size, GFP_KERNEL);
-       if (tmp == NULL)
-               return -ENOMEM;
-
-       /* Local 'tmp' is used as the structure copied back to user space */
-       memset(tmp, 0, size);
-       memcpy(tmp, kcfg, sizeof(*kcfg));
-
-        spin_lock(&(sub->test_lock));
-        list_for_each_entry(test, &(sub->test_list), test_list) {
-               strncpy(tmp->cfg_data.kzt_tests.descs[i].name,
-                       test->desc.name, KZT_NAME_SIZE);
-               strncpy(tmp->cfg_data.kzt_tests.descs[i].desc,
-                       test->desc.desc, KZT_DESC_SIZE);
-               tmp->cfg_data.kzt_tests.descs[i].id = test->desc.id;
-
-               /* Truncate list if we are about to overrun alloc'ed memory */
-               if ((i++) == kcfg->cfg_data.kzt_tests.size)
-                       break;
-        }
-        spin_unlock(&(sub->test_lock));
-       tmp->cfg_rc1 = i;
-
-       if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
-               kfree(tmp);
-               return -EFAULT;
-       }
-
-       kfree(tmp);
-       return 0;
-}
-
-static int
-kzt_validate(struct file *file, kzt_subsystem_t *sub, int cmd, void *arg)
-{
-        kzt_test_t *test;
-
-        spin_lock(&(sub->test_lock));
-        list_for_each_entry(test, &(sub->test_list), test_list) {
-                if (test->desc.id == cmd) {
-                       spin_unlock(&(sub->test_lock));
-                        return test->test(file, arg);
-                }
-        }
-        spin_unlock(&(sub->test_lock));
-
-        return -EINVAL;
-}
-
-static int
-kzt_ioctl_cfg(struct file *file, unsigned long arg)
-{
-       kzt_cfg_t kcfg;
-       int rc = 0;
-
-       if (copy_from_user(&kcfg, (kzt_cfg_t *)arg, sizeof(kcfg)))
-               return -EFAULT;
-
-       if (kcfg.cfg_magic != KZT_CFG_MAGIC) {
-               kzt_print(file, "Bad config magic 0x%x != 0x%x\n",
-                         kcfg.cfg_magic, KZT_CFG_MAGIC);
-               return -EINVAL;
-       }
-
-       switch (kcfg.cfg_cmd) {
-               case KZT_CFG_BUFFER_CLEAR:
-                       /* cfg_arg1 - Unused
-                        * cfg_rc1  - Unused
-                        */
-                       rc = kzt_buffer_clear(file, &kcfg, arg);
-                       break;
-               case KZT_CFG_BUFFER_SIZE:
-                       /* cfg_arg1 - 0 - query size; >0 resize
-                        * cfg_rc1  - Set to current buffer size
-                        */
-                       rc = kzt_buffer_size(file, &kcfg, arg);
-                       break;
-               case KZT_CFG_SUBSYSTEM_COUNT:
-                       /* cfg_arg1 - Unused
-                        * cfg_rc1  - Set to number of subsystems
-                        */
-                       rc = kzt_subsystem_count(&kcfg, arg);
-                       break;
-               case KZT_CFG_SUBSYSTEM_LIST:
-                       /* cfg_arg1 - Unused
-                        * cfg_rc1  - Set to number of subsystems
-                        * cfg_data.kzt_subsystems - Populated with subsystems
-                        */
-                       rc = kzt_subsystem_list(&kcfg, arg);
-                       break;
-               case KZT_CFG_TEST_COUNT:
-                       /* cfg_arg1 - Set to a target subsystem
-                        * cfg_rc1  - Set to number of tests
-                        */
-                       rc = kzt_test_count(&kcfg, arg);
-                       break;
-               case KZT_CFG_TEST_LIST:
-                       /* cfg_arg1 - Set to a target subsystem
-                        * cfg_rc1  - Set to number of tests
-                        * cfg_data.kzt_subsystems - Populated with tests
-                        */
-                       rc = kzt_test_list(&kcfg, arg);
-                       break;
-               default:
-                       kzt_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
-                       rc = -EINVAL;
-                       break;
-       }
-
-       return rc;
-}
-
-static int
-kzt_ioctl_cmd(struct file *file, unsigned long arg)
-{
-       kzt_subsystem_t *sub;
-       kzt_cmd_t kcmd;
-       int rc = -EINVAL;
-       void *data = NULL;
-
-       if (copy_from_user(&kcmd, (kzt_cfg_t *)arg, sizeof(kcmd)))
-               return -EFAULT;
-
-       if (kcmd.cmd_magic != KZT_CMD_MAGIC) {
-               kzt_print(file, "Bad command magic 0x%x != 0x%x\n",
-                         kcmd.cmd_magic, KZT_CFG_MAGIC);
-               return -EINVAL;
-       }
-
-       /* Allocate memory for any opaque data the caller needed to pass on */
-       if (kcmd.cmd_data_size > 0) {
-               data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
-               if (data == NULL)
-                       return -ENOMEM;
-
-               if (copy_from_user(data, (void *)(arg + offsetof(kzt_cmd_t,
-                                  cmd_data_str)), kcmd.cmd_data_size)) {
-                       kfree(data);
-                       return -EFAULT;
-               }
-       }
-
-       sub = kzt_subsystem_find(kcmd.cmd_subsystem);
-       if (sub != NULL)
-               rc = kzt_validate(file, sub, kcmd.cmd_test, data);
-       else
-               rc = -EINVAL;
-
-       if (data != NULL)
-               kfree(data);
-
-       return rc;
-}
-
-static int
-kzt_ioctl(struct inode *inode, struct file *file,
-         unsigned int cmd, unsigned long arg)
-{
-        unsigned int minor = iminor(file->f_dentry->d_inode);
-       int rc = 0;
-
-       /* Ignore tty ioctls */
-       if ((cmd & 0xffffff00) == ((int)'T') << 8)
-               return -ENOTTY;
-
-       if (minor >= KZT_MINORS)
-               return -ENXIO;
-
-       switch (cmd) {
-               case KZT_CFG:
-                       rc = kzt_ioctl_cfg(file, arg);
-                       break;
-               case KZT_CMD:
-                       rc = kzt_ioctl_cmd(file, arg);
-                       break;
-               default:
-                       kzt_print(file, "Bad ioctl command %d\n", cmd);
-                       rc = -EINVAL;
-                       break;
-       }
-
-       return rc;
-}
-
-/* I'm not sure why you would want to write in to this buffer from
- * user space since its principle use is to pass test status info
- * back to the user space, but I don't see any reason to prevent it.
- */
-static ssize_t kzt_write(struct file *file, const char __user *buf,
-                         size_t count, loff_t *ppos)
-{
-        unsigned int minor = iminor(file->f_dentry->d_inode);
-       kzt_info_t *info = (kzt_info_t *)file->private_data;
-       int rc = 0;
-
-       if (minor >= KZT_MINORS)
-               return -ENXIO;
-
-       ASSERT(info);
-       ASSERT(info->info_buffer);
-
-       spin_lock(&info->info_lock);
-
-       /* Write beyond EOF */
-       if (*ppos >= info->info_size) {
-               rc = -EFBIG;
-               goto out;
-       }
-
-       /* Resize count if beyond EOF */
-       if (*ppos + count > info->info_size)
-               count = info->info_size - *ppos;
-
-       if (copy_from_user(info->info_buffer, buf, count)) {
-               rc = -EFAULT;
-               goto out;
-       }
-
-       *ppos += count;
-       rc = count;
-out:
-       spin_unlock(&info->info_lock);
-       return rc;
-}
-
-static ssize_t kzt_read(struct file *file, char __user *buf,
-                       size_t count, loff_t *ppos)
-{
-        unsigned int minor = iminor(file->f_dentry->d_inode);
-       kzt_info_t *info = (kzt_info_t *)file->private_data;
-       int rc = 0;
-
-       if (minor >= KZT_MINORS)
-               return -ENXIO;
-
-       ASSERT(info);
-       ASSERT(info->info_buffer);
-
-       spin_lock(&info->info_lock);
-
-       /* Read beyond EOF */
-       if (*ppos >= info->info_size)
-               goto out;
-
-       /* Resize count if beyond EOF */
-       if (*ppos + count > info->info_size)
-               count = info->info_size - *ppos;
-
-       if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
-               rc = -EFAULT;
-               goto out;
-       }
-
-       *ppos += count;
-       rc = count;
-out:
-       spin_unlock(&info->info_lock);
-       return rc;
-}
-
-static loff_t kzt_seek(struct file *file, loff_t offset, int origin)
-{
-        unsigned int minor = iminor(file->f_dentry->d_inode);
-       kzt_info_t *info = (kzt_info_t *)file->private_data;
-       int rc = -EINVAL;
-
-       if (minor >= KZT_MINORS)
-               return -ENXIO;
-
-       ASSERT(info);
-       ASSERT(info->info_buffer);
-
-       spin_lock(&info->info_lock);
-
-       switch (origin) {
-       case 0: /* SEEK_SET - No-op just do it */
-               break;
-       case 1: /* SEEK_CUR - Seek from current */
-               offset = file->f_pos + offset;
-               break;
-       case 2: /* SEEK_END - Seek from end */
-               offset = info->info_size + offset;
-               break;
-       }
-
-       if (offset >= 0) {
-               file->f_pos = offset;
-               file->f_version = 0;
-               rc = offset;
-       }
-
-       spin_unlock(&info->info_lock);
-
-       return rc;
-}
-
-static struct file_operations kzt_fops = {
-       .owner   = THIS_MODULE,
-       .open    = kzt_open,
-       .release = kzt_release,
-       .ioctl   = kzt_ioctl,
-       .read    = kzt_read,
-       .write   = kzt_write,
-       .llseek  = kzt_seek,
-};
-
-static struct cdev kzt_cdev = {
-       .owner  =       THIS_MODULE,
-       .kobj   =       { .name = "kztctl", },
-};
-
-static int __init
-kzt_init(void)
-{
-       dev_t dev;
-       int rc;
-
-       spin_lock_init(&kzt_module_lock);
-       INIT_LIST_HEAD(&kzt_module_list);
-
-       KZT_SUBSYSTEM_INIT(kmem);
-       KZT_SUBSYSTEM_INIT(taskq);
-       KZT_SUBSYSTEM_INIT(krng);
-       KZT_SUBSYSTEM_INIT(mutex);
-       KZT_SUBSYSTEM_INIT(condvar);
-       KZT_SUBSYSTEM_INIT(thread);
-       KZT_SUBSYSTEM_INIT(rwlock);
-       KZT_SUBSYSTEM_INIT(time);
-
-       dev = MKDEV(KZT_MAJOR, 0);
-        if ((rc = register_chrdev_region(dev, KZT_MINORS, "kztctl")))
-               goto error;
-
-       /* Support for registering a character driver */
-       cdev_init(&kzt_cdev, &kzt_fops);
-       if ((rc = cdev_add(&kzt_cdev, dev, KZT_MINORS))) {
-               printk(KERN_ERR "kzt: Error adding cdev, %d\n", rc);
-               kobject_put(&kzt_cdev.kobj);
-               unregister_chrdev_region(dev, KZT_MINORS);
-               goto error;
-       }
-
-       /* Support for udev make driver info available in sysfs */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
-        kzt_class = class_simple_create(THIS_MODULE, "kzt");
-#else
-        kzt_class = class_create(THIS_MODULE, "kzt");
-#endif
-       if (IS_ERR(kzt_class)) {
-               rc = PTR_ERR(kzt_class);
-               printk(KERN_ERR "kzt: Error creating kzt class, %d\n", rc);
-               cdev_del(&kzt_cdev);
-               unregister_chrdev_region(dev, KZT_MINORS);
-               goto error;
-       }
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
-       class_simple_device_add(kzt_class, MKDEV(KZT_MAJOR, 0),
-                               NULL, "kztctl");
-#else
-       class_device_create(kzt_class, NULL, MKDEV(KZT_MAJOR, 0),
-                           NULL, "kztctl");
-#endif
-
-       printk(KERN_INFO "kzt: Kernel ZFS Tests %s Loaded\n", KZT_VERSION);
-       return 0;
-error:
-       printk(KERN_ERR "kzt: Error registering kzt device, %d\n", rc);
-       return rc;
-}
-
-static void
-kzt_fini(void)
-{
-       dev_t dev = MKDEV(KZT_MAJOR, 0);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
-        class_simple_device_remove(dev);
-        class_simple_destroy(kzt_class);
-        devfs_remove("kzt/kztctl");
-        devfs_remove("kzt");
-#else
-        class_device_destroy(kzt_class, dev);
-        class_destroy(kzt_class);
-#endif
-        cdev_del(&kzt_cdev);
-        unregister_chrdev_region(dev, KZT_MINORS);
-
-       KZT_SUBSYSTEM_FINI(time);
-       KZT_SUBSYSTEM_FINI(rwlock);
-       KZT_SUBSYSTEM_FINI(thread);
-       KZT_SUBSYSTEM_FINI(condvar);
-       KZT_SUBSYSTEM_FINI(mutex);
-       KZT_SUBSYSTEM_FINI(krng);
-       KZT_SUBSYSTEM_FINI(taskq);
-       KZT_SUBSYSTEM_FINI(kmem);
-
-       ASSERT(list_empty(&kzt_module_list));
-       printk(KERN_INFO "kzt: Kernel ZFS Tests %s Unloaded\n", KZT_VERSION);
-}
-
-module_init(kzt_init);
-module_exit(kzt_fini);
-
-MODULE_AUTHOR("Lawrence Livermore National Labs");
-MODULE_DESCRIPTION("Kernel ZFS Test");
-MODULE_LICENSE("GPL");
-
diff --git a/splat/splat-kmem.c b/splat/splat-kmem.c
deleted file mode 100644 (file)
index d0af3fc..0000000
+++ /dev/null
@@ -1,364 +0,0 @@
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_KMEM             0x0100
-#define KZT_KMEM_NAME                  "kmem"
-#define KZT_KMEM_DESC                  "Kernel Malloc/Slab Tests"
-
-#define KZT_KMEM_TEST1_ID              0x0101
-#define KZT_KMEM_TEST1_NAME            "kmem_alloc"
-#define KZT_KMEM_TEST1_DESC            "Memory allocation test (kmem_alloc)"
-
-#define KZT_KMEM_TEST2_ID              0x0102
-#define KZT_KMEM_TEST2_NAME            "kmem_zalloc"
-#define KZT_KMEM_TEST2_DESC            "Memory allocation test (kmem_zalloc)"
-
-#define KZT_KMEM_TEST3_ID              0x0103
-#define KZT_KMEM_TEST3_NAME            "slab_alloc"
-#define KZT_KMEM_TEST3_DESC            "Slab constructor/destructor test"
-
-#define KZT_KMEM_TEST4_ID              0x0104
-#define KZT_KMEM_TEST4_NAME            "slab_reap"
-#define KZT_KMEM_TEST4_DESC            "Slab reaping test"
-
-#define KZT_KMEM_ALLOC_COUNT           10
-/* XXX - This test may fail under tight memory conditions */
-static int
-kzt_kmem_test1(struct file *file, void *arg)
-{
-       void *ptr[KZT_KMEM_ALLOC_COUNT];
-       int size = PAGE_SIZE;
-       int i, count, rc = 0;
-
-       while ((!rc) && (size < (PAGE_SIZE * 16))) {
-               count = 0;
-
-               for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
-                       ptr[i] = kmem_alloc(size, KM_SLEEP);
-                       if (ptr[i])
-                               count++;
-               }
-
-               for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
-                       if (ptr[i])
-                               kmem_free(ptr[i], size);
-
-               kzt_vprint(file, KZT_KMEM_TEST1_NAME,
-                          "%d byte allocations, %d/%d successful\n",
-                          size, count, KZT_KMEM_ALLOC_COUNT);
-               if (count != KZT_KMEM_ALLOC_COUNT)
-                       rc = -ENOMEM;
-
-               size *= 2;
-       }
-
-       return rc;
-}
-
-static int
-kzt_kmem_test2(struct file *file, void *arg)
-{
-       void *ptr[KZT_KMEM_ALLOC_COUNT];
-       int size = PAGE_SIZE;
-       int i, j, count, rc = 0;
-
-       while ((!rc) && (size < (PAGE_SIZE * 16))) {
-               count = 0;
-
-               for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
-                       ptr[i] = kmem_zalloc(size, KM_SLEEP);
-                       if (ptr[i])
-                               count++;
-               }
-
-               /* Ensure buffer has been zero filled */
-               for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
-                       for (j = 0; j < size; j++) {
-                               if (((char *)ptr[i])[j] != '\0') {
-                                       kzt_vprint(file, KZT_KMEM_TEST2_NAME,
-                                                 "%d-byte allocation was "
-                                                 "not zeroed\n", size);
-                                       rc = -EFAULT;
-                               }
-                       }
-               }
-
-               for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
-                       if (ptr[i])
-                               kmem_free(ptr[i], size);
-
-               kzt_vprint(file, KZT_KMEM_TEST2_NAME,
-                          "%d byte allocations, %d/%d successful\n",
-                          size, count, KZT_KMEM_ALLOC_COUNT);
-               if (count != KZT_KMEM_ALLOC_COUNT)
-                       rc = -ENOMEM;
-
-               size *= 2;
-       }
-
-       return rc;
-}
-
-#define KZT_KMEM_TEST_MAGIC            0x004488CCUL
-#define KZT_KMEM_CACHE_NAME            "kmem_test"
-#define KZT_KMEM_CACHE_SIZE            256
-#define KZT_KMEM_OBJ_COUNT             128
-#define KZT_KMEM_OBJ_RECLAIM           64
-
-typedef struct kmem_cache_data {
-       char kcd_buf[KZT_KMEM_CACHE_SIZE];
-       unsigned long kcd_magic;
-       int kcd_flag;
-} kmem_cache_data_t;
-
-typedef struct kmem_cache_priv {
-       unsigned long kcp_magic;
-       struct file *kcp_file;
-       kmem_cache_t *kcp_cache;
-       kmem_cache_data_t *kcp_kcd[KZT_KMEM_OBJ_COUNT];
-       int kcp_count;
-       int kcp_rc;
-} kmem_cache_priv_t;
-
-static int
-kzt_kmem_test34_constructor(void *ptr, void *priv, int flags)
-{
-       kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
-       kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
-
-       if (kcd) {
-               memset(kcd->kcd_buf, 0xaa, KZT_KMEM_CACHE_SIZE);
-               kcd->kcd_flag = 1;
-
-               if (kcp) {
-                       kcd->kcd_magic = kcp->kcp_magic;
-                       kcp->kcp_count++;
-               }
-       }
-
-       return 0;
-}
-
-static void
-kzt_kmem_test34_destructor(void *ptr, void *priv)
-{
-       kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
-       kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
-
-       if (kcd) {
-               memset(kcd->kcd_buf, 0xbb, KZT_KMEM_CACHE_SIZE);
-               kcd->kcd_flag = 0;
-
-               if (kcp)
-                       kcp->kcp_count--;
-       }
-
-       return;
-}
-
-static int
-kzt_kmem_test3(struct file *file, void *arg)
-{
-       kmem_cache_t *cache = NULL;
-       kmem_cache_data_t *kcd = NULL;
-       kmem_cache_priv_t kcp;
-       int rc = 0, max;
-
-       kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
-       kcp.kcp_file = file;
-       kcp.kcp_count = 0;
-       kcp.kcp_rc = 0;
-
-       cache = kmem_cache_create(KZT_KMEM_CACHE_NAME, sizeof(*kcd), 0,
-                                 kzt_kmem_test34_constructor,
-                                 kzt_kmem_test34_destructor,
-                                 NULL, &kcp, NULL, 0);
-       if (!cache) {
-               kzt_vprint(file, KZT_KMEM_TEST3_NAME,
-                          "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
-               return -ENOMEM;
-       }
-
-       kcd = kmem_cache_alloc(cache, 0);
-       if (!kcd) {
-               kzt_vprint(file, KZT_KMEM_TEST3_NAME,
-                          "Unable to allocate from '%s'\n",
-                          KZT_KMEM_CACHE_NAME);
-               rc = -EINVAL;
-               goto out_free;
-       }
-
-       if (!kcd->kcd_flag) {
-               kzt_vprint(file, KZT_KMEM_TEST3_NAME,
-                          "Failed to run contructor for '%s'\n",
-                          KZT_KMEM_CACHE_NAME);
-               rc = -EINVAL;
-               goto out_free;
-       }
-
-       if (kcd->kcd_magic != kcp.kcp_magic) {
-               kzt_vprint(file, KZT_KMEM_TEST3_NAME,
-                          "Failed to pass private data to constructor "
-                          "for '%s'\n", KZT_KMEM_CACHE_NAME);
-               rc = -EINVAL;
-               goto out_free;
-       }
-
-       max = kcp.kcp_count;
-
-       /* Destructor's run lazily so it hard to check correctness here.
-        * We assume if it doesn't crash the free worked properly */
-       kmem_cache_free(cache, kcd);
-
-       /* Destroy the entire cache which will force destructors to
-        * run and we can verify one was called for every object */
-       kmem_cache_destroy(cache);
-       if (kcp.kcp_count) {
-               kzt_vprint(file, KZT_KMEM_TEST3_NAME,
-                          "Failed to run destructor on all slab objects "
-                          "for '%s'\n", KZT_KMEM_CACHE_NAME);
-               rc = -EINVAL;
-       }
-
-       kzt_vprint(file, KZT_KMEM_TEST3_NAME,
-                  "%d allocated/destroyed objects for '%s'\n",
-                  max, KZT_KMEM_CACHE_NAME);
-
-       return rc;
-
-out_free:
-       if (kcd)
-               kmem_cache_free(cache, kcd);
-
-       kmem_cache_destroy(cache);
-       return rc;
-}
-
-static void
-kzt_kmem_test4_reclaim(void *priv)
-{
-       kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
-       int i;
-
-       kzt_vprint(kcp->kcp_file, KZT_KMEM_TEST4_NAME,
-                   "Reaping %d objects from '%s'\n",
-                  KZT_KMEM_OBJ_RECLAIM, KZT_KMEM_CACHE_NAME);
-       for (i = 0; i < KZT_KMEM_OBJ_RECLAIM; i++) {
-               if (kcp->kcp_kcd[i]) {
-                       kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
-                       kcp->kcp_kcd[i] = NULL;
-               }
-       }
-
-       return;
-}
-
-static int
-kzt_kmem_test4(struct file *file, void *arg)
-{
-       kmem_cache_t *cache;
-       kmem_cache_priv_t kcp;
-       int i, rc = 0, max, reclaim_percent, target_percent;
-
-       kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
-       kcp.kcp_file = file;
-       kcp.kcp_count = 0;
-       kcp.kcp_rc = 0;
-
-       cache = kmem_cache_create(KZT_KMEM_CACHE_NAME,
-                                 sizeof(kmem_cache_data_t), 0,
-                                 kzt_kmem_test34_constructor,
-                                 kzt_kmem_test34_destructor,
-                                 kzt_kmem_test4_reclaim, &kcp, NULL, 0);
-       if (!cache) {
-               kzt_vprint(file, KZT_KMEM_TEST4_NAME,
-                          "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
-               return -ENOMEM;
-       }
-
-       kcp.kcp_cache = cache;
-
-       for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++) {
-               /* All allocations need not succeed */
-               kcp.kcp_kcd[i] = kmem_cache_alloc(cache, 0);
-               if (!kcp.kcp_kcd[i]) {
-                       kzt_vprint(file, KZT_KMEM_TEST4_NAME,
-                                  "Unable to allocate from '%s'\n",
-                                  KZT_KMEM_CACHE_NAME);
-               }
-       }
-
-       max = kcp.kcp_count;
-
-       /* Force shrinker to run */
-       kmem_reap();
-
-       /* Reclaim reclaimed objects, this ensure the destructors are run */
-       kmem_cache_reap_now(cache);
-
-       reclaim_percent = ((kcp.kcp_count * 100) / max);
-       target_percent = (((KZT_KMEM_OBJ_COUNT - KZT_KMEM_OBJ_RECLAIM) * 100) /
-                           KZT_KMEM_OBJ_COUNT);
-       kzt_vprint(file, KZT_KMEM_TEST4_NAME,
-                   "%d%% (%d/%d) of previous size, target of "
-                  "%d%%-%d%% for '%s'\n", reclaim_percent, kcp.kcp_count,
-                  max, target_percent - 10, target_percent + 10,
-                  KZT_KMEM_CACHE_NAME);
-       if ((reclaim_percent < target_percent - 10) ||
-           (reclaim_percent > target_percent + 10))
-               rc = -EINVAL;
-
-       /* Cleanup our mess */
-       for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++)
-               if (kcp.kcp_kcd[i])
-                       kmem_cache_free(cache, kcp.kcp_kcd[i]);
-
-       kmem_cache_destroy(cache);
-
-       return rc;
-}
-
-kzt_subsystem_t *
-kzt_kmem_init(void)
-{
-        kzt_subsystem_t *sub;
-
-        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
-        if (sub == NULL)
-                return NULL;
-
-        memset(sub, 0, sizeof(*sub));
-        strncpy(sub->desc.name, KZT_KMEM_NAME, KZT_NAME_SIZE);
-       strncpy(sub->desc.desc, KZT_KMEM_DESC, KZT_DESC_SIZE);
-        INIT_LIST_HEAD(&sub->subsystem_list);
-       INIT_LIST_HEAD(&sub->test_list);
-        spin_lock_init(&sub->test_lock);
-        sub->desc.id = KZT_SUBSYSTEM_KMEM;
-
-        KZT_TEST_INIT(sub, KZT_KMEM_TEST1_NAME, KZT_KMEM_TEST1_DESC,
-                     KZT_KMEM_TEST1_ID, kzt_kmem_test1);
-        KZT_TEST_INIT(sub, KZT_KMEM_TEST2_NAME, KZT_KMEM_TEST2_DESC,
-                     KZT_KMEM_TEST2_ID, kzt_kmem_test2);
-        KZT_TEST_INIT(sub, KZT_KMEM_TEST3_NAME, KZT_KMEM_TEST3_DESC,
-                     KZT_KMEM_TEST3_ID, kzt_kmem_test3);
-        KZT_TEST_INIT(sub, KZT_KMEM_TEST4_NAME, KZT_KMEM_TEST4_DESC,
-                     KZT_KMEM_TEST4_ID, kzt_kmem_test4);
-
-        return sub;
-}
-
-void
-kzt_kmem_fini(kzt_subsystem_t *sub)
-{
-        ASSERT(sub);
-        KZT_TEST_FINI(sub, KZT_KMEM_TEST4_ID);
-        KZT_TEST_FINI(sub, KZT_KMEM_TEST3_ID);
-        KZT_TEST_FINI(sub, KZT_KMEM_TEST2_ID);
-        KZT_TEST_FINI(sub, KZT_KMEM_TEST1_ID);
-
-        kfree(sub);
-}
-
-int
-kzt_kmem_id(void) {
-        return KZT_SUBSYSTEM_KMEM;
-}
diff --git a/splat/splat-mutex.c b/splat/splat-mutex.c
deleted file mode 100644 (file)
index 47a3630..0000000
+++ /dev/null
@@ -1,323 +0,0 @@
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_MUTEX            0x0400
-#define KZT_MUTEX_NAME                 "mutex"
-#define KZT_MUTEX_DESC                 "Kernel Mutex Tests"
-
-#define KZT_MUTEX_TEST1_ID             0x0401
-#define KZT_MUTEX_TEST1_NAME           "tryenter"
-#define KZT_MUTEX_TEST1_DESC           "Validate mutex_tryenter() correctness"
-
-#define KZT_MUTEX_TEST2_ID             0x0402
-#define KZT_MUTEX_TEST2_NAME           "race"
-#define KZT_MUTEX_TEST2_DESC           "Many threads entering/exiting the mutex"
-
-#define KZT_MUTEX_TEST3_ID             0x0403
-#define KZT_MUTEX_TEST3_NAME           "owned"
-#define KZT_MUTEX_TEST3_DESC           "Validate mutex_owned() correctness"
-
-#define KZT_MUTEX_TEST4_ID             0x0404
-#define KZT_MUTEX_TEST4_NAME           "owner"
-#define KZT_MUTEX_TEST4_DESC           "Validate mutex_owner() correctness"
-
-#define KZT_MUTEX_TEST_MAGIC           0x115599DDUL
-#define KZT_MUTEX_TEST_NAME            "mutex_test"
-#define KZT_MUTEX_TEST_WORKQ           "mutex_wq"
-#define KZT_MUTEX_TEST_COUNT           128
-
-typedef struct mutex_priv {
-        unsigned long mp_magic;
-        struct file *mp_file;
-       struct work_struct mp_work[KZT_MUTEX_TEST_COUNT];
-       kmutex_t mp_mtx;
-       int mp_rc;
-} mutex_priv_t;
-
-
-static void
-kzt_mutex_test1_work(void *priv)
-{
-       mutex_priv_t *mp = (mutex_priv_t *)priv;
-
-       ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
-       mp->mp_rc = 0;
-
-       if (!mutex_tryenter(&mp->mp_mtx))
-               mp->mp_rc = -EBUSY;
-}
-
-static int
-kzt_mutex_test1(struct file *file, void *arg)
-{
-       struct workqueue_struct *wq;
-       struct work_struct work;
-       mutex_priv_t *mp;
-       int rc = 0;
-
-       mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
-       if (mp == NULL)
-               return -ENOMEM;
-
-       wq = create_singlethread_workqueue(KZT_MUTEX_TEST_WORKQ);
-       if (wq == NULL) {
-               rc = -ENOMEM;
-               goto out2;
-       }
-
-       mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
-       mutex_enter(&(mp->mp_mtx));
-
-       mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
-       mp->mp_file = file;
-       INIT_WORK(&work, kzt_mutex_test1_work, mp);
-
-       /* Schedule a work item which will try and aquire the mutex via
-         * mutex_tryenter() while its held.  This should fail and the work
-        * item will indicte this status in the passed private data. */
-       if (!queue_work(wq, &work)) {
-               mutex_exit(&(mp->mp_mtx));
-               rc = -EINVAL;
-               goto out;
-       }
-
-       flush_workqueue(wq);
-       mutex_exit(&(mp->mp_mtx));
-
-       /* Work item successfully aquired mutex, very bad! */
-       if (mp->mp_rc != -EBUSY) {
-               rc = -EINVAL;
-               goto out;
-       }
-
-        kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
-                   "mutex_trylock() correctly failed when mutex held\n");
-
-       /* Schedule a work item which will try and aquire the mutex via
-        * mutex_tryenter() while it is not  held.  This should work and
-        * the item will indicte this status in the passed private data. */
-       if (!queue_work(wq, &work)) {
-               rc = -EINVAL;
-               goto out;
-       }
-
-       flush_workqueue(wq);
-
-       /* Work item failed to aquire mutex, very bad! */
-       if (mp->mp_rc != 0) {
-               rc = -EINVAL;
-               goto out;
-       }
-
-        kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
-                   "mutex_trylock() correctly succeeded when mutex unheld\n");
-out:
-       mutex_destroy(&(mp->mp_mtx));
-       destroy_workqueue(wq);
-out2:
-       kfree(mp);
-
-       return rc;
-}
-
-static void
-kzt_mutex_test2_work(void *priv)
-{
-       mutex_priv_t *mp = (mutex_priv_t *)priv;
-       int rc;
-
-       ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
-
-       /* Read the value before sleeping and write it after we wake up to
-        * maximize the chance of a race if mutexs are not working properly */
-       mutex_enter(&mp->mp_mtx);
-       rc = mp->mp_rc;
-       set_current_state(TASK_INTERRUPTIBLE);
-       schedule_timeout(HZ / 100);  /* 1/100 of a second */
-       mp->mp_rc = rc + 1;
-       mutex_exit(&mp->mp_mtx);
-}
-
-static int
-kzt_mutex_test2(struct file *file, void *arg)
-{
-       struct workqueue_struct *wq;
-       mutex_priv_t *mp;
-       int i, rc = 0;
-
-       mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
-       if (mp == NULL)
-               return -ENOMEM;
-
-       /* Create a thread per CPU items on queue will race */
-       wq = create_workqueue(KZT_MUTEX_TEST_WORKQ);
-       if (wq == NULL) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
-       mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
-
-       mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
-       mp->mp_file = file;
-       mp->mp_rc = 0;
-
-       /* Schedule N work items to the work queue each of which enters the
-        * mutex, sleeps briefly, then exits the mutex.  On a multiprocessor
-        * box these work items will be handled by all available CPUs.  The
-        * mutex is instrumented such that if any two processors are in the
-        * critical region at the same time the system will panic.  If the
-        * mutex is implemented right this will never happy, that's a pass. */
-       for (i = 0; i < KZT_MUTEX_TEST_COUNT; i++) {
-               INIT_WORK(&(mp->mp_work[i]), kzt_mutex_test2_work, mp);
-
-               if (!queue_work(wq, &(mp->mp_work[i]))) {
-                       kzt_vprint(file, KZT_MUTEX_TEST2_NAME,
-                                  "Failed to queue work id %d\n", i);
-                       rc = -EINVAL;
-               }
-       }
-
-       flush_workqueue(wq);
-
-       if (mp->mp_rc == KZT_MUTEX_TEST_COUNT) {
-               kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
-                          "correctly entered/exited the mutex %d times\n",
-                          num_online_cpus(), mp->mp_rc);
-       } else {
-               kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
-                          "only processed %d/%d mutex work items\n",
-                          num_online_cpus(), mp->mp_rc, KZT_MUTEX_TEST_COUNT);
-               rc = -EINVAL;
-       }
-
-       mutex_destroy(&(mp->mp_mtx));
-       destroy_workqueue(wq);
-out:
-       kfree(mp);
-
-       return rc;
-}
-
-static int
-kzt_mutex_test3(struct file *file, void *arg)
-{
-        kmutex_t mtx;
-       int rc = 0;
-
-       mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
-
-       mutex_enter(&mtx);
-
-       /* Mutex should be owned by current */
-       if (!mutex_owned(&mtx)) {
-               kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
-                          "be owned by pid %d but is owned by pid %d\n",
-                          current->pid, mtx.km_owner ?  mtx.km_owner->pid : -1);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       mutex_exit(&mtx);
-
-       /* Mutex should not be owned by any task */
-       if (mutex_owned(&mtx)) {
-               kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
-                          "not be owned but is owned by pid %d\n",
-                          mtx.km_owner ?  mtx.km_owner->pid : -1);
-               rc = -EINVAL;
-               goto out;
-       }
-
-        kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "%s",
-                  "Correct mutex_owned() behavior\n");
-out:
-       mutex_destroy(&mtx);
-
-       return rc;
-}
-
-static int
-kzt_mutex_test4(struct file *file, void *arg)
-{
-        kmutex_t mtx;
-       kthread_t *owner;
-       int rc = 0;
-
-       mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
-
-       mutex_enter(&mtx);
-
-       /* Mutex should be owned by current */
-       owner = mutex_owner(&mtx);
-       if (current != owner) {
-               kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
-                          "be owned by pid %d but is owned by pid %d\n",
-                          current->pid, owner ? owner->pid : -1);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       mutex_exit(&mtx);
-
-       /* Mutex should not be owned by any task */
-       owner = mutex_owner(&mtx);
-       if (owner) {
-               kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should not "
-                          "be owned but is owned by pid %d\n", owner->pid);
-               rc = -EINVAL;
-               goto out;
-       }
-
-        kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "%s",
-                  "Correct mutex_owner() behavior\n");
-out:
-       mutex_destroy(&mtx);
-
-       return rc;
-}
-
-kzt_subsystem_t *
-kzt_mutex_init(void)
-{
-        kzt_subsystem_t *sub;
-
-        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
-        if (sub == NULL)
-                return NULL;
-
-        memset(sub, 0, sizeof(*sub));
-        strncpy(sub->desc.name, KZT_MUTEX_NAME, KZT_NAME_SIZE);
-        strncpy(sub->desc.desc, KZT_MUTEX_DESC, KZT_DESC_SIZE);
-        INIT_LIST_HEAD(&sub->subsystem_list);
-        INIT_LIST_HEAD(&sub->test_list);
-        spin_lock_init(&sub->test_lock);
-        sub->desc.id = KZT_SUBSYSTEM_MUTEX;
-
-        KZT_TEST_INIT(sub, KZT_MUTEX_TEST1_NAME, KZT_MUTEX_TEST1_DESC,
-                      KZT_MUTEX_TEST1_ID, kzt_mutex_test1);
-        KZT_TEST_INIT(sub, KZT_MUTEX_TEST2_NAME, KZT_MUTEX_TEST2_DESC,
-                      KZT_MUTEX_TEST2_ID, kzt_mutex_test2);
-        KZT_TEST_INIT(sub, KZT_MUTEX_TEST3_NAME, KZT_MUTEX_TEST3_DESC,
-                      KZT_MUTEX_TEST3_ID, kzt_mutex_test3);
-        KZT_TEST_INIT(sub, KZT_MUTEX_TEST4_NAME, KZT_MUTEX_TEST4_DESC,
-                      KZT_MUTEX_TEST4_ID, kzt_mutex_test4);
-
-        return sub;
-}
-
-void
-kzt_mutex_fini(kzt_subsystem_t *sub)
-{
-        ASSERT(sub);
-        KZT_TEST_FINI(sub, KZT_MUTEX_TEST4_ID);
-        KZT_TEST_FINI(sub, KZT_MUTEX_TEST3_ID);
-        KZT_TEST_FINI(sub, KZT_MUTEX_TEST2_ID);
-        KZT_TEST_FINI(sub, KZT_MUTEX_TEST1_ID);
-
-        kfree(sub);
-}
-
-int
-kzt_mutex_id(void) {
-        return KZT_SUBSYSTEM_MUTEX;
-}
diff --git a/splat/splat-random.c b/splat/splat-random.c
deleted file mode 100644 (file)
index 412c1d6..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_KRNG             0x0300
-#define KZT_KRNG_NAME                  "krng"
-#define KZT_KRNG_DESC                  "Kernel Random Number Generator Tests"
-
-#define KZT_KRNG_TEST1_ID              0x0301
-#define KZT_KRNG_TEST1_NAME            "freq"
-#define KZT_KRNG_TEST1_DESC            "Frequency Test"
-
-#define KRNG_NUM_BITS                  1048576
-#define KRNG_NUM_BYTES                 (KRNG_NUM_BITS >> 3)
-#define KRNG_NUM_BITS_DIV2             (KRNG_NUM_BITS >> 1)
-#define KRNG_ERROR_RANGE               2097
-
-/* Random Number Generator Tests
-   There can be meny more tests on quality of the
-   random number generator.  For now we are only
-   testing the frequency of particular bits.
-   We could also test consecutive sequences,
-   randomness within a particular block, etc.
-   but is probably not necessary for our purposes */
-
-static int
-kzt_krng_test1(struct file *file, void *arg)
-{
-       uint8_t *buf;
-       int i, j, diff, num = 0, rc = 0;
-
-       buf = kmalloc(sizeof(*buf) * KRNG_NUM_BYTES, GFP_KERNEL);
-       if (buf == NULL) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
-       memset(buf, 0, sizeof(*buf) * KRNG_NUM_BYTES);
-
-       /* Always succeeds */
-       random_get_pseudo_bytes(buf, sizeof(uint8_t) * KRNG_NUM_BYTES);
-
-       for (i = 0; i < KRNG_NUM_BYTES; i++) {
-               uint8_t tmp = buf[i];
-               for (j = 0; j < 8; j++) {
-                       uint8_t tmp2 = ((tmp >> j) & 0x01);
-                       if (tmp2 == 1) {
-                               num++;
-                       }
-               }
-       }
-
-       kfree(buf);
-
-       diff = KRNG_NUM_BITS_DIV2 - num;
-       if (diff < 0)
-               diff *= -1;
-
-       kzt_print(file, "Test 1 Number of ones: %d\n", num);
-       kzt_print(file, "Test 1 Difference from expected: %d Allowed: %d\n",
-                  diff, KRNG_ERROR_RANGE);
-
-       if (diff > KRNG_ERROR_RANGE)
-               rc = -ERANGE;
-out:
-       return rc;
-}
-
-kzt_subsystem_t *
-kzt_krng_init(void)
-{
-        kzt_subsystem_t *sub;
-
-        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
-        if (sub == NULL)
-                return NULL;
-
-        memset(sub, 0, sizeof(*sub));
-        strncpy(sub->desc.name, KZT_KRNG_NAME, KZT_NAME_SIZE);
-       strncpy(sub->desc.desc, KZT_KRNG_DESC, KZT_DESC_SIZE);
-        INIT_LIST_HEAD(&sub->subsystem_list);
-       INIT_LIST_HEAD(&sub->test_list);
-        spin_lock_init(&sub->test_lock);
-        sub->desc.id = KZT_SUBSYSTEM_KRNG;
-
-        KZT_TEST_INIT(sub, KZT_KRNG_TEST1_NAME, KZT_KRNG_TEST1_DESC,
-                     KZT_KRNG_TEST1_ID, kzt_krng_test1);
-
-        return sub;
-}
-
-void
-kzt_krng_fini(kzt_subsystem_t *sub)
-{
-        ASSERT(sub);
-
-        KZT_TEST_FINI(sub, KZT_KRNG_TEST1_ID);
-
-        kfree(sub);
-}
-
-int
-kzt_krng_id(void) {
-        return KZT_SUBSYSTEM_KRNG;
-}
diff --git a/splat/splat-rwlock.c b/splat/splat-rwlock.c
deleted file mode 100644 (file)
index df4585e..0000000
+++ /dev/null
@@ -1,763 +0,0 @@
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_RWLOCK           0x0700
-#define KZT_RWLOCK_NAME                        "rwlock"
-#define KZT_RWLOCK_DESC                        "Kernel RW Lock Tests"
-
-#define KZT_RWLOCK_TEST1_ID            0x0701
-#define KZT_RWLOCK_TEST1_NAME          "rwtest1"
-#define KZT_RWLOCK_TEST1_DESC          "Multiple Readers One Writer"
-
-#define KZT_RWLOCK_TEST2_ID            0x0702
-#define KZT_RWLOCK_TEST2_NAME          "rwtest2"
-#define KZT_RWLOCK_TEST2_DESC          "Multiple Writers"
-
-#define KZT_RWLOCK_TEST3_ID            0x0703
-#define KZT_RWLOCK_TEST3_NAME          "rwtest3"
-#define KZT_RWLOCK_TEST3_DESC          "Owner Verification"
-
-#define KZT_RWLOCK_TEST4_ID            0x0704
-#define KZT_RWLOCK_TEST4_NAME          "rwtest4"
-#define KZT_RWLOCK_TEST4_DESC          "Trylock Test"
-
-#define KZT_RWLOCK_TEST5_ID            0x0705
-#define KZT_RWLOCK_TEST5_NAME          "rwtest5"
-#define KZT_RWLOCK_TEST5_DESC          "Write Downgrade Test"
-
-#define KZT_RWLOCK_TEST6_ID            0x0706
-#define KZT_RWLOCK_TEST6_NAME          "rwtest6"
-#define KZT_RWLOCK_TEST6_DESC          "Read Upgrade Test"
-
-#define KZT_RWLOCK_TEST_MAGIC          0x115599DDUL
-#define KZT_RWLOCK_TEST_NAME           "rwlock_test"
-#define KZT_RWLOCK_TEST_COUNT          8
-
-#define KZT_RWLOCK_RELEASE_INIT                0
-#define KZT_RWLOCK_RELEASE_WRITERS     1
-#define KZT_RWLOCK_RELEASE_READERS     2
-
-typedef struct rw_priv {
-        unsigned long rw_magic;
-        struct file *rw_file;
-       krwlock_t rwl;
-       spinlock_t rw_priv_lock;
-       wait_queue_head_t rw_waitq;
-       atomic_t rw_completed;
-       atomic_t rw_acquired;
-       atomic_t rw_waiters;
-       atomic_t rw_release;
-} rw_priv_t;
-
-typedef struct rw_thr {
-       int rwt_id;
-       const char *rwt_name;
-       rw_priv_t *rwt_rwp;
-       int rwt_rc;
-} rw_thr_t;
-
-static inline void
-kzt_rwlock_sleep(signed long delay)
-{
-       set_current_state(TASK_INTERRUPTIBLE);
-       schedule_timeout(delay);
-}
-
-#define kzt_rwlock_lock_and_test(lock,test)    \
-({                                             \
-       int ret = 0;                            \
-                                               \
-       spin_lock(lock);                        \
-       ret = (test) ? 1 : 0;                   \
-       spin_unlock(lock);                      \
-       ret;                                    \
-})
-
-void kzt_init_rw_priv(rw_priv_t *rwv, struct file *file)
-{
-       rwv->rw_magic = KZT_RWLOCK_TEST_MAGIC;
-       rwv->rw_file = file;
-       spin_lock_init(&rwv->rw_priv_lock);
-       init_waitqueue_head(&rwv->rw_waitq);
-       atomic_set(&rwv->rw_completed, 0);
-       atomic_set(&rwv->rw_acquired, 0);
-       atomic_set(&rwv->rw_waiters, 0);
-       atomic_set(&rwv->rw_release, KZT_RWLOCK_RELEASE_INIT);
-       
-       /* Initialize the read/write lock */
-       rw_init(&rwv->rwl, KZT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
-}
-
-int
-kzt_rwlock_test1_writer_thread(void *arg)
-{
-       rw_thr_t *rwt = (rw_thr_t *)arg;
-       rw_priv_t *rwv = rwt->rwt_rwp;
-       uint8_t rnd = 0;
-       char name[16];
-
-       ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
-        snprintf(name, sizeof(name), "%s%d", 
-                KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
-       daemonize(name);
-       get_random_bytes((void *)&rnd, 1);
-       kzt_rwlock_sleep(rnd * HZ / 1000);
-
-       spin_lock(&rwv->rw_priv_lock);
-       kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                  "%s writer thread trying to acquire rwlock with "
-                  "%d holding lock and %d waiting\n",
-                  name, atomic_read(&rwv->rw_acquired),
-                  atomic_read(&rwv->rw_waiters));
-       atomic_inc(&rwv->rw_waiters);
-       spin_unlock(&rwv->rw_priv_lock);
-
-       /* Take the semaphore for writing 
-        * release it when we are told to */
-       rw_enter(&rwv->rwl, RW_WRITER);
-
-       spin_lock(&rwv->rw_priv_lock);
-       atomic_dec(&rwv->rw_waiters);
-       atomic_inc(&rwv->rw_acquired);
-       kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                  "%s writer thread acquired rwlock with "
-                  "%d holding lock and %d waiting\n",
-                  name, atomic_read(&rwv->rw_acquired),
-                  atomic_read(&rwv->rw_waiters));
-       spin_unlock(&rwv->rw_priv_lock);
-
-       /* Wait here until the control thread
-        * says we can release the write lock */
-       wait_event_interruptible(rwv->rw_waitq,
-                                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
-                                        atomic_read(&rwv->rw_release) ==
-                                        KZT_RWLOCK_RELEASE_WRITERS));
-       spin_lock(&rwv->rw_priv_lock);
-       atomic_inc(&rwv->rw_completed);
-       atomic_dec(&rwv->rw_acquired);
-       kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                  "%s writer thread dropped rwlock with "
-                  "%d holding lock and %d waiting\n",
-                  name, atomic_read(&rwv->rw_acquired),
-                  atomic_read(&rwv->rw_waiters));
-       spin_unlock(&rwv->rw_priv_lock);
-
-       /* Release the semaphore */
-       rw_exit(&rwv->rwl);
-       return 0;
-}
-
-int
-kzt_rwlock_test1_reader_thread(void *arg)
-{
-       rw_thr_t *rwt = (rw_thr_t *)arg;
-       rw_priv_t *rwv = rwt->rwt_rwp;
-       uint8_t rnd = 0;
-       char name[16];
-
-       ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
-        snprintf(name, sizeof(name), "%s%d",
-                KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
-       daemonize(name);
-       get_random_bytes((void *)&rnd, 1);
-        kzt_rwlock_sleep(rnd * HZ / 1000);
-
-       /* Don't try and and take the semaphore until
-        * someone else has already acquired it */
-        wait_event_interruptible(rwv->rw_waitq,
-                                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
-                                        atomic_read(&rwv->rw_acquired) > 0));
-
-       spin_lock(&rwv->rw_priv_lock);
-       kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                  "%s reader thread trying to acquire rwlock with "
-                  "%d holding lock and %d waiting\n",
-                  name, atomic_read(&rwv->rw_acquired),
-                  atomic_read(&rwv->rw_waiters));
-       atomic_inc(&rwv->rw_waiters);
-       spin_unlock(&rwv->rw_priv_lock);
-
-       /* Take the semaphore for reading
-        * release it when we are told to */
-       rw_enter(&rwv->rwl, RW_READER);
-
-       spin_lock(&rwv->rw_priv_lock);
-       atomic_dec(&rwv->rw_waiters);
-       atomic_inc(&rwv->rw_acquired);
-       kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                  "%s reader thread acquired rwlock with "
-                  "%d holding lock and %d waiting\n",
-                  name, atomic_read(&rwv->rw_acquired),
-                  atomic_read(&rwv->rw_waiters));
-       spin_unlock(&rwv->rw_priv_lock);
-
-       /* Wait here until the control thread
-         * says we can release the read lock */
-       wait_event_interruptible(rwv->rw_waitq,
-                                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
-                                atomic_read(&rwv->rw_release) ==
-                                KZT_RWLOCK_RELEASE_READERS));
-
-       spin_lock(&rwv->rw_priv_lock);
-       atomic_inc(&rwv->rw_completed);
-       atomic_dec(&rwv->rw_acquired);
-       kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                  "%s reader thread dropped rwlock with "
-                  "%d holding lock and %d waiting\n",
-                  name, atomic_read(&rwv->rw_acquired),
-                  atomic_read(&rwv->rw_waiters));
-       spin_unlock(&rwv->rw_priv_lock);
-
-       /* Release the semaphore */
-       rw_exit(&rwv->rwl);
-       return 0;
-}
-
-static int
-kzt_rwlock_test1(struct file *file, void *arg)
-{
-       int i, count = 0, rc = 0;
-       long pids[KZT_RWLOCK_TEST_COUNT];
-       rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
-       rw_priv_t rwv;
-
-       /* Initialize private data 
-        * including the rwlock */
-       kzt_init_rw_priv(&rwv, file);
-
-       /* Create some threads, the exact number isn't important just as
-        * long as we know how many we managed to create and should expect. */
-       for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
-               rwt[i].rwt_rwp = &rwv;
-               rwt[i].rwt_id = i;
-               rwt[i].rwt_name = KZT_RWLOCK_TEST1_NAME;
-               rwt[i].rwt_rc = 0;
-
-               /* The first thread will be a writer */
-               if (i == 0) {
-                       pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
-                                               &rwt[i], 0);
-               } else {
-                       pids[i] = kernel_thread(kzt_rwlock_test1_reader_thread,
-                                               &rwt[i], 0);
-               }
-               
-               if (pids[i] >= 0) {
-                       count++;
-               }
-       }
-
-       /* Once the writer has the lock, release the readers */
-       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock, atomic_read(&rwv.rw_acquired) <= 0)) {
-               kzt_rwlock_sleep(1 * HZ);
-       }
-       wake_up_interruptible(&rwv.rw_waitq);
-
-       /* Ensure that there is only 1 writer and all readers are waiting */
-       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock, 
-                                       atomic_read(&rwv.rw_acquired) != 1 ||
-                                       atomic_read(&rwv.rw_waiters) !=
-                                       KZT_RWLOCK_TEST_COUNT - 1)) {
-
-               kzt_rwlock_sleep(1 * HZ);
-       }
-       /* Relase the writer */
-       spin_lock(&rwv.rw_priv_lock);
-       atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
-       spin_unlock(&rwv.rw_priv_lock);
-       wake_up_interruptible(&rwv.rw_waitq);
-
-       /* Now ensure that there are multiple reader threads holding the lock */
-       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
-              atomic_read(&rwv.rw_acquired) <= 1)) {
-               kzt_rwlock_sleep(1 * HZ);
-       }
-       /* Release the readers */
-       spin_lock(&rwv.rw_priv_lock);
-       atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_READERS);
-       spin_unlock(&rwv.rw_priv_lock);
-       wake_up_interruptible(&rwv.rw_waitq);
-
-       /* Wait for the test to complete */
-       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
-              atomic_read(&rwv.rw_acquired) != 0 ||
-              atomic_read(&rwv.rw_waiters) != 0)) {
-               kzt_rwlock_sleep(1 * HZ);
-
-       }
-
-       rw_destroy(&rwv.rwl);
-       return rc;
-}
-
-int
-kzt_rwlock_test2_writer_thread(void *arg)
-{
-       rw_thr_t *rwt = (rw_thr_t *)arg;
-       rw_priv_t *rwv = rwt->rwt_rwp;
-       uint8_t rnd = 0;
-       char name[16];
-       
-       ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
-       snprintf(name, sizeof(name), "%s%d",
-                KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
-       daemonize(name);
-       get_random_bytes((void *)&rnd, 1);
-       kzt_rwlock_sleep(rnd * HZ / 1000);
-
-       /* Here just increment the waiters count even if we are not
-        * exactly about to call rw_enter().  Not really a big deal
-        * since more than likely will be true when we simulate work
-        * later on */
-       spin_lock(&rwv->rw_priv_lock);
-       kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                  "%s writer thread trying to acquire rwlock with "
-                  "%d holding lock and %d waiting\n",
-                  name, atomic_read(&rwv->rw_acquired),
-                  atomic_read(&rwv->rw_waiters));
-       atomic_inc(&rwv->rw_waiters);
-       spin_unlock(&rwv->rw_priv_lock);
-
-       /* Wait here until the control thread
-        * says we can acquire the write lock */
-       wait_event_interruptible(rwv->rw_waitq,
-                                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
-                                atomic_read(&rwv->rw_release) ==
-                                KZT_RWLOCK_RELEASE_WRITERS));
-       
-       /* Take the semaphore for writing */
-       rw_enter(&rwv->rwl, RW_WRITER);
-
-       spin_lock(&rwv->rw_priv_lock);
-       atomic_dec(&rwv->rw_waiters);
-       atomic_inc(&rwv->rw_acquired);
-       kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                  "%s writer thread acquired rwlock with "
-                  "%d holding lock and %d waiting\n",
-                  name, atomic_read(&rwv->rw_acquired),
-                  atomic_read(&rwv->rw_waiters));
-       spin_unlock(&rwv->rw_priv_lock);
-
-       /* Give up the processor for a bit to simulate
-        * doing some work while taking the write lock */
-       kzt_rwlock_sleep(rnd * HZ / 1000);
-
-       /* Ensure that we are the only one writing */
-       if (atomic_read(&rwv->rw_acquired) > 1) {
-               rwt->rwt_rc = 1;
-       } else {
-               rwt->rwt_rc = 0;
-       }
-
-       spin_lock(&rwv->rw_priv_lock);
-       atomic_inc(&rwv->rw_completed);
-       atomic_dec(&rwv->rw_acquired);
-       kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                  "%s writer thread dropped rwlock with "
-                  "%d holding lock and %d waiting\n",
-                  name, atomic_read(&rwv->rw_acquired),
-                  atomic_read(&rwv->rw_waiters));
-       spin_unlock(&rwv->rw_priv_lock);
-
-       rw_exit(&rwv->rwl);
-       
-
-       return 0;
-}
-
-static int
-kzt_rwlock_test2(struct file *file, void *arg)
-{
-       int i, count = 0, rc = 0;
-       long pids[KZT_RWLOCK_TEST_COUNT];
-       rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
-       rw_priv_t rwv;
-
-       /* Initialize private data 
-        * including the rwlock */
-       kzt_init_rw_priv(&rwv, file);
-
-       /* Create some threads, the exact number isn't important just as
-        * long as we know how many we managed to create and should expect. */
-       for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
-               rwt[i].rwt_rwp = &rwv;
-               rwt[i].rwt_id = i;
-               rwt[i].rwt_name = KZT_RWLOCK_TEST2_NAME;
-               rwt[i].rwt_rc = 0;
-
-               /* The first thread will be a writer */
-               pids[i] = kernel_thread(kzt_rwlock_test2_writer_thread,
-                                       &rwt[i], 0);
-
-               if (pids[i] >= 0) {
-                       count++;
-               }
-       }
-
-       /* Wait for writers to get queued up */
-       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
-              atomic_read(&rwv.rw_waiters) < KZT_RWLOCK_TEST_COUNT)) {
-               kzt_rwlock_sleep(1 * HZ);
-       }
-       /* Relase the writers */
-       spin_lock(&rwv.rw_priv_lock);
-       atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
-       spin_unlock(&rwv.rw_priv_lock);
-       wake_up_interruptible(&rwv.rw_waitq);
-
-       /* Wait for the test to complete */
-       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
-              atomic_read(&rwv.rw_acquired) != 0 ||
-              atomic_read(&rwv.rw_waiters) != 0)) {
-               kzt_rwlock_sleep(1 * HZ);
-       }
-
-       /* If any of the write threads ever acquired the lock
-        * while another thread had it, make sure we return
-        * an error */
-       for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
-               if (rwt[i].rwt_rc) {
-                       rc++;
-               }
-       }
-
-       rw_destroy(&rwv.rwl);
-       return rc;
-}
-
-static int
-kzt_rwlock_test3(struct file *file, void *arg)
-{
-       kthread_t *owner;
-       rw_priv_t rwv;
-       int rc = 0;
-
-       /* Initialize private data 
-        * including the rwlock */
-       kzt_init_rw_priv(&rwv, file);
-
-       /* Take the rwlock for writing */
-       rw_enter(&rwv.rwl, RW_WRITER);
-       owner = rw_owner(&rwv.rwl);
-       if (current != owner) {
-               kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should "
-                          "be owned by pid %d but is owned by pid %d\n",
-                          current->pid, owner ? owner->pid : -1);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       /* Release the rwlock */
-       rw_exit(&rwv.rwl);
-       owner = rw_owner(&rwv.rwl);
-       if (owner) {
-               kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
-                          "be owned but is owned by pid %d\n", owner->pid);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       /* Take the rwlock for reading.
-        * Should not have an owner */
-       rw_enter(&rwv.rwl, RW_READER);
-       owner = rw_owner(&rwv.rwl);
-       if (owner) {
-               kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
-                          "be owned but is owned by pid %d\n", owner->pid);
-               /* Release the rwlock */
-               rw_exit(&rwv.rwl);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       /* Release the rwlock */
-       rw_exit(&rwv.rwl);
-
-out:
-       rw_destroy(&rwv.rwl);
-       return rc;
-}
-
-int
-kzt_rwlock_test4_reader_thread(void *arg)
-{
-       rw_thr_t *rwt = (rw_thr_t *)arg;
-       rw_priv_t *rwv = rwt->rwt_rwp;
-       uint8_t rnd = 0;
-       char name[16];
-
-       ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
-        snprintf(name, sizeof(name), "%s%d",
-                KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
-       daemonize(name);
-       get_random_bytes((void *)&rnd, 1);
-        kzt_rwlock_sleep(rnd * HZ / 1000);
-
-       /* Don't try and and take the semaphore until
-        * someone else has already acquired it */
-        wait_event_interruptible(rwv->rw_waitq,
-                                kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
-                                atomic_read(&rwv->rw_acquired) > 0));
-
-       spin_lock(&rwv->rw_priv_lock);
-       kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                  "%s reader thread trying to acquire rwlock with "
-                  "%d holding lock and %d waiting\n",
-                  name, atomic_read(&rwv->rw_acquired),
-                  atomic_read(&rwv->rw_waiters));
-       spin_unlock(&rwv->rw_priv_lock);
-
-       /* Take the semaphore for reading
-        * release it when we are told to */
-       rwt->rwt_rc = rw_tryenter(&rwv->rwl, RW_READER);
-
-       /* Here we acquired the lock this is a
-        * failure since the writer should be
-        * holding the lock */
-       if (rwt->rwt_rc == 1) {
-               spin_lock(&rwv->rw_priv_lock);
-               atomic_inc(&rwv->rw_acquired);
-               kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                          "%s reader thread acquired rwlock with "
-                          "%d holding lock and %d waiting\n",
-                          name, atomic_read(&rwv->rw_acquired),
-                          atomic_read(&rwv->rw_waiters));
-               spin_unlock(&rwv->rw_priv_lock);
-               
-               spin_lock(&rwv->rw_priv_lock);
-               atomic_dec(&rwv->rw_acquired);
-               kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                          "%s reader thread dropped rwlock with "
-                          "%d holding lock and %d waiting\n",
-                          name, atomic_read(&rwv->rw_acquired),
-                          atomic_read(&rwv->rw_waiters));
-               spin_unlock(&rwv->rw_priv_lock);
-               
-               /* Release the semaphore */
-               rw_exit(&rwv->rwl);
-       }
-       /* Here we know we didn't block and didn't
-        * acquire the rwlock for reading */
-       else {
-               spin_lock(&rwv->rw_priv_lock);
-               atomic_inc(&rwv->rw_completed);
-               kzt_vprint(rwv->rw_file, rwt->rwt_name,
-                          "%s reader thread could not acquire rwlock with "
-                          "%d holding lock and %d waiting\n",
-                          name, atomic_read(&rwv->rw_acquired),
-                          atomic_read(&rwv->rw_waiters));
-               spin_unlock(&rwv->rw_priv_lock);
-       }
-
-       return 0;
-}
-
-static int
-kzt_rwlock_test4(struct file *file, void *arg)
-{
-       int i, count = 0, rc = 0;
-       long pids[KZT_RWLOCK_TEST_COUNT];
-       rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
-       rw_priv_t rwv;
-
-       /* Initialize private data 
-        * including the rwlock */
-       kzt_init_rw_priv(&rwv, file);
-
-       /* Create some threads, the exact number isn't important just as
-        * long as we know how many we managed to create and should expect. */
-       for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
-               rwt[i].rwt_rwp = &rwv;
-               rwt[i].rwt_id = i;
-               rwt[i].rwt_name = KZT_RWLOCK_TEST4_NAME;
-               rwt[i].rwt_rc = 0;
-
-               /* The first thread will be a writer */
-               if (i == 0) {
-                       /* We can reuse the test1 writer thread here */
-                       pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
-                                               &rwt[i], 0);
-               } else {
-                        pids[i] = kernel_thread(kzt_rwlock_test4_reader_thread,
-                                               &rwt[i], 0);
-               }
-
-               if (pids[i] >= 0) {
-                       count++;
-               }
-       }
-
-       /* Once the writer has the lock, release the readers */
-       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
-              atomic_read(&rwv.rw_acquired) <= 0)) {
-               kzt_rwlock_sleep(1 * HZ);
-       }
-       wake_up_interruptible(&rwv.rw_waitq);
-
-       /* Make sure that the reader threads complete */
-       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
-              atomic_read(&rwv.rw_completed) != KZT_RWLOCK_TEST_COUNT - 1)) {
-               kzt_rwlock_sleep(1 * HZ);
-       }
-       /* Release the writer */
-       spin_lock(&rwv.rw_priv_lock);
-       atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
-       spin_unlock(&rwv.rw_priv_lock);
-       wake_up_interruptible(&rwv.rw_waitq);
-
-       /* Wait for the test to complete */
-       while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
-              atomic_read(&rwv.rw_acquired) != 0 ||
-              atomic_read(&rwv.rw_waiters) != 0)) {
-               kzt_rwlock_sleep(1 * HZ);
-       }
-
-       /* If any of the reader threads ever acquired the lock
-        * while another thread had it, make sure we return
-        * an error since the rw_tryenter() should have failed */
-       for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
-               if (rwt[i].rwt_rc) {
-                       rc++;
-               }
-       }
-
-       rw_destroy(&rwv.rwl);
-       return rc;
-}
-
-static int
-kzt_rwlock_test5(struct file *file, void *arg)
-{
-       kthread_t *owner;
-       rw_priv_t rwv;
-       int rc = 0;
-
-       /* Initialize private data 
-        * including the rwlock */
-       kzt_init_rw_priv(&rwv, file);
-
-       /* Take the rwlock for writing */
-       rw_enter(&rwv.rwl, RW_WRITER);
-       owner = rw_owner(&rwv.rwl);
-       if (current != owner) {
-               kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should "
-                          "be owned by pid %d but is owned by pid %d\n",
-                          current->pid, owner ? owner->pid : -1);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       /* Make sure that the downgrade
-        * worked properly */
-       rw_downgrade(&rwv.rwl);
-
-       owner = rw_owner(&rwv.rwl);
-       if (owner) {
-               kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should not "
-                          "be owned but is owned by pid %d\n", owner->pid);
-               /* Release the rwlock */
-               rw_exit(&rwv.rwl);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       /* Release the rwlock */
-       rw_exit(&rwv.rwl);
-
-out:
-       rw_destroy(&rwv.rwl);
-       return rc;
-}
-
-static int
-kzt_rwlock_test6(struct file *file, void *arg)
-{
-       kthread_t *owner;
-       rw_priv_t rwv;
-       int rc = 0;
-
-       /* Initialize private data 
-        * including the rwlock */
-       kzt_init_rw_priv(&rwv, file);
-
-       /* Take the rwlock for reading */
-       rw_enter(&rwv.rwl, RW_READER);
-       owner = rw_owner(&rwv.rwl);
-       if (owner) {
-               kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should not "
-                          "be owned but is owned by pid %d\n", owner->pid);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       /* Make sure that the upgrade
-        * worked properly */
-       rc = !rw_tryupgrade(&rwv.rwl);
-
-       owner = rw_owner(&rwv.rwl);
-       if (rc || current != owner) {
-               kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should "
-                          "be owned by pid %d but is owned by pid %d "
-                          "trylock rc %d\n",
-                          current->pid, owner ? owner->pid : -1, rc);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       /* Release the rwlock */
-       rw_exit(&rwv.rwl);
-
-out:
-       rw_destroy(&rwv.rwl);
-       return rc;
-}
-
-kzt_subsystem_t *
-kzt_rwlock_init(void)
-{
-        kzt_subsystem_t *sub;
-
-        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
-        if (sub == NULL)
-                return NULL;
-
-        memset(sub, 0, sizeof(*sub));
-        strncpy(sub->desc.name, KZT_RWLOCK_NAME, KZT_NAME_SIZE);
-        strncpy(sub->desc.desc, KZT_RWLOCK_DESC, KZT_DESC_SIZE);
-        INIT_LIST_HEAD(&sub->subsystem_list);
-        INIT_LIST_HEAD(&sub->test_list);
-        spin_lock_init(&sub->test_lock);
-        sub->desc.id = KZT_SUBSYSTEM_RWLOCK;
-
-        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST1_NAME, KZT_RWLOCK_TEST1_DESC,
-                      KZT_RWLOCK_TEST1_ID, kzt_rwlock_test1);
-        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST2_NAME, KZT_RWLOCK_TEST2_DESC,
-                      KZT_RWLOCK_TEST2_ID, kzt_rwlock_test2);
-        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST3_NAME, KZT_RWLOCK_TEST3_DESC,
-                      KZT_RWLOCK_TEST3_ID, kzt_rwlock_test3);
-        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST4_NAME, KZT_RWLOCK_TEST4_DESC,
-                      KZT_RWLOCK_TEST4_ID, kzt_rwlock_test4);
-        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST5_NAME, KZT_RWLOCK_TEST5_DESC,
-                      KZT_RWLOCK_TEST5_ID, kzt_rwlock_test5);
-        KZT_TEST_INIT(sub, KZT_RWLOCK_TEST6_NAME, KZT_RWLOCK_TEST6_DESC,
-                      KZT_RWLOCK_TEST6_ID, kzt_rwlock_test6);
-
-        return sub;
-}
-
-void
-kzt_rwlock_fini(kzt_subsystem_t *sub)
-{
-        ASSERT(sub);
-        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST6_ID);
-        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST5_ID);
-        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST4_ID);
-        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST3_ID);
-        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST2_ID);
-        KZT_TEST_FINI(sub, KZT_RWLOCK_TEST1_ID);
-        kfree(sub);
-}
-
-int
-kzt_rwlock_id(void) {
-        return KZT_SUBSYSTEM_RWLOCK;
-}
diff --git a/splat/splat-taskq.c b/splat/splat-taskq.c
deleted file mode 100644 (file)
index 3d5c075..0000000
+++ /dev/null
@@ -1,237 +0,0 @@
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_TASKQ            0x0200
-#define KZT_TASKQ_NAME                 "taskq"
-#define KZT_TASKQ_DESC                 "Kernel Task Queue Tests"
-
-#define KZT_TASKQ_TEST1_ID             0x0201
-#define KZT_TASKQ_TEST1_NAME           "single"
-#define KZT_TASKQ_TEST1_DESC           "Single task queue, single task"
-
-#define KZT_TASKQ_TEST2_ID              0x0202
-#define KZT_TASKQ_TEST2_NAME           "multiple"
-#define KZT_TASKQ_TEST2_DESC           "Multiple task queues, multiple tasks"
-
-typedef struct kzt_taskq_arg {
-       int flag;
-       int id;
-       struct file *file;
-       const char *name;
-} kzt_taskq_arg_t;
-
-/* Validation Test 1 - Create a taskq, queue a task, wait until
- * task completes, ensure task ran properly, cleanup taskq,
- */
-static void
-kzt_taskq_test1_func(void *arg)
-{
-       kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
-
-       ASSERT(tq_arg);
-       kzt_vprint(tq_arg->file, KZT_TASKQ_TEST1_NAME,
-                  "Taskq '%s' function '%s' setting flag\n",
-                  tq_arg->name, sym2str(kzt_taskq_test1_func));
-       tq_arg->flag = 1;
-}
-
-static int
-kzt_taskq_test1(struct file *file, void *arg)
-{
-       taskq_t *tq;
-       taskqid_t id;
-       kzt_taskq_arg_t tq_arg;
-
-       kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' creating\n",
-                  KZT_TASKQ_TEST1_NAME);
-       if ((tq = taskq_create(KZT_TASKQ_TEST1_NAME, 1, 0, 0, 0, 0)) == NULL) {
-               kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
-                          "Taskq '%s' create failed\n",
-                          KZT_TASKQ_TEST1_NAME);
-               return -EINVAL;
-       }
-
-       tq_arg.flag = 0;
-       tq_arg.id   = 0;
-       tq_arg.file = file;
-       tq_arg.name = KZT_TASKQ_TEST1_NAME;
-
-       kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
-                  "Taskq '%s' function '%s' dispatching\n",
-                  tq_arg.name, sym2str(kzt_taskq_test1_func));
-       if ((id = taskq_dispatch(tq, kzt_taskq_test1_func, &tq_arg, 0)) == 0) {
-               kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
-                          "Taskq '%s' function '%s' dispatch failed\n",
-                          tq_arg.name, sym2str(kzt_taskq_test1_func));
-               taskq_destory(tq);
-               return -EINVAL;
-       }
-
-       kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
-                  tq_arg.name);
-       taskq_wait(tq);
-       kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
-                  tq_arg.name);
-       taskq_destory(tq);
-
-       return (tq_arg.flag) ? 0 : -EINVAL;
-}
-
-/* Validation Test 2 - Create multiple taskq's, each with multiple tasks,
- * wait until all tasks complete, ensure all tasks ran properly and in the
- * the correct order, cleanup taskq's
- */
-static void
-kzt_taskq_test2_func1(void *arg)
-{
-       kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
-
-       ASSERT(tq_arg);
-       kzt_vprint(tq_arg->file, KZT_TASKQ_TEST2_NAME,
-                  "Taskq '%s/%d' function '%s' flag = %d = %d * 2\n",
-                  tq_arg->name, tq_arg->id,
-                  sym2str(kzt_taskq_test2_func1),
-                  tq_arg->flag * 2, tq_arg->flag);
-       tq_arg->flag *= 2;
-}
-
-static void
-kzt_taskq_test2_func2(void *arg)
-{
-       kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
-
-       ASSERT(tq_arg);
-       kzt_vprint(tq_arg->file, KZT_TASKQ_TEST2_NAME,
-                  "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
-                  tq_arg->name, tq_arg->id,
-                  sym2str(kzt_taskq_test2_func2),
-                  tq_arg->flag + 1, tq_arg->flag);
-       tq_arg->flag += 1;
-}
-
-#define TEST2_TASKQS                    8
-static int
-kzt_taskq_test2(struct file *file, void *arg) {
-       taskq_t *tq[TEST2_TASKQS] = { NULL };
-       taskqid_t id;
-       kzt_taskq_arg_t tq_args[TEST2_TASKQS];
-       int i, rc = 0;
-
-       for (i = 0; i < TEST2_TASKQS; i++) {
-
-               kzt_vprint(file, KZT_TASKQ_TEST2_NAME, "Taskq '%s/%d' "
-                          "creating\n", KZT_TASKQ_TEST2_NAME, i);
-               if ((tq[i] = taskq_create(KZT_TASKQ_TEST2_NAME,
-                                         1, 0, 0, 0, 0)) == NULL) {
-                       kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
-                                  "Taskq '%s/%d' create failed\n",
-                                  KZT_TASKQ_TEST2_NAME, i);
-                       rc = -EINVAL;
-                       break;
-               }
-
-               tq_args[i].flag = i;
-               tq_args[i].id   = i;
-               tq_args[i].file = file;
-               tq_args[i].name = KZT_TASKQ_TEST2_NAME;
-
-               kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
-                          "Taskq '%s/%d' function '%s' dispatching\n",
-                          tq_args[i].name, tq_args[i].id,
-                          sym2str(kzt_taskq_test2_func1));
-               if ((id = taskq_dispatch(
-                    tq[i], kzt_taskq_test2_func1, &tq_args[i], 0)) == 0) {
-                       kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
-                                  "Taskq '%s/%d' function '%s' dispatch "
-                                  "failed\n", tq_args[i].name, tq_args[i].id,
-                                  sym2str(kzt_taskq_test2_func1));
-                       rc = -EINVAL;
-                       break;
-               }
-
-               kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
-                          "Taskq '%s/%d' function '%s' dispatching\n",
-                          tq_args[i].name, tq_args[i].id,
-                          sym2str(kzt_taskq_test2_func2));
-               if ((id = taskq_dispatch(
-                    tq[i], kzt_taskq_test2_func2, &tq_args[i], 0)) == 0) {
-                       kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
-                                  "Taskq '%s/%d' function '%s' dispatch failed\n",
-                                  tq_args[i].name, tq_args[i].id,
-                                  sym2str(kzt_taskq_test2_func2));
-                       rc = -EINVAL;
-                       break;
-               }
-       }
-
-       /* When rc is set we're effectively just doing cleanup here, so
-        * ignore new errors in that case.  They just cause noise. */
-       for (i = 0; i < TEST2_TASKQS; i++) {
-               if (tq[i] != NULL) {
-                       kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
-                                  "Taskq '%s/%d' waiting\n",
-                                  tq_args[i].name, tq_args[i].id);
-                       taskq_wait(tq[i]);
-                       kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
-                                  "Taskq '%s/%d; destroying\n",
-                                 tq_args[i].name, tq_args[i].id);
-                       taskq_destory(tq[i]);
-
-                       if (!rc && tq_args[i].flag != ((i * 2) + 1)) {
-                               kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
-                                          "Taskq '%s/%d' processed tasks "
-                                          "out of order; %d != %d\n",
-                                          tq_args[i].name, tq_args[i].id,
-                                          tq_args[i].flag, i * 2 + 1);
-                               rc = -EINVAL;
-                       } else {
-                               kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
-                                          "Taskq '%s/%d' processed tasks "
-                                          "in the correct order; %d == %d\n",
-                                          tq_args[i].name, tq_args[i].id,
-                                          tq_args[i].flag, i * 2 + 1);
-                       }
-               }
-       }
-
-       return rc;
-}
-
-kzt_subsystem_t *
-kzt_taskq_init(void)
-{
-        kzt_subsystem_t *sub;
-
-        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
-        if (sub == NULL)
-                return NULL;
-
-        memset(sub, 0, sizeof(*sub));
-        strncpy(sub->desc.name, KZT_TASKQ_NAME, KZT_NAME_SIZE);
-        strncpy(sub->desc.desc, KZT_TASKQ_DESC, KZT_DESC_SIZE);
-        INIT_LIST_HEAD(&sub->subsystem_list);
-       INIT_LIST_HEAD(&sub->test_list);
-       spin_lock_init(&sub->test_lock);
-        sub->desc.id = KZT_SUBSYSTEM_TASKQ;
-
-       KZT_TEST_INIT(sub, KZT_TASKQ_TEST1_NAME, KZT_TASKQ_TEST1_DESC,
-                     KZT_TASKQ_TEST1_ID, kzt_taskq_test1);
-       KZT_TEST_INIT(sub, KZT_TASKQ_TEST2_NAME, KZT_TASKQ_TEST2_DESC,
-                     KZT_TASKQ_TEST2_ID, kzt_taskq_test2);
-
-        return sub;
-}
-
-void
-kzt_taskq_fini(kzt_subsystem_t *sub)
-{
-        ASSERT(sub);
-       KZT_TEST_FINI(sub, KZT_TASKQ_TEST2_ID);
-       KZT_TEST_FINI(sub, KZT_TASKQ_TEST1_ID);
-
-        kfree(sub);
-}
-
-int
-kzt_taskq_id(void) {
-        return KZT_SUBSYSTEM_TASKQ;
-}
diff --git a/splat/splat-thread.c b/splat/splat-thread.c
deleted file mode 100644 (file)
index 34260c2..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_THREAD           0x0600
-#define KZT_THREAD_NAME                        "thread"
-#define KZT_THREAD_DESC                        "Kernel Thread Tests"
-
-#define KZT_THREAD_TEST1_ID            0x0601
-#define KZT_THREAD_TEST1_NAME          "create"
-#define KZT_THREAD_TEST1_DESC          "Validate thread creation and destruction"
-
-#define KZT_THREAD_TEST_MAGIC            0x4488CC00UL
-
-typedef struct thread_priv {
-        unsigned long tp_magic;
-        struct file *tp_file;
-        spinlock_t tp_lock;
-        wait_queue_head_t tp_waitq;
-       int tp_rc;
-} thread_priv_t;
-
-
-static void
-kzt_thread_work(void *priv)
-{
-       thread_priv_t *tp = (thread_priv_t *)priv;
-
-       spin_lock(&tp->tp_lock);
-       ASSERT(tp->tp_magic == KZT_THREAD_TEST_MAGIC);
-       tp->tp_rc = 1;
-
-       spin_unlock(&tp->tp_lock);
-       wake_up(&tp->tp_waitq);
-
-       thread_exit();
-}
-
-static int
-kzt_thread_test1(struct file *file, void *arg)
-{
-       thread_priv_t tp;
-        DEFINE_WAIT(wait);
-       kthread_t *thr;
-       int rc = 0;
-
-       tp.tp_magic = KZT_THREAD_TEST_MAGIC;
-       tp.tp_file = file;
-        spin_lock_init(&tp.tp_lock);
-       init_waitqueue_head(&tp.tp_waitq);
-       tp.tp_rc = 0;
-
-       spin_lock(&tp.tp_lock);
-
-       thr = (kthread_t *)thread_create(NULL, 0, kzt_thread_work, &tp, 0,
-                                        (proc_t *) &p0, TS_RUN, minclsyspri);
-       /* Must never fail under Solaris, but we check anyway so we can
-        * report an error when this impossible thing happens */
-       if (thr == NULL) {
-               rc = -ESRCH;
-               goto out;
-       }
-
-        for (;;) {
-                prepare_to_wait(&tp.tp_waitq, &wait, TASK_UNINTERRUPTIBLE);
-                if (tp.tp_rc)
-                        break;
-
-                spin_unlock(&tp.tp_lock);
-                schedule();
-                spin_lock(&tp.tp_lock);
-        }
-
-        kzt_vprint(file, KZT_THREAD_TEST1_NAME, "%s",
-                  "Thread successfully started and exited cleanly\n");
-out:
-       spin_unlock(&tp.tp_lock);
-
-       return rc;
-}
-
-kzt_subsystem_t *
-kzt_thread_init(void)
-{
-        kzt_subsystem_t *sub;
-
-        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
-        if (sub == NULL)
-                return NULL;
-
-        memset(sub, 0, sizeof(*sub));
-        strncpy(sub->desc.name, KZT_THREAD_NAME, KZT_NAME_SIZE);
-        strncpy(sub->desc.desc, KZT_THREAD_DESC, KZT_DESC_SIZE);
-        INIT_LIST_HEAD(&sub->subsystem_list);
-        INIT_LIST_HEAD(&sub->test_list);
-        spin_lock_init(&sub->test_lock);
-        sub->desc.id = KZT_SUBSYSTEM_THREAD;
-
-        KZT_TEST_INIT(sub, KZT_THREAD_TEST1_NAME, KZT_THREAD_TEST1_DESC,
-                      KZT_THREAD_TEST1_ID, kzt_thread_test1);
-
-        return sub;
-}
-
-void
-kzt_thread_fini(kzt_subsystem_t *sub)
-{
-        ASSERT(sub);
-        KZT_TEST_FINI(sub, KZT_THREAD_TEST1_ID);
-
-        kfree(sub);
-}
-
-int
-kzt_thread_id(void) {
-        return KZT_SUBSYSTEM_THREAD;
-}
diff --git a/splat/splat-time.c b/splat/splat-time.c
deleted file mode 100644 (file)
index 3e8007a..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_TIME             0x0800
-#define KZT_TIME_NAME                  "time"
-#define KZT_TIME_DESC                  "Kernel Time Tests"
-
-#define KZT_TIME_TEST1_ID              0x0801
-#define KZT_TIME_TEST1_NAME            "time1"
-#define KZT_TIME_TEST1_DESC            "HZ Test"
-
-#define KZT_TIME_TEST2_ID              0x0802
-#define KZT_TIME_TEST2_NAME            "time2"
-#define KZT_TIME_TEST2_DESC            "Monotonic Test"
-
-static int
-kzt_time_test1(struct file *file, void *arg)
-{
-       int myhz = hz;
-       kzt_vprint(file, KZT_TIME_TEST1_NAME, "hz is %d\n", myhz);
-        return 0;
-}
-
-static int
-kzt_time_test2(struct file *file, void *arg)
-{
-        hrtime_t tm1, tm2;
-       int i;
-
-        tm1 = gethrtime();
-        kzt_vprint(file, KZT_TIME_TEST2_NAME, "time is %lld\n", tm1);
-
-        for(i = 0; i < 100; i++) {
-                tm2 = gethrtime();
-                kzt_vprint(file, KZT_TIME_TEST2_NAME, "time is %lld\n", tm2);
-
-                if(tm1 > tm2) {
-                        kzt_print(file, "%s: gethrtime() is not giving monotonically increasing values\n", KZT_TIME_TEST2_NAME);
-                        return 1;
-                }
-                tm1 = tm2;
-
-                set_current_state(TASK_INTERRUPTIBLE);
-                schedule_timeout(10);
-        }
-
-        return 0;
-}
-
-kzt_subsystem_t *
-kzt_time_init(void)
-{
-        kzt_subsystem_t *sub;
-
-        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
-        if (sub == NULL)
-                return NULL;
-
-        memset(sub, 0, sizeof(*sub));
-        strncpy(sub->desc.name, KZT_TIME_NAME, KZT_NAME_SIZE);
-       strncpy(sub->desc.desc, KZT_TIME_DESC, KZT_DESC_SIZE);
-        INIT_LIST_HEAD(&sub->subsystem_list);
-       INIT_LIST_HEAD(&sub->test_list);
-        spin_lock_init(&sub->test_lock);
-        sub->desc.id = KZT_SUBSYSTEM_TIME;
-
-        KZT_TEST_INIT(sub, KZT_TIME_TEST1_NAME, KZT_TIME_TEST1_DESC,
-                     KZT_TIME_TEST1_ID, kzt_time_test1);
-        KZT_TEST_INIT(sub, KZT_TIME_TEST2_NAME, KZT_TIME_TEST2_DESC,
-                     KZT_TIME_TEST2_ID, kzt_time_test2);
-
-        return sub;
-}
-
-void
-kzt_time_fini(kzt_subsystem_t *sub)
-{
-        ASSERT(sub);
-
-        KZT_TEST_FINI(sub, KZT_TIME_TEST2_ID);
-        KZT_TEST_FINI(sub, KZT_TIME_TEST1_ID);
-
-        kfree(sub);
-}
-
-int
-kzt_time_id(void)
-{
-        return KZT_SUBSYSTEM_TIME;
-}
diff --git a/src/Makefile.am b/src/Makefile.am
deleted file mode 100644 (file)
index 51013b8..0000000
+++ /dev/null
@@ -1 +0,0 @@
-SUBDIRS = lib cmd spl splat