--- /dev/null
+# Makefile.in for splat kernel module
+
+MODULES := splat
+DISTFILES = Makefile.in \
+ splat-kmem.c splat-random.c splat-taskq.c \
+ splat-time.c splat-condvar.c splat-mutex.c \
+ splat-rwlock.c splat-thread.c splat-ctl.c
+CPPFLAGS += @KERNELCPPFLAGS@
+
+# Solaris porting layer aggressive tests
+obj-m := splat.o
+
+splat-objs += splat-ctl.o
+splat-objs += splat-kmem.o
+splat-objs += splat-taskq.o
+splat-objs += splat-random.o
+splat-objs += splat-mutex.o
+splat-objs += splat-condvar.o
+splat-objs += splat-thread.o
+splat-objs += splat-rwlock.o
+splat-objs += splat-time.o
+
+splatmodule := splat.ko
+splatmoduledir := @kmoduledir@/kernel/lib/
+
+all: all-spec
+
+install: all
+ mkdir -p $(DESTDIR)$(splatmoduledir)
+ $(INSTALL) -m 644 $(splatmodule) $(DESTDIR)$(splatmoduledir)/$(splatmodule)
+ -/sbin/depmod -a
+
+uninstall:
+ rm -f $(DESTDIR)$(splatmoduledir)/$(splatmodule)
+ -/sbin/depmod -a
+
+clean:
+	-rm -f $(splatmodule) *.o .*.cmd *.mod.c *.ko *.s */*.o
+
+distclean: clean
+ rm -f Makefile
+ rm -rf .tmp_versions
+
+maintainer-clean: distclean
+
+distdir: $(DISTFILES)
+ cp -p $(DISTFILES) $(distdir)
+
+all-spec:
+ $(MAKE) -C @kernelsrc@ SUBDIRS=`pwd` @KERNELMAKE_PARAMS@ modules
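+
+# Note (illustrative, not part of the build): configure substitutes
+# @kernelsrc@ and @KERNELMAKE_PARAMS@, so on a typical system the
+# all-spec rule is assumed to expand to something along the lines of:
+#   make -C /lib/modules/`uname -r`/build SUBDIRS=`pwd` modules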
--- /dev/null
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_CONDVAR 0x0500
+#define KZT_CONDVAR_NAME "condvar"
+#define KZT_CONDVAR_DESC "Kernel Condition Variable Tests"
+
+#define KZT_CONDVAR_TEST1_ID 0x0501
+#define KZT_CONDVAR_TEST1_NAME "signal1"
+#define KZT_CONDVAR_TEST1_DESC "Wake a single thread, cv_wait()/cv_signal()"
+
+#define KZT_CONDVAR_TEST2_ID 0x0502
+#define KZT_CONDVAR_TEST2_NAME "broadcast1"
+#define KZT_CONDVAR_TEST2_DESC "Wake all threads, cv_wait()/cv_broadcast()"
+
+#define KZT_CONDVAR_TEST3_ID 0x0503
+#define KZT_CONDVAR_TEST3_NAME "signal2"
+#define KZT_CONDVAR_TEST3_DESC "Wake a single thread, cv_wait_timeout()/cv_signal()"
+
+#define KZT_CONDVAR_TEST4_ID 0x0504
+#define KZT_CONDVAR_TEST4_NAME "broadcast2"
+#define KZT_CONDVAR_TEST4_DESC "Wake all threads, cv_wait_timeout()/cv_broadcast()"
+
+#define KZT_CONDVAR_TEST5_ID 0x0505
+#define KZT_CONDVAR_TEST5_NAME "timeout"
+#define KZT_CONDVAR_TEST5_DESC "Timeout thread, cv_wait_timeout()"
+
+#define KZT_CONDVAR_TEST_MAGIC 0x115599DDUL
+#define KZT_CONDVAR_TEST_NAME "condvar_test"
+#define KZT_CONDVAR_TEST_COUNT 8
+
+typedef struct condvar_priv {
+ unsigned long cv_magic;
+ struct file *cv_file;
+ kcondvar_t cv_condvar;
+ kmutex_t cv_mtx;
+} condvar_priv_t;
+
+typedef struct condvar_thr {
+ int ct_id;
+ const char *ct_name;
+ condvar_priv_t *ct_cvp;
+ int ct_rc;
+} condvar_thr_t;
+
+int
+kzt_condvar_test12_thread(void *arg)
+{
+ condvar_thr_t *ct = (condvar_thr_t *)arg;
+ condvar_priv_t *cv = ct->ct_cvp;
+ char name[16];
+
+ ASSERT(cv->cv_magic == KZT_CONDVAR_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d", KZT_CONDVAR_TEST_NAME, ct->ct_id);
+ daemonize(name);
+
+ mutex_enter(&cv->cv_mtx);
+ kzt_vprint(cv->cv_file, ct->ct_name,
+ "%s thread sleeping with %d waiters\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+ cv_wait(&cv->cv_condvar, &cv->cv_mtx);
+ kzt_vprint(cv->cv_file, ct->ct_name,
+ "%s thread woken %d waiters remain\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+ mutex_exit(&cv->cv_mtx);
+
+ return 0;
+}
+
+static int
+kzt_condvar_test1(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+ ct[i].ct_name = KZT_CONDVAR_TEST1_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(kzt_condvar_test12_thread, &ct[i], 0);
+ if (pids[i] >= 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake a single thread at a time, wait until it exits */
+ for (i = 1; i <= count; i++) {
+ cv_signal(&cv.cv_condvar);
+
+ while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
+ schedule();
+
+ /* Correct behavior 1 thread woken */
+ if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
+ continue;
+
+		kzt_vprint(file, KZT_CONDVAR_TEST1_NAME, "Attempted to "
+			"wake %d thread but %d threads woke\n",
+			1, count - atomic_read(&cv.cv_condvar.cv_waiters));
+ rc = -EINVAL;
+ break;
+ }
+
+ if (!rc)
+ kzt_vprint(file, KZT_CONDVAR_TEST1_NAME, "Correctly woke "
+ "%d sleeping threads %d at a time\n", count, 1);
+
+	/* Wait until the last mutex is dropped */
+ while (mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ /* Wake everything for the failure case */
+ cv_broadcast(&cv.cv_condvar);
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+static int
+kzt_condvar_test2(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+ ct[i].ct_name = KZT_CONDVAR_TEST2_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(kzt_condvar_test12_thread, &ct[i], 0);
+ if (pids[i] > 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake all threads waiting on the condition variable */
+ cv_broadcast(&cv.cv_condvar);
+
+ /* Wait until all threads have exited */
+ while ((atomic_read(&cv.cv_condvar.cv_waiters) > 0) || mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ kzt_vprint(file, KZT_CONDVAR_TEST2_NAME, "Correctly woke all "
+ "%d sleeping threads at once\n", count);
+
+	/* Cleanup */
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+int
+kzt_condvar_test34_thread(void *arg)
+{
+ condvar_thr_t *ct = (condvar_thr_t *)arg;
+ condvar_priv_t *cv = ct->ct_cvp;
+ char name[16];
+ clock_t rc;
+
+ ASSERT(cv->cv_magic == KZT_CONDVAR_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d", KZT_CONDVAR_TEST_NAME, ct->ct_id);
+ daemonize(name);
+
+ mutex_enter(&cv->cv_mtx);
+ kzt_vprint(cv->cv_file, ct->ct_name,
+ "%s thread sleeping with %d waiters\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+
+	/* Sleep for no longer than 3 seconds; for this test we should
+	 * never actually sleep that long without being woken up. */
+ rc = cv_timedwait(&cv->cv_condvar, &cv->cv_mtx, lbolt + HZ * 3);
+ if (rc == -1) {
+ ct->ct_rc = -ETIMEDOUT;
+ kzt_vprint(cv->cv_file, ct->ct_name, "%s thread timed out, "
+ "should have been woken\n", name);
+ } else {
+ kzt_vprint(cv->cv_file, ct->ct_name,
+ "%s thread woken %d waiters remain\n",
+ name, atomic_read(&cv->cv_condvar.cv_waiters));
+ }
+
+ mutex_exit(&cv->cv_mtx);
+
+ return 0;
+}
+
+static int
+kzt_condvar_test3(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+ ct[i].ct_name = KZT_CONDVAR_TEST3_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(kzt_condvar_test34_thread, &ct[i], 0);
+ if (pids[i] >= 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake a single thread at a time, wait until it exits */
+ for (i = 1; i <= count; i++) {
+ cv_signal(&cv.cv_condvar);
+
+ while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
+ schedule();
+
+ /* Correct behavior 1 thread woken */
+ if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
+ continue;
+
+		kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Attempted to "
+			"wake %d thread but %d threads woke\n",
+			1, count - atomic_read(&cv.cv_condvar.cv_waiters));
+ rc = -EINVAL;
+ break;
+ }
+
+ /* Validate no waiting thread timed out early */
+ for (i = 0; i < count; i++)
+ if (ct[i].ct_rc)
+ rc = ct[i].ct_rc;
+
+ if (!rc)
+ kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Correctly woke "
+ "%d sleeping threads %d at a time\n", count, 1);
+
+	/* Wait until the last mutex is dropped */
+ while (mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ /* Wake everything for the failure case */
+ cv_broadcast(&cv.cv_condvar);
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+static int
+kzt_condvar_test4(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_CONDVAR_TEST_COUNT];
+ condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
+ condvar_priv_t cv;
+
+ cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
+ cv.cv_file = file;
+ mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
+ ct[i].ct_cvp = &cv;
+ ct[i].ct_id = i;
+		ct[i].ct_name = KZT_CONDVAR_TEST4_NAME;
+ ct[i].ct_rc = 0;
+
+ pids[i] = kernel_thread(kzt_condvar_test34_thread, &ct[i], 0);
+ if (pids[i] >= 0)
+ count++;
+ }
+
+ /* Wait until all threads are waiting on the condition variable */
+ while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
+ schedule();
+
+ /* Wake a single thread at a time, wait until it exits */
+ for (i = 1; i <= count; i++) {
+ cv_signal(&cv.cv_condvar);
+
+ while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
+ schedule();
+
+ /* Correct behavior 1 thread woken */
+ if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
+ continue;
+
+		kzt_vprint(file, KZT_CONDVAR_TEST4_NAME, "Attempted to "
+			"wake %d thread but %d threads woke\n",
+			1, count - atomic_read(&cv.cv_condvar.cv_waiters));
+ rc = -EINVAL;
+ break;
+ }
+
+ /* Validate no waiting thread timed out early */
+ for (i = 0; i < count; i++)
+ if (ct[i].ct_rc)
+ rc = ct[i].ct_rc;
+
+ if (!rc)
+		kzt_vprint(file, KZT_CONDVAR_TEST4_NAME, "Correctly woke "
+ "%d sleeping threads %d at a time\n", count, 1);
+
+	/* Wait until the last mutex is dropped */
+ while (mutex_owner(&cv.cv_mtx))
+ schedule();
+
+ /* Wake everything for the failure case */
+ cv_broadcast(&cv.cv_condvar);
+ cv_destroy(&cv.cv_condvar);
+ mutex_destroy(&cv.cv_mtx);
+
+ return rc;
+}
+
+static int
+kzt_condvar_test5(struct file *file, void *arg)
+{
+ kcondvar_t condvar;
+ kmutex_t mtx;
+ clock_t time_left, time_before, time_after, time_delta;
+ int64_t whole_delta;
+ int32_t remain_delta;
+ int rc = 0;
+
+ mutex_init(&mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
+ cv_init(&condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
+
+ kzt_vprint(file, KZT_CONDVAR_TEST5_NAME, "Thread going to sleep for "
+ "%d second and expecting to be woken by timeout\n", 1);
+
+ /* Allow a 1 second timeout, plenty long to validate correctness. */
+ time_before = lbolt;
+ mutex_enter(&mtx);
+ time_left = cv_timedwait(&condvar, &mtx, lbolt + HZ);
+ mutex_exit(&mtx);
+ time_after = lbolt;
+	time_delta = time_after - time_before; /* XXX - Handle jiffies wrap */
+ whole_delta = time_delta;
+ remain_delta = do_div(whole_delta, HZ);
+
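+	/* cv_timedwait() follows the Solaris convention: a return value of
+	 * -1 means the timeout expired before the cv was signaled, while a
+	 * positive value is the time remaining. */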
+ if (time_left == -1) {
+ if (time_delta >= HZ) {
+ kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
+ "Thread correctly timed out and was asleep "
+ "for %d.%d seconds (%d second min)\n",
+ (int)whole_delta, remain_delta, 1);
+ } else {
+ kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
+ "Thread correctly timed out but was only "
+ "asleep for %d.%d seconds (%d second "
+ "min)\n", (int)whole_delta, remain_delta, 1);
+ rc = -ETIMEDOUT;
+ }
+ } else {
+ kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
+ "Thread exited after only %d.%d seconds, it "
+ "did not hit the %d second timeout\n",
+ (int)whole_delta, remain_delta, 1);
+ rc = -ETIMEDOUT;
+ }
+
+ cv_destroy(&condvar);
+ mutex_destroy(&mtx);
+
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_condvar_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_CONDVAR_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_CONDVAR_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_CONDVAR;
+
+ KZT_TEST_INIT(sub, KZT_CONDVAR_TEST1_NAME, KZT_CONDVAR_TEST1_DESC,
+ KZT_CONDVAR_TEST1_ID, kzt_condvar_test1);
+ KZT_TEST_INIT(sub, KZT_CONDVAR_TEST2_NAME, KZT_CONDVAR_TEST2_DESC,
+ KZT_CONDVAR_TEST2_ID, kzt_condvar_test2);
+ KZT_TEST_INIT(sub, KZT_CONDVAR_TEST3_NAME, KZT_CONDVAR_TEST3_DESC,
+ KZT_CONDVAR_TEST3_ID, kzt_condvar_test3);
+ KZT_TEST_INIT(sub, KZT_CONDVAR_TEST4_NAME, KZT_CONDVAR_TEST4_DESC,
+ KZT_CONDVAR_TEST4_ID, kzt_condvar_test4);
+ KZT_TEST_INIT(sub, KZT_CONDVAR_TEST5_NAME, KZT_CONDVAR_TEST5_DESC,
+ KZT_CONDVAR_TEST5_ID, kzt_condvar_test5);
+
+ return sub;
+}
+
+void
+kzt_condvar_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_CONDVAR_TEST5_ID);
+ KZT_TEST_FINI(sub, KZT_CONDVAR_TEST4_ID);
+ KZT_TEST_FINI(sub, KZT_CONDVAR_TEST3_ID);
+ KZT_TEST_FINI(sub, KZT_CONDVAR_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_CONDVAR_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_condvar_id(void) {
+ return KZT_SUBSYSTEM_CONDVAR;
+}
--- /dev/null
+/*
+ * My intent is to create a loadable kzt (kernel ZFS test) module
+ * which can be used as an access point to run in-kernel ZFS regression
+ * tests.  Why do we need this when we have ztest?  Well, ztest.c only
+ * exercises the ZFS code proper; it cannot be used to validate the
+ * Linux kernel shim primitives.  This also provides a nice hook for
+ * any other in-kernel regression tests we wish to run, such as direct
+ * in-kernel tests against the DMU.
+ *
+ * The basic design is that the kzt module is constructed of various
+ * kzt_* source files, each of which contains regression tests.
+ * For example the kzt_linux_kmem.c file contains tests for validating
+ * kmem correctness.  When the kzt module is loaded, kzt_*_init()
+ * will be called for each subsystem's tests; similarly, kzt_*_fini()
+ * is called when the kzt module is removed.  Each test can then be
+ * run by making an ioctl() call from a userspace control application
+ * to pick the subsystem and test which should be run.
+ *
+ * Author: Brian Behlendorf
+ */
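+
+/*
+ * Illustrative only (userspace side, not part of this module): a control
+ * application is expected to drive a test roughly as sketched below.
+ * The structure layout and ioctl numbers come from splat-ctl.h; the
+ * /dev/kztctl device path is an assumption based on the "kztctl" name
+ * registered below.
+ *
+ *   int fd = open("/dev/kztctl", O_RDWR);
+ *   kzt_cmd_t cmd = { 0 };
+ *   cmd.cmd_magic     = KZT_CMD_MAGIC;
+ *   cmd.cmd_subsystem = KZT_SUBSYSTEM_KMEM;    (pick a subsystem)
+ *   cmd.cmd_test      = KZT_KMEM_TEST1_ID;     (pick one of its tests)
+ *   cmd.cmd_data_size = 0;                     (no opaque payload)
+ *   ioctl(fd, KZT_CMD, &cmd);                  (run the test)
+ *   read(fd, buf, sizeof(buf));                (collect the log output)
+ */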
+
+#include <splat-ctl.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+#include <linux/devfs_fs_kernel.h>
+#endif
+
+#include <linux/cdev.h>
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+static struct class_simple *kzt_class;
+#else
+static struct class *kzt_class;
+#endif
+static struct list_head kzt_module_list;
+static spinlock_t kzt_module_lock;
+
+static int
+kzt_open(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ kzt_info_t *info;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ info = (kzt_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&info->info_lock);
+ info->info_size = KZT_INFO_BUFFER_SIZE;
+ info->info_buffer = (char *)vmalloc(KZT_INFO_BUFFER_SIZE);
+ if (info->info_buffer == NULL) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
+ info->info_head = info->info_buffer;
+ file->private_data = (void *)info;
+
+ kzt_print(file, "Kernel ZFS Tests %s\n", KZT_VERSION);
+
+ return 0;
+}
+
+static int
+kzt_release(struct inode *inode, struct file *file)
+{
+ unsigned int minor = iminor(inode);
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ vfree(info->info_buffer);
+ kfree(info);
+
+ return 0;
+}
+
+static int
+kzt_buffer_clear(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+ memset(info->info_buffer, 0, info->info_size);
+ info->info_head = info->info_buffer;
+ spin_unlock(&info->info_lock);
+
+ return 0;
+}
+
+static int
+kzt_buffer_size(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+ char *buf;
+ int min, size, rc = 0;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+ if (kcfg->cfg_arg1 > 0) {
+
+ size = kcfg->cfg_arg1;
+ buf = (char *)vmalloc(size);
+ if (buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+		/* Zero fill and truncate contents when copying the buffer */
+ min = ((size < info->info_size) ? size : info->info_size);
+ memset(buf, 0, size);
+ memcpy(buf, info->info_buffer, min);
+ vfree(info->info_buffer);
+ info->info_size = size;
+ info->info_buffer = buf;
+ info->info_head = info->info_buffer;
+ }
+
+ kcfg->cfg_rc1 = info->info_size;
+
+ if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+ rc = -EFAULT;
+out:
+ spin_unlock(&info->info_lock);
+
+ return rc;
+}
+
+
+static kzt_subsystem_t *
+kzt_subsystem_find(int id) {
+ kzt_subsystem_t *sub;
+
+ spin_lock(&kzt_module_lock);
+ list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
+ if (id == sub->desc.id) {
+ spin_unlock(&kzt_module_lock);
+ return sub;
+ }
+ }
+ spin_unlock(&kzt_module_lock);
+
+ return NULL;
+}
+
+static int
+kzt_subsystem_count(kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_subsystem_t *sub;
+ int i = 0;
+
+ spin_lock(&kzt_module_lock);
+ list_for_each_entry(sub, &kzt_module_list, subsystem_list)
+ i++;
+
+ spin_unlock(&kzt_module_lock);
+ kcfg->cfg_rc1 = i;
+
+ if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int
+kzt_subsystem_list(kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_subsystem_t *sub;
+ kzt_cfg_t *tmp;
+ int size, i = 0;
+
+	/* The structure is sized large enough for N subsystem entries,
+	 * where N is passed in by the caller.  On exit the number of
+	 * entries filled in with valid subsystems is stored in
+	 * cfg_rc1.  If the caller does not provide enough entries
+	 * for all subsystems we truncate the list to avoid an overrun.
+	 */
+ size = sizeof(*tmp) + kcfg->cfg_data.kzt_subsystems.size *
+ sizeof(kzt_user_t);
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ /* Local 'tmp' is used as the structure copied back to user space */
+ memset(tmp, 0, size);
+ memcpy(tmp, kcfg, sizeof(*kcfg));
+
+ spin_lock(&kzt_module_lock);
+ list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
+		/* Truncate the list before we overrun the alloc'ed memory */
+		if (i == kcfg->cfg_data.kzt_subsystems.size)
+			break;
+
+		strncpy(tmp->cfg_data.kzt_subsystems.descs[i].name,
+			sub->desc.name, KZT_NAME_SIZE);
+		strncpy(tmp->cfg_data.kzt_subsystems.descs[i].desc,
+			sub->desc.desc, KZT_DESC_SIZE);
+		tmp->cfg_data.kzt_subsystems.descs[i].id = sub->desc.id;
+		i++;
+ }
+ spin_unlock(&kzt_module_lock);
+ tmp->cfg_rc1 = i;
+
+ if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
+ kfree(tmp);
+ return -EFAULT;
+ }
+
+ kfree(tmp);
+ return 0;
+}
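+
+/* Usage note (userspace side, illustrative only): a control application
+ * would typically issue KZT_CFG_SUBSYSTEM_COUNT first to learn how many
+ * subsystems are registered, allocate sizeof(kzt_cfg_t) plus that many
+ * kzt_user_t entries, set cfg_data.kzt_subsystems.size accordingly, and
+ * then issue KZT_CFG_SUBSYSTEM_LIST; the number of entries actually
+ * filled in comes back in cfg_rc1.  The same pattern applies to the
+ * per-subsystem test list below. */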
+
+static int
+kzt_test_count(kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_subsystem_t *sub;
+ kzt_test_t *test;
+ int i = 0;
+
+ /* Subsystem ID passed as arg1 */
+ sub = kzt_subsystem_find(kcfg->cfg_arg1);
+ if (sub == NULL)
+ return -EINVAL;
+
+ spin_lock(&(sub->test_lock));
+ list_for_each_entry(test, &(sub->test_list), test_list)
+ i++;
+
+ spin_unlock(&(sub->test_lock));
+ kcfg->cfg_rc1 = i;
+
+ if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int
+kzt_test_list(kzt_cfg_t *kcfg, unsigned long arg)
+{
+ kzt_subsystem_t *sub;
+ kzt_test_t *test;
+ kzt_cfg_t *tmp;
+ int size, i = 0;
+
+ /* Subsystem ID passed as arg1 */
+ sub = kzt_subsystem_find(kcfg->cfg_arg1);
+ if (sub == NULL)
+ return -EINVAL;
+
+	/* The structure is sized large enough for N test entries,
+	 * where N is passed in by the caller.  On exit the number of
+	 * entries filled in with valid tests is stored in
+	 * cfg_rc1.  If the caller does not provide enough entries
+	 * for all tests we truncate the list to avoid an overrun.
+	 */
+ size = sizeof(*tmp)+kcfg->cfg_data.kzt_tests.size*sizeof(kzt_user_t);
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ /* Local 'tmp' is used as the structure copied back to user space */
+ memset(tmp, 0, size);
+ memcpy(tmp, kcfg, sizeof(*kcfg));
+
+ spin_lock(&(sub->test_lock));
+ list_for_each_entry(test, &(sub->test_list), test_list) {
+		/* Truncate the list before we overrun the alloc'ed memory */
+		if (i == kcfg->cfg_data.kzt_tests.size)
+			break;
+
+		strncpy(tmp->cfg_data.kzt_tests.descs[i].name,
+			test->desc.name, KZT_NAME_SIZE);
+		strncpy(tmp->cfg_data.kzt_tests.descs[i].desc,
+			test->desc.desc, KZT_DESC_SIZE);
+		tmp->cfg_data.kzt_tests.descs[i].id = test->desc.id;
+		i++;
+ }
+ spin_unlock(&(sub->test_lock));
+ tmp->cfg_rc1 = i;
+
+ if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
+ kfree(tmp);
+ return -EFAULT;
+ }
+
+ kfree(tmp);
+ return 0;
+}
+
+static int
+kzt_validate(struct file *file, kzt_subsystem_t *sub, int cmd, void *arg)
+{
+ kzt_test_t *test;
+
+ spin_lock(&(sub->test_lock));
+ list_for_each_entry(test, &(sub->test_list), test_list) {
+ if (test->desc.id == cmd) {
+ spin_unlock(&(sub->test_lock));
+ return test->test(file, arg);
+ }
+ }
+ spin_unlock(&(sub->test_lock));
+
+ return -EINVAL;
+}
+
+static int
+kzt_ioctl_cfg(struct file *file, unsigned long arg)
+{
+ kzt_cfg_t kcfg;
+ int rc = 0;
+
+ if (copy_from_user(&kcfg, (kzt_cfg_t *)arg, sizeof(kcfg)))
+ return -EFAULT;
+
+ if (kcfg.cfg_magic != KZT_CFG_MAGIC) {
+ kzt_print(file, "Bad config magic 0x%x != 0x%x\n",
+ kcfg.cfg_magic, KZT_CFG_MAGIC);
+ return -EINVAL;
+ }
+
+ switch (kcfg.cfg_cmd) {
+ case KZT_CFG_BUFFER_CLEAR:
+ /* cfg_arg1 - Unused
+ * cfg_rc1 - Unused
+ */
+ rc = kzt_buffer_clear(file, &kcfg, arg);
+ break;
+ case KZT_CFG_BUFFER_SIZE:
+ /* cfg_arg1 - 0 - query size; >0 resize
+ * cfg_rc1 - Set to current buffer size
+ */
+ rc = kzt_buffer_size(file, &kcfg, arg);
+ break;
+ case KZT_CFG_SUBSYSTEM_COUNT:
+ /* cfg_arg1 - Unused
+ * cfg_rc1 - Set to number of subsystems
+ */
+ rc = kzt_subsystem_count(&kcfg, arg);
+ break;
+ case KZT_CFG_SUBSYSTEM_LIST:
+ /* cfg_arg1 - Unused
+ * cfg_rc1 - Set to number of subsystems
+ * cfg_data.kzt_subsystems - Populated with subsystems
+ */
+ rc = kzt_subsystem_list(&kcfg, arg);
+ break;
+ case KZT_CFG_TEST_COUNT:
+ /* cfg_arg1 - Set to a target subsystem
+ * cfg_rc1 - Set to number of tests
+ */
+ rc = kzt_test_count(&kcfg, arg);
+ break;
+ case KZT_CFG_TEST_LIST:
+ /* cfg_arg1 - Set to a target subsystem
+ * cfg_rc1 - Set to number of tests
+		 * cfg_data.kzt_tests - Populated with tests
+ */
+ rc = kzt_test_list(&kcfg, arg);
+ break;
+ default:
+ kzt_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+kzt_ioctl_cmd(struct file *file, unsigned long arg)
+{
+ kzt_subsystem_t *sub;
+ kzt_cmd_t kcmd;
+ int rc = -EINVAL;
+ void *data = NULL;
+
+	if (copy_from_user(&kcmd, (kzt_cmd_t *)arg, sizeof(kcmd)))
+ return -EFAULT;
+
+ if (kcmd.cmd_magic != KZT_CMD_MAGIC) {
+ kzt_print(file, "Bad command magic 0x%x != 0x%x\n",
+			  kcmd.cmd_magic, KZT_CMD_MAGIC);
+ return -EINVAL;
+ }
+
+ /* Allocate memory for any opaque data the caller needed to pass on */
+ if (kcmd.cmd_data_size > 0) {
+ data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, (void *)(arg + offsetof(kzt_cmd_t,
+ cmd_data_str)), kcmd.cmd_data_size)) {
+ kfree(data);
+ return -EFAULT;
+ }
+ }
+
+ sub = kzt_subsystem_find(kcmd.cmd_subsystem);
+ if (sub != NULL)
+ rc = kzt_validate(file, sub, kcmd.cmd_test, data);
+ else
+ rc = -EINVAL;
+
+ if (data != NULL)
+ kfree(data);
+
+ return rc;
+}
+
+static int
+kzt_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ unsigned int minor = iminor(file->f_dentry->d_inode);
+ int rc = 0;
+
+ /* Ignore tty ioctls */
+ if ((cmd & 0xffffff00) == ((int)'T') << 8)
+ return -ENOTTY;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ switch (cmd) {
+ case KZT_CFG:
+ rc = kzt_ioctl_cfg(file, arg);
+ break;
+ case KZT_CMD:
+ rc = kzt_ioctl_cmd(file, arg);
+ break;
+ default:
+ kzt_print(file, "Bad ioctl command %d\n", cmd);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+/* I'm not sure why you would want to write to this buffer from
+ * user space, since its principal use is to pass test status info
+ * back to user space, but I don't see any reason to prevent it.
+ */
+static ssize_t kzt_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = iminor(file->f_dentry->d_inode);
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+ int rc = 0;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+
+ /* Write beyond EOF */
+ if (*ppos >= info->info_size) {
+ rc = -EFBIG;
+ goto out;
+ }
+
+ /* Resize count if beyond EOF */
+ if (*ppos + count > info->info_size)
+ count = info->info_size - *ppos;
+
+	if (copy_from_user(info->info_buffer + *ppos, buf, count)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ *ppos += count;
+ rc = count;
+out:
+ spin_unlock(&info->info_lock);
+ return rc;
+}
+
+static ssize_t kzt_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int minor = iminor(file->f_dentry->d_inode);
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+ int rc = 0;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+
+ /* Read beyond EOF */
+ if (*ppos >= info->info_size)
+ goto out;
+
+ /* Resize count if beyond EOF */
+ if (*ppos + count > info->info_size)
+ count = info->info_size - *ppos;
+
+ if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ *ppos += count;
+ rc = count;
+out:
+ spin_unlock(&info->info_lock);
+ return rc;
+}
+
+static loff_t kzt_seek(struct file *file, loff_t offset, int origin)
+{
+ unsigned int minor = iminor(file->f_dentry->d_inode);
+ kzt_info_t *info = (kzt_info_t *)file->private_data;
+ int rc = -EINVAL;
+
+ if (minor >= KZT_MINORS)
+ return -ENXIO;
+
+ ASSERT(info);
+ ASSERT(info->info_buffer);
+
+ spin_lock(&info->info_lock);
+
+ switch (origin) {
+ case 0: /* SEEK_SET - No-op just do it */
+ break;
+ case 1: /* SEEK_CUR - Seek from current */
+ offset = file->f_pos + offset;
+ break;
+ case 2: /* SEEK_END - Seek from end */
+ offset = info->info_size + offset;
+ break;
+ }
+
+ if (offset >= 0) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ rc = offset;
+ }
+
+ spin_unlock(&info->info_lock);
+
+ return rc;
+}
+
+static struct file_operations kzt_fops = {
+ .owner = THIS_MODULE,
+ .open = kzt_open,
+ .release = kzt_release,
+ .ioctl = kzt_ioctl,
+ .read = kzt_read,
+ .write = kzt_write,
+ .llseek = kzt_seek,
+};
+
+static struct cdev kzt_cdev = {
+ .owner = THIS_MODULE,
+ .kobj = { .name = "kztctl", },
+};
+
+static int __init
+kzt_init(void)
+{
+ dev_t dev;
+ int rc;
+
+ spin_lock_init(&kzt_module_lock);
+ INIT_LIST_HEAD(&kzt_module_list);
+
+ KZT_SUBSYSTEM_INIT(kmem);
+ KZT_SUBSYSTEM_INIT(taskq);
+ KZT_SUBSYSTEM_INIT(krng);
+ KZT_SUBSYSTEM_INIT(mutex);
+ KZT_SUBSYSTEM_INIT(condvar);
+ KZT_SUBSYSTEM_INIT(thread);
+ KZT_SUBSYSTEM_INIT(rwlock);
+ KZT_SUBSYSTEM_INIT(time);
+
+ dev = MKDEV(KZT_MAJOR, 0);
+ if ((rc = register_chrdev_region(dev, KZT_MINORS, "kztctl")))
+ goto error;
+
+ /* Support for registering a character driver */
+ cdev_init(&kzt_cdev, &kzt_fops);
+ if ((rc = cdev_add(&kzt_cdev, dev, KZT_MINORS))) {
+ printk(KERN_ERR "kzt: Error adding cdev, %d\n", rc);
+ kobject_put(&kzt_cdev.kobj);
+ unregister_chrdev_region(dev, KZT_MINORS);
+ goto error;
+ }
+
+	/* Support for udev: make driver info available in sysfs */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+ kzt_class = class_simple_create(THIS_MODULE, "kzt");
+#else
+ kzt_class = class_create(THIS_MODULE, "kzt");
+#endif
+ if (IS_ERR(kzt_class)) {
+ rc = PTR_ERR(kzt_class);
+ printk(KERN_ERR "kzt: Error creating kzt class, %d\n", rc);
+ cdev_del(&kzt_cdev);
+ unregister_chrdev_region(dev, KZT_MINORS);
+ goto error;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+ class_simple_device_add(kzt_class, MKDEV(KZT_MAJOR, 0),
+ NULL, "kztctl");
+#else
+ class_device_create(kzt_class, NULL, MKDEV(KZT_MAJOR, 0),
+ NULL, "kztctl");
+#endif
+
+ printk(KERN_INFO "kzt: Kernel ZFS Tests %s Loaded\n", KZT_VERSION);
+ return 0;
+error:
+ printk(KERN_ERR "kzt: Error registering kzt device, %d\n", rc);
+ return rc;
+}
+
+static void
+kzt_fini(void)
+{
+ dev_t dev = MKDEV(KZT_MAJOR, 0);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+ class_simple_device_remove(dev);
+ class_simple_destroy(kzt_class);
+ devfs_remove("kzt/kztctl");
+ devfs_remove("kzt");
+#else
+ class_device_destroy(kzt_class, dev);
+ class_destroy(kzt_class);
+#endif
+ cdev_del(&kzt_cdev);
+ unregister_chrdev_region(dev, KZT_MINORS);
+
+ KZT_SUBSYSTEM_FINI(time);
+ KZT_SUBSYSTEM_FINI(rwlock);
+ KZT_SUBSYSTEM_FINI(thread);
+ KZT_SUBSYSTEM_FINI(condvar);
+ KZT_SUBSYSTEM_FINI(mutex);
+ KZT_SUBSYSTEM_FINI(krng);
+ KZT_SUBSYSTEM_FINI(taskq);
+ KZT_SUBSYSTEM_FINI(kmem);
+
+ ASSERT(list_empty(&kzt_module_list));
+ printk(KERN_INFO "kzt: Kernel ZFS Tests %s Unloaded\n", KZT_VERSION);
+}
+
+module_init(kzt_init);
+module_exit(kzt_fini);
+
+MODULE_AUTHOR("Lawrence Livermore National Labs");
+MODULE_DESCRIPTION("Kernel ZFS Test");
+MODULE_LICENSE("GPL");
+
--- /dev/null
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_KMEM 0x0100
+#define KZT_KMEM_NAME "kmem"
+#define KZT_KMEM_DESC "Kernel Malloc/Slab Tests"
+
+#define KZT_KMEM_TEST1_ID 0x0101
+#define KZT_KMEM_TEST1_NAME "kmem_alloc"
+#define KZT_KMEM_TEST1_DESC "Memory allocation test (kmem_alloc)"
+
+#define KZT_KMEM_TEST2_ID 0x0102
+#define KZT_KMEM_TEST2_NAME "kmem_zalloc"
+#define KZT_KMEM_TEST2_DESC "Memory allocation test (kmem_zalloc)"
+
+#define KZT_KMEM_TEST3_ID 0x0103
+#define KZT_KMEM_TEST3_NAME "slab_alloc"
+#define KZT_KMEM_TEST3_DESC "Slab constructor/destructor test"
+
+#define KZT_KMEM_TEST4_ID 0x0104
+#define KZT_KMEM_TEST4_NAME "slab_reap"
+#define KZT_KMEM_TEST4_DESC "Slab reaping test"
+
+#define KZT_KMEM_ALLOC_COUNT 10
+/* XXX - This test may fail under tight memory conditions */
+static int
+kzt_kmem_test1(struct file *file, void *arg)
+{
+ void *ptr[KZT_KMEM_ALLOC_COUNT];
+ int size = PAGE_SIZE;
+ int i, count, rc = 0;
+
+ while ((!rc) && (size < (PAGE_SIZE * 16))) {
+ count = 0;
+
+ for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
+ ptr[i] = kmem_alloc(size, KM_SLEEP);
+ if (ptr[i])
+ count++;
+ }
+
+ for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
+ if (ptr[i])
+ kmem_free(ptr[i], size);
+
+ kzt_vprint(file, KZT_KMEM_TEST1_NAME,
+ "%d byte allocations, %d/%d successful\n",
+ size, count, KZT_KMEM_ALLOC_COUNT);
+ if (count != KZT_KMEM_ALLOC_COUNT)
+ rc = -ENOMEM;
+
+ size *= 2;
+ }
+
+ return rc;
+}
+
+static int
+kzt_kmem_test2(struct file *file, void *arg)
+{
+ void *ptr[KZT_KMEM_ALLOC_COUNT];
+ int size = PAGE_SIZE;
+ int i, j, count, rc = 0;
+
+ while ((!rc) && (size < (PAGE_SIZE * 16))) {
+ count = 0;
+
+ for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
+ ptr[i] = kmem_zalloc(size, KM_SLEEP);
+ if (ptr[i])
+ count++;
+ }
+
+ /* Ensure buffer has been zero filled */
+ for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
+ for (j = 0; j < size; j++) {
+ if (((char *)ptr[i])[j] != '\0') {
+ kzt_vprint(file, KZT_KMEM_TEST2_NAME,
+ "%d-byte allocation was "
+ "not zeroed\n", size);
+ rc = -EFAULT;
+ }
+ }
+ }
+
+ for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
+ if (ptr[i])
+ kmem_free(ptr[i], size);
+
+ kzt_vprint(file, KZT_KMEM_TEST2_NAME,
+ "%d byte allocations, %d/%d successful\n",
+ size, count, KZT_KMEM_ALLOC_COUNT);
+ if (count != KZT_KMEM_ALLOC_COUNT)
+ rc = -ENOMEM;
+
+ size *= 2;
+ }
+
+ return rc;
+}
+
+#define KZT_KMEM_TEST_MAGIC 0x004488CCUL
+#define KZT_KMEM_CACHE_NAME "kmem_test"
+#define KZT_KMEM_CACHE_SIZE 256
+#define KZT_KMEM_OBJ_COUNT 128
+#define KZT_KMEM_OBJ_RECLAIM 64
+
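+/* With KZT_KMEM_OBJ_COUNT = 128 and KZT_KMEM_OBJ_RECLAIM = 64, the
+ * reclaim callback in test 4 frees half of the objects, so after the
+ * reap roughly 50% of the original object count should remain; the
+ * test below accepts anything within +/- 10% of that target. */
+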
+typedef struct kmem_cache_data {
+ char kcd_buf[KZT_KMEM_CACHE_SIZE];
+ unsigned long kcd_magic;
+ int kcd_flag;
+} kmem_cache_data_t;
+
+typedef struct kmem_cache_priv {
+ unsigned long kcp_magic;
+ struct file *kcp_file;
+ kmem_cache_t *kcp_cache;
+ kmem_cache_data_t *kcp_kcd[KZT_KMEM_OBJ_COUNT];
+ int kcp_count;
+ int kcp_rc;
+} kmem_cache_priv_t;
+
+static int
+kzt_kmem_test34_constructor(void *ptr, void *priv, int flags)
+{
+ kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
+ kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+
+ if (kcd) {
+ memset(kcd->kcd_buf, 0xaa, KZT_KMEM_CACHE_SIZE);
+ kcd->kcd_flag = 1;
+
+ if (kcp) {
+ kcd->kcd_magic = kcp->kcp_magic;
+ kcp->kcp_count++;
+ }
+ }
+
+ return 0;
+}
+
+static void
+kzt_kmem_test34_destructor(void *ptr, void *priv)
+{
+ kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
+ kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+
+ if (kcd) {
+ memset(kcd->kcd_buf, 0xbb, KZT_KMEM_CACHE_SIZE);
+ kcd->kcd_flag = 0;
+
+ if (kcp)
+ kcp->kcp_count--;
+ }
+
+ return;
+}
+
+static int
+kzt_kmem_test3(struct file *file, void *arg)
+{
+ kmem_cache_t *cache = NULL;
+ kmem_cache_data_t *kcd = NULL;
+ kmem_cache_priv_t kcp;
+ int rc = 0, max;
+
+ kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
+ kcp.kcp_file = file;
+ kcp.kcp_count = 0;
+ kcp.kcp_rc = 0;
+
+ cache = kmem_cache_create(KZT_KMEM_CACHE_NAME, sizeof(*kcd), 0,
+ kzt_kmem_test34_constructor,
+ kzt_kmem_test34_destructor,
+ NULL, &kcp, NULL, 0);
+ if (!cache) {
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+ "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
+ return -ENOMEM;
+ }
+
+ kcd = kmem_cache_alloc(cache, 0);
+ if (!kcd) {
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+ "Unable to allocate from '%s'\n",
+ KZT_KMEM_CACHE_NAME);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ if (!kcd->kcd_flag) {
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+			   "Failed to run constructor for '%s'\n",
+ KZT_KMEM_CACHE_NAME);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ if (kcd->kcd_magic != kcp.kcp_magic) {
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+ "Failed to pass private data to constructor "
+ "for '%s'\n", KZT_KMEM_CACHE_NAME);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ max = kcp.kcp_count;
+
+	/* Destructors run lazily, so it is hard to check correctness here.
+	 * We assume that if it doesn't crash the free worked properly */
+ kmem_cache_free(cache, kcd);
+
+	/* Destroy the entire cache, which forces the destructors to
+	 * run, so we can verify one was called for every object */
+ kmem_cache_destroy(cache);
+ if (kcp.kcp_count) {
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+ "Failed to run destructor on all slab objects "
+ "for '%s'\n", KZT_KMEM_CACHE_NAME);
+ rc = -EINVAL;
+ }
+
+ kzt_vprint(file, KZT_KMEM_TEST3_NAME,
+ "%d allocated/destroyed objects for '%s'\n",
+ max, KZT_KMEM_CACHE_NAME);
+
+ return rc;
+
+out_free:
+ if (kcd)
+ kmem_cache_free(cache, kcd);
+
+ kmem_cache_destroy(cache);
+ return rc;
+}
+
+static void
+kzt_kmem_test4_reclaim(void *priv)
+{
+ kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+ int i;
+
+ kzt_vprint(kcp->kcp_file, KZT_KMEM_TEST4_NAME,
+ "Reaping %d objects from '%s'\n",
+ KZT_KMEM_OBJ_RECLAIM, KZT_KMEM_CACHE_NAME);
+ for (i = 0; i < KZT_KMEM_OBJ_RECLAIM; i++) {
+ if (kcp->kcp_kcd[i]) {
+ kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
+ kcp->kcp_kcd[i] = NULL;
+ }
+ }
+
+ return;
+}
+
+static int
+kzt_kmem_test4(struct file *file, void *arg)
+{
+ kmem_cache_t *cache;
+ kmem_cache_priv_t kcp;
+ int i, rc = 0, max, reclaim_percent, target_percent;
+
+ kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
+ kcp.kcp_file = file;
+ kcp.kcp_count = 0;
+ kcp.kcp_rc = 0;
+
+ cache = kmem_cache_create(KZT_KMEM_CACHE_NAME,
+ sizeof(kmem_cache_data_t), 0,
+ kzt_kmem_test34_constructor,
+ kzt_kmem_test34_destructor,
+ kzt_kmem_test4_reclaim, &kcp, NULL, 0);
+ if (!cache) {
+ kzt_vprint(file, KZT_KMEM_TEST4_NAME,
+ "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
+ return -ENOMEM;
+ }
+
+ kcp.kcp_cache = cache;
+
+ for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++) {
+		/* Not all allocations need to succeed */
+ kcp.kcp_kcd[i] = kmem_cache_alloc(cache, 0);
+ if (!kcp.kcp_kcd[i]) {
+ kzt_vprint(file, KZT_KMEM_TEST4_NAME,
+ "Unable to allocate from '%s'\n",
+ KZT_KMEM_CACHE_NAME);
+ }
+ }
+
+ max = kcp.kcp_count;
+
+ /* Force shrinker to run */
+ kmem_reap();
+
+	/* Reap the reclaimed objects now; this ensures the destructors are run */
+ kmem_cache_reap_now(cache);
+
+ reclaim_percent = ((kcp.kcp_count * 100) / max);
+ target_percent = (((KZT_KMEM_OBJ_COUNT - KZT_KMEM_OBJ_RECLAIM) * 100) /
+ KZT_KMEM_OBJ_COUNT);
+ kzt_vprint(file, KZT_KMEM_TEST4_NAME,
+ "%d%% (%d/%d) of previous size, target of "
+ "%d%%-%d%% for '%s'\n", reclaim_percent, kcp.kcp_count,
+ max, target_percent - 10, target_percent + 10,
+ KZT_KMEM_CACHE_NAME);
+ if ((reclaim_percent < target_percent - 10) ||
+ (reclaim_percent > target_percent + 10))
+ rc = -EINVAL;
+
+ /* Cleanup our mess */
+ for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++)
+ if (kcp.kcp_kcd[i])
+ kmem_cache_free(cache, kcp.kcp_kcd[i]);
+
+ kmem_cache_destroy(cache);
+
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_kmem_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_KMEM_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_KMEM_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_KMEM;
+
+ KZT_TEST_INIT(sub, KZT_KMEM_TEST1_NAME, KZT_KMEM_TEST1_DESC,
+ KZT_KMEM_TEST1_ID, kzt_kmem_test1);
+ KZT_TEST_INIT(sub, KZT_KMEM_TEST2_NAME, KZT_KMEM_TEST2_DESC,
+ KZT_KMEM_TEST2_ID, kzt_kmem_test2);
+ KZT_TEST_INIT(sub, KZT_KMEM_TEST3_NAME, KZT_KMEM_TEST3_DESC,
+ KZT_KMEM_TEST3_ID, kzt_kmem_test3);
+ KZT_TEST_INIT(sub, KZT_KMEM_TEST4_NAME, KZT_KMEM_TEST4_DESC,
+ KZT_KMEM_TEST4_ID, kzt_kmem_test4);
+
+ return sub;
+}
+
+void
+kzt_kmem_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_KMEM_TEST4_ID);
+ KZT_TEST_FINI(sub, KZT_KMEM_TEST3_ID);
+ KZT_TEST_FINI(sub, KZT_KMEM_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_KMEM_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_kmem_id(void) {
+ return KZT_SUBSYSTEM_KMEM;
+}
--- /dev/null
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_MUTEX 0x0400
+#define KZT_MUTEX_NAME "mutex"
+#define KZT_MUTEX_DESC "Kernel Mutex Tests"
+
+#define KZT_MUTEX_TEST1_ID 0x0401
+#define KZT_MUTEX_TEST1_NAME "tryenter"
+#define KZT_MUTEX_TEST1_DESC "Validate mutex_tryenter() correctness"
+
+#define KZT_MUTEX_TEST2_ID 0x0402
+#define KZT_MUTEX_TEST2_NAME "race"
+#define KZT_MUTEX_TEST2_DESC "Many threads entering/exiting the mutex"
+
+#define KZT_MUTEX_TEST3_ID 0x0403
+#define KZT_MUTEX_TEST3_NAME "owned"
+#define KZT_MUTEX_TEST3_DESC "Validate mutex_owned() correctness"
+
+#define KZT_MUTEX_TEST4_ID 0x0404
+#define KZT_MUTEX_TEST4_NAME "owner"
+#define KZT_MUTEX_TEST4_DESC "Validate mutex_owner() correctness"
+
+#define KZT_MUTEX_TEST_MAGIC 0x115599DDUL
+#define KZT_MUTEX_TEST_NAME "mutex_test"
+#define KZT_MUTEX_TEST_WORKQ "mutex_wq"
+#define KZT_MUTEX_TEST_COUNT 128
+
+typedef struct mutex_priv {
+ unsigned long mp_magic;
+ struct file *mp_file;
+ struct work_struct mp_work[KZT_MUTEX_TEST_COUNT];
+ kmutex_t mp_mtx;
+ int mp_rc;
+} mutex_priv_t;
+
+
+static void
+kzt_mutex_test1_work(void *priv)
+{
+ mutex_priv_t *mp = (mutex_priv_t *)priv;
+
+ ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
+ mp->mp_rc = 0;
+
+ if (!mutex_tryenter(&mp->mp_mtx))
+ mp->mp_rc = -EBUSY;
+}
+
+static int
+kzt_mutex_test1(struct file *file, void *arg)
+{
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ mutex_priv_t *mp;
+ int rc = 0;
+
+ mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (mp == NULL)
+ return -ENOMEM;
+
+ wq = create_singlethread_workqueue(KZT_MUTEX_TEST_WORKQ);
+ if (wq == NULL) {
+ rc = -ENOMEM;
+ goto out2;
+ }
+
+ mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+ mutex_enter(&(mp->mp_mtx));
+
+ mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
+ mp->mp_file = file;
+ INIT_WORK(&work, kzt_mutex_test1_work, mp);
+
+	/* Schedule a work item which will try to acquire the mutex via
+	 * mutex_tryenter() while it is held.  This should fail and the work
+	 * item will indicate this status in the passed private data. */
+ if (!queue_work(wq, &work)) {
+ mutex_exit(&(mp->mp_mtx));
+ rc = -EINVAL;
+ goto out;
+ }
+
+ flush_workqueue(wq);
+ mutex_exit(&(mp->mp_mtx));
+
+	/* Work item successfully acquired the mutex, very bad! */
+ if (mp->mp_rc != -EBUSY) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
+		   "mutex_tryenter() correctly failed when mutex held\n");
+
+	/* Schedule a work item which will try to acquire the mutex via
+	 * mutex_tryenter() while it is not held.  This should work and
+	 * the item will indicate this status in the passed private data. */
+ if (!queue_work(wq, &work)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ flush_workqueue(wq);
+
+	/* Work item failed to acquire the mutex, very bad! */
+ if (mp->mp_rc != 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
+	    "mutex_tryenter() correctly succeeded when mutex unheld\n");
+out:
+ mutex_destroy(&(mp->mp_mtx));
+ destroy_workqueue(wq);
+out2:
+ kfree(mp);
+
+ return rc;
+}
+
+static void
+kzt_mutex_test2_work(void *priv)
+{
+ mutex_priv_t *mp = (mutex_priv_t *)priv;
+ int rc;
+
+ ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
+
+ /* Read the value before sleeping and write it after we wake up to
+	 * maximize the chance of a race if mutexes are not working properly */
+ mutex_enter(&mp->mp_mtx);
+ rc = mp->mp_rc;
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 100); /* 1/100 of a second */
+ mp->mp_rc = rc + 1;
+ mutex_exit(&mp->mp_mtx);
+}
+
+static int
+kzt_mutex_test2(struct file *file, void *arg)
+{
+ struct workqueue_struct *wq;
+ mutex_priv_t *mp;
+ int i, rc = 0;
+
+ mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (mp == NULL)
+ return -ENOMEM;
+
+	/* Create a thread per CPU; work items on the queue will race */
+ wq = create_workqueue(KZT_MUTEX_TEST_WORKQ);
+ if (wq == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+ mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
+ mp->mp_file = file;
+ mp->mp_rc = 0;
+
+	/* Schedule N work items on the work queue, each of which enters the
+	 * mutex, sleeps briefly, then exits the mutex.  On a multiprocessor
+	 * box these work items will be handled by all available CPUs.  The
+	 * mutex is instrumented such that if any two processors are in the
+	 * critical region at the same time the system will panic.  If the
+	 * mutex is implemented right this will never happen, and that's a
+	 * pass. */
+ for (i = 0; i < KZT_MUTEX_TEST_COUNT; i++) {
+ INIT_WORK(&(mp->mp_work[i]), kzt_mutex_test2_work, mp);
+
+ if (!queue_work(wq, &(mp->mp_work[i]))) {
+ kzt_vprint(file, KZT_MUTEX_TEST2_NAME,
+ "Failed to queue work id %d\n", i);
+ rc = -EINVAL;
+ }
+ }
+
+ flush_workqueue(wq);
+
+ if (mp->mp_rc == KZT_MUTEX_TEST_COUNT) {
+ kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
+ "correctly entered/exited the mutex %d times\n",
+ num_online_cpus(), mp->mp_rc);
+ } else {
+ kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
+ "only processed %d/%d mutex work items\n",
+ num_online_cpus(), mp->mp_rc, KZT_MUTEX_TEST_COUNT);
+ rc = -EINVAL;
+ }
+
+ mutex_destroy(&(mp->mp_mtx));
+ destroy_workqueue(wq);
+out:
+ kfree(mp);
+
+ return rc;
+}
+
+static int
+kzt_mutex_test3(struct file *file, void *arg)
+{
+ kmutex_t mtx;
+ int rc = 0;
+
+ mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+ mutex_enter(&mtx);
+
+ /* Mutex should be owned by current */
+ if (!mutex_owned(&mtx)) {
+ kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
+ "be owned by pid %d but is owned by pid %d\n",
+ current->pid, mtx.km_owner ? mtx.km_owner->pid : -1);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ mutex_exit(&mtx);
+
+ /* Mutex should not be owned by any task */
+ if (mutex_owned(&mtx)) {
+ kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
+ "not be owned but is owned by pid %d\n",
+ mtx.km_owner ? mtx.km_owner->pid : -1);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "%s",
+ "Correct mutex_owned() behavior\n");
+out:
+ mutex_destroy(&mtx);
+
+ return rc;
+}
+
+static int
+kzt_mutex_test4(struct file *file, void *arg)
+{
+ kmutex_t mtx;
+ kthread_t *owner;
+ int rc = 0;
+
+ mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
+
+ mutex_enter(&mtx);
+
+ /* Mutex should be owned by current */
+ owner = mutex_owner(&mtx);
+ if (current != owner) {
+		kzt_vprint(file, KZT_MUTEX_TEST4_NAME, "Mutex should "
+ "be owned by pid %d but is owned by pid %d\n",
+ current->pid, owner ? owner->pid : -1);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ mutex_exit(&mtx);
+
+ /* Mutex should not be owned by any task */
+ owner = mutex_owner(&mtx);
+ if (owner) {
+		kzt_vprint(file, KZT_MUTEX_TEST4_NAME, "Mutex should not "
+ "be owned but is owned by pid %d\n", owner->pid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+	kzt_vprint(file, KZT_MUTEX_TEST4_NAME, "%s",
+ "Correct mutex_owner() behavior\n");
+out:
+ mutex_destroy(&mtx);
+
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_mutex_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_MUTEX_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_MUTEX_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_MUTEX;
+
+ KZT_TEST_INIT(sub, KZT_MUTEX_TEST1_NAME, KZT_MUTEX_TEST1_DESC,
+ KZT_MUTEX_TEST1_ID, kzt_mutex_test1);
+ KZT_TEST_INIT(sub, KZT_MUTEX_TEST2_NAME, KZT_MUTEX_TEST2_DESC,
+ KZT_MUTEX_TEST2_ID, kzt_mutex_test2);
+ KZT_TEST_INIT(sub, KZT_MUTEX_TEST3_NAME, KZT_MUTEX_TEST3_DESC,
+ KZT_MUTEX_TEST3_ID, kzt_mutex_test3);
+ KZT_TEST_INIT(sub, KZT_MUTEX_TEST4_NAME, KZT_MUTEX_TEST4_DESC,
+ KZT_MUTEX_TEST4_ID, kzt_mutex_test4);
+
+ return sub;
+}
+
+void
+kzt_mutex_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_MUTEX_TEST4_ID);
+ KZT_TEST_FINI(sub, KZT_MUTEX_TEST3_ID);
+ KZT_TEST_FINI(sub, KZT_MUTEX_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_MUTEX_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_mutex_id(void) {
+ return KZT_SUBSYSTEM_MUTEX;
+}
--- /dev/null
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_KRNG 0x0300
+#define KZT_KRNG_NAME "krng"
+#define KZT_KRNG_DESC "Kernel Random Number Generator Tests"
+
+#define KZT_KRNG_TEST1_ID 0x0301
+#define KZT_KRNG_TEST1_NAME "freq"
+#define KZT_KRNG_TEST1_DESC "Frequency Test"
+
+#define KRNG_NUM_BITS 1048576
+#define KRNG_NUM_BYTES (KRNG_NUM_BITS >> 3)
+#define KRNG_NUM_BITS_DIV2 (KRNG_NUM_BITS >> 1)
+#define KRNG_ERROR_RANGE 2097
+
+/* Random Number Generator Tests
+   There can be many more tests on the quality of the
+   random number generator.  For now we are only
+   testing the frequency of particular bits.
+   We could also test consecutive sequences,
+   randomness within a particular block, etc.,
+   but that is probably not necessary for our purposes */
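+
+/* For context on the bound above (a rough sanity check, not a rigorous
+ * statistical test): with n = 2^20 fair bits the number of ones is
+ * Binomial(n, 1/2), i.e. mean n/2 = 524288 and standard deviation
+ * sqrt(n)/2 = 512, so KRNG_ERROR_RANGE of 2097 allows roughly a four
+ * sigma deviation, which a good generator should essentially never
+ * exceed. */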
+
+static int
+kzt_krng_test1(struct file *file, void *arg)
+{
+ uint8_t *buf;
+ int i, j, diff, num = 0, rc = 0;
+
+ buf = kmalloc(sizeof(*buf) * KRNG_NUM_BYTES, GFP_KERNEL);
+ if (buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memset(buf, 0, sizeof(*buf) * KRNG_NUM_BYTES);
+
+ /* Always succeeds */
+ random_get_pseudo_bytes(buf, sizeof(uint8_t) * KRNG_NUM_BYTES);
+
+ for (i = 0; i < KRNG_NUM_BYTES; i++) {
+ uint8_t tmp = buf[i];
+ for (j = 0; j < 8; j++) {
+ uint8_t tmp2 = ((tmp >> j) & 0x01);
+ if (tmp2 == 1) {
+ num++;
+ }
+ }
+ }
+
+ kfree(buf);
+
+ diff = KRNG_NUM_BITS_DIV2 - num;
+ if (diff < 0)
+ diff *= -1;
+
+ kzt_print(file, "Test 1 Number of ones: %d\n", num);
+ kzt_print(file, "Test 1 Difference from expected: %d Allowed: %d\n",
+ diff, KRNG_ERROR_RANGE);
+
+ if (diff > KRNG_ERROR_RANGE)
+ rc = -ERANGE;
+out:
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_krng_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_KRNG_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_KRNG_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_KRNG;
+
+ KZT_TEST_INIT(sub, KZT_KRNG_TEST1_NAME, KZT_KRNG_TEST1_DESC,
+ KZT_KRNG_TEST1_ID, kzt_krng_test1);
+
+ return sub;
+}
+
+void
+kzt_krng_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+
+ KZT_TEST_FINI(sub, KZT_KRNG_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_krng_id(void) {
+ return KZT_SUBSYSTEM_KRNG;
+}
--- /dev/null
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_RWLOCK 0x0700
+#define KZT_RWLOCK_NAME "rwlock"
+#define KZT_RWLOCK_DESC "Kernel RW Lock Tests"
+
+#define KZT_RWLOCK_TEST1_ID 0x0701
+#define KZT_RWLOCK_TEST1_NAME "rwtest1"
+#define KZT_RWLOCK_TEST1_DESC "Multiple Readers One Writer"
+
+#define KZT_RWLOCK_TEST2_ID 0x0702
+#define KZT_RWLOCK_TEST2_NAME "rwtest2"
+#define KZT_RWLOCK_TEST2_DESC "Multiple Writers"
+
+#define KZT_RWLOCK_TEST3_ID 0x0703
+#define KZT_RWLOCK_TEST3_NAME "rwtest3"
+#define KZT_RWLOCK_TEST3_DESC "Owner Verification"
+
+#define KZT_RWLOCK_TEST4_ID 0x0704
+#define KZT_RWLOCK_TEST4_NAME "rwtest4"
+#define KZT_RWLOCK_TEST4_DESC "Trylock Test"
+
+#define KZT_RWLOCK_TEST5_ID 0x0705
+#define KZT_RWLOCK_TEST5_NAME "rwtest5"
+#define KZT_RWLOCK_TEST5_DESC "Write Downgrade Test"
+
+#define KZT_RWLOCK_TEST6_ID 0x0706
+#define KZT_RWLOCK_TEST6_NAME "rwtest6"
+#define KZT_RWLOCK_TEST6_DESC "Read Upgrade Test"
+
+#define KZT_RWLOCK_TEST_MAGIC 0x115599DDUL
+#define KZT_RWLOCK_TEST_NAME "rwlock_test"
+#define KZT_RWLOCK_TEST_COUNT 8
+
+#define KZT_RWLOCK_RELEASE_INIT 0
+#define KZT_RWLOCK_RELEASE_WRITERS 1
+#define KZT_RWLOCK_RELEASE_READERS 2
+
+typedef struct rw_priv {
+ unsigned long rw_magic;
+ struct file *rw_file;
+ krwlock_t rwl;
+ spinlock_t rw_priv_lock;
+ wait_queue_head_t rw_waitq;
+ atomic_t rw_completed;
+ atomic_t rw_acquired;
+ atomic_t rw_waiters;
+ atomic_t rw_release;
+} rw_priv_t;
+
+typedef struct rw_thr {
+ int rwt_id;
+ const char *rwt_name;
+ rw_priv_t *rwt_rwp;
+ int rwt_rc;
+} rw_thr_t;
+
+static inline void
+kzt_rwlock_sleep(signed long delay)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(delay);
+}
+
+#define kzt_rwlock_lock_and_test(lock,test) \
+({ \
+ int ret = 0; \
+ \
+ spin_lock(lock); \
+ ret = (test) ? 1 : 0; \
+ spin_unlock(lock); \
+ ret; \
+})
+
+void kzt_init_rw_priv(rw_priv_t *rwv, struct file *file)
+{
+ rwv->rw_magic = KZT_RWLOCK_TEST_MAGIC;
+ rwv->rw_file = file;
+ spin_lock_init(&rwv->rw_priv_lock);
+ init_waitqueue_head(&rwv->rw_waitq);
+ atomic_set(&rwv->rw_completed, 0);
+ atomic_set(&rwv->rw_acquired, 0);
+ atomic_set(&rwv->rw_waiters, 0);
+ atomic_set(&rwv->rw_release, KZT_RWLOCK_RELEASE_INIT);
+
+ /* Initialize the read/write lock */
+ rw_init(&rwv->rwl, KZT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
+}
+
+int
+kzt_rwlock_test1_writer_thread(void *arg)
+{
+ rw_thr_t *rwt = (rw_thr_t *)arg;
+ rw_priv_t *rwv = rwt->rwt_rwp;
+ uint8_t rnd = 0;
+ char name[16];
+
+ ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d",
+ KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+ daemonize(name);
+ get_random_bytes((void *)&rnd, 1);
+ kzt_rwlock_sleep(rnd * HZ / 1000);
+
+ spin_lock(&rwv->rw_priv_lock);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread trying to acquire rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ atomic_inc(&rwv->rw_waiters);
+ spin_unlock(&rwv->rw_priv_lock);
+
+	/* Take the semaphore for writing;
+ * release it when we are told to */
+ rw_enter(&rwv->rwl, RW_WRITER);
+
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_dec(&rwv->rw_waiters);
+ atomic_inc(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread acquired rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Wait here until the control thread
+ * says we can release the write lock */
+ wait_event_interruptible(rwv->rw_waitq,
+ kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+ atomic_read(&rwv->rw_release) ==
+ KZT_RWLOCK_RELEASE_WRITERS));
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_inc(&rwv->rw_completed);
+ atomic_dec(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread dropped rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Release the semaphore */
+ rw_exit(&rwv->rwl);
+ return 0;
+}
+
+int
+kzt_rwlock_test1_reader_thread(void *arg)
+{
+ rw_thr_t *rwt = (rw_thr_t *)arg;
+ rw_priv_t *rwv = rwt->rwt_rwp;
+ uint8_t rnd = 0;
+ char name[16];
+
+ ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d",
+ KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+ daemonize(name);
+ get_random_bytes((void *)&rnd, 1);
+ kzt_rwlock_sleep(rnd * HZ / 1000);
+
+	/* Don't try to take the semaphore until
+ * someone else has already acquired it */
+ wait_event_interruptible(rwv->rw_waitq,
+ kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+ atomic_read(&rwv->rw_acquired) > 0));
+
+ spin_lock(&rwv->rw_priv_lock);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread trying to acquire rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ atomic_inc(&rwv->rw_waiters);
+ spin_unlock(&rwv->rw_priv_lock);
+
+	/* Take the semaphore for reading;
+ * release it when we are told to */
+ rw_enter(&rwv->rwl, RW_READER);
+
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_dec(&rwv->rw_waiters);
+ atomic_inc(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread acquired rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Wait here until the control thread
+ * says we can release the read lock */
+ wait_event_interruptible(rwv->rw_waitq,
+ kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+ atomic_read(&rwv->rw_release) ==
+ KZT_RWLOCK_RELEASE_READERS));
+
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_inc(&rwv->rw_completed);
+ atomic_dec(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread dropped rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Release the semaphore */
+ rw_exit(&rwv->rwl);
+ return 0;
+}
+
+static int
+kzt_rwlock_test1(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_RWLOCK_TEST_COUNT];
+ rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
+ rw_priv_t rwv;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+ /* Create some threads, the exact number isn't important just as
+ * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+ rwt[i].rwt_rwp = &rwv;
+ rwt[i].rwt_id = i;
+ rwt[i].rwt_name = KZT_RWLOCK_TEST1_NAME;
+ rwt[i].rwt_rc = 0;
+
+ /* The first thread will be a writer */
+ if (i == 0) {
+ pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
+ &rwt[i], 0);
+ } else {
+ pids[i] = kernel_thread(kzt_rwlock_test1_reader_thread,
+ &rwt[i], 0);
+ }
+
+ if (pids[i] >= 0) {
+ count++;
+ }
+ }
+
+ /* Once the writer has the lock, release the readers */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock, atomic_read(&rwv.rw_acquired) <= 0)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+ wake_up_interruptible(&rwv.rw_waitq);
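+	/* The readers were blocked in wait_event_interruptible() until
+	 * rw_acquired > 0, so this wake-up lets them queue up behind
+	 * the writer */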
+
+ /* Ensure that there is only 1 writer and all readers are waiting */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) != 1 ||
+ atomic_read(&rwv.rw_waiters) !=
+ KZT_RWLOCK_TEST_COUNT - 1)) {
+
+ kzt_rwlock_sleep(1 * HZ);
+ }
+	/* Release the writer */
+ spin_lock(&rwv.rw_priv_lock);
+ atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
+ spin_unlock(&rwv.rw_priv_lock);
+ wake_up_interruptible(&rwv.rw_waitq);
+
+ /* Now ensure that there are multiple reader threads holding the lock */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) <= 1)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+ /* Release the readers */
+ spin_lock(&rwv.rw_priv_lock);
+ atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_READERS);
+ spin_unlock(&rwv.rw_priv_lock);
+ wake_up_interruptible(&rwv.rw_waitq);
+
+ /* Wait for the test to complete */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) != 0 ||
+ atomic_read(&rwv.rw_waiters) != 0)) {
+ kzt_rwlock_sleep(1 * HZ);
+	}
+
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+int
+kzt_rwlock_test2_writer_thread(void *arg)
+{
+ rw_thr_t *rwt = (rw_thr_t *)arg;
+ rw_priv_t *rwv = rwt->rwt_rwp;
+ uint8_t rnd = 0;
+ char name[16];
+
+ ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d",
+ KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+ daemonize(name);
+ get_random_bytes((void *)&rnd, 1);
+ kzt_rwlock_sleep(rnd * HZ / 1000);
+
+	/* Increment the waiters count here even though we are not
+	 * quite about to call rw_enter().  Not really a big deal
+	 * since it will more than likely be true by the time we
+	 * simulate work later on */
+ spin_lock(&rwv->rw_priv_lock);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread trying to acquire rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ atomic_inc(&rwv->rw_waiters);
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Wait here until the control thread
+ * says we can acquire the write lock */
+ wait_event_interruptible(rwv->rw_waitq,
+ kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+ atomic_read(&rwv->rw_release) ==
+ KZT_RWLOCK_RELEASE_WRITERS));
+
+ /* Take the semaphore for writing */
+ rw_enter(&rwv->rwl, RW_WRITER);
+
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_dec(&rwv->rw_waiters);
+ atomic_inc(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread acquired rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Give up the processor for a bit to simulate
+ * doing some work while taking the write lock */
+ kzt_rwlock_sleep(rnd * HZ / 1000);
+
+ /* Ensure that we are the only one writing */
+ if (atomic_read(&rwv->rw_acquired) > 1) {
+ rwt->rwt_rc = 1;
+ } else {
+ rwt->rwt_rc = 0;
+ }
+
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_inc(&rwv->rw_completed);
+ atomic_dec(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s writer thread dropped rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ rw_exit(&rwv->rwl);
+
+ return 0;
+}
+
+static int
+kzt_rwlock_test2(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_RWLOCK_TEST_COUNT];
+ rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
+ rw_priv_t rwv;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+	/* Create some threads; the exact number isn't important just as
+	 * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+ rwt[i].rwt_rwp = &rwv;
+ rwt[i].rwt_id = i;
+ rwt[i].rwt_name = KZT_RWLOCK_TEST2_NAME;
+ rwt[i].rwt_rc = 0;
+
+		/* Unlike test 1, every thread is a writer in this test */
+ pids[i] = kernel_thread(kzt_rwlock_test2_writer_thread,
+ &rwt[i], 0);
+
+ if (pids[i] >= 0) {
+ count++;
+ }
+ }
+
+ /* Wait for writers to get queued up */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_waiters) < KZT_RWLOCK_TEST_COUNT)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+	/* Release the writers */
+ spin_lock(&rwv.rw_priv_lock);
+ atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
+ spin_unlock(&rwv.rw_priv_lock);
+ wake_up_interruptible(&rwv.rw_waitq);
+
+ /* Wait for the test to complete */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) != 0 ||
+ atomic_read(&rwv.rw_waiters) != 0)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+
+ /* If any of the write threads ever acquired the lock
+ * while another thread had it, make sure we return
+ * an error */
+ for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+ if (rwt[i].rwt_rc) {
+ rc++;
+ }
+ }
+
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+static int
+kzt_rwlock_test3(struct file *file, void *arg)
+{
+ kthread_t *owner;
+ rw_priv_t rwv;
+ int rc = 0;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+ /* Take the rwlock for writing */
+ rw_enter(&rwv.rwl, RW_WRITER);
+ owner = rw_owner(&rwv.rwl);
+ if (current != owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should "
+ "be owned by pid %d but is owned by pid %d\n",
+ current->pid, owner ? owner->pid : -1);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+ owner = rw_owner(&rwv.rwl);
+ if (owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
+ "be owned but is owned by pid %d\n", owner->pid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Take the rwlock for reading.
+ * Should not have an owner */
+ rw_enter(&rwv.rwl, RW_READER);
+ owner = rw_owner(&rwv.rwl);
+ if (owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
+ "be owned but is owned by pid %d\n", owner->pid);
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+
+out:
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+int
+kzt_rwlock_test4_reader_thread(void *arg)
+{
+ rw_thr_t *rwt = (rw_thr_t *)arg;
+ rw_priv_t *rwv = rwt->rwt_rwp;
+ uint8_t rnd = 0;
+ char name[16];
+
+ ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
+ snprintf(name, sizeof(name), "%s%d",
+ KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
+ daemonize(name);
+ get_random_bytes((void *)&rnd, 1);
+ kzt_rwlock_sleep(rnd * HZ / 1000);
+
+	/* Don't try to take the semaphore until
+	 * someone else has already acquired it */
+ wait_event_interruptible(rwv->rw_waitq,
+ kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
+ atomic_read(&rwv->rw_acquired) > 0));
+
+ spin_lock(&rwv->rw_priv_lock);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread trying to acquire rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+	/* Attempt to take the semaphore for reading; rw_tryenter()
+	 * must not block and returns 1 only if the lock was acquired */
+	rwt->rwt_rc = rw_tryenter(&rwv->rwl, RW_READER);
+
+	/* Here we acquired the lock; this is a
+	 * failure since the writer should be
+	 * holding the lock */
+ if (rwt->rwt_rc == 1) {
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_inc(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread acquired rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+		spin_lock(&rwv->rw_priv_lock);
+		/* Count this thread as completed even in the failure
+		 * case so the control thread's wait on rw_completed
+		 * does not hang forever */
+		atomic_inc(&rwv->rw_completed);
+		atomic_dec(&rwv->rw_acquired);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread dropped rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+
+ /* Release the semaphore */
+ rw_exit(&rwv->rwl);
+ }
+ /* Here we know we didn't block and didn't
+ * acquire the rwlock for reading */
+ else {
+ spin_lock(&rwv->rw_priv_lock);
+ atomic_inc(&rwv->rw_completed);
+ kzt_vprint(rwv->rw_file, rwt->rwt_name,
+ "%s reader thread could not acquire rwlock with "
+ "%d holding lock and %d waiting\n",
+ name, atomic_read(&rwv->rw_acquired),
+ atomic_read(&rwv->rw_waiters));
+ spin_unlock(&rwv->rw_priv_lock);
+ }
+
+ return 0;
+}
+
+static int
+kzt_rwlock_test4(struct file *file, void *arg)
+{
+ int i, count = 0, rc = 0;
+ long pids[KZT_RWLOCK_TEST_COUNT];
+ rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
+ rw_priv_t rwv;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+	/* Create some threads; the exact number isn't important just as
+	 * long as we know how many we managed to create and should expect. */
+ for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+ rwt[i].rwt_rwp = &rwv;
+ rwt[i].rwt_id = i;
+ rwt[i].rwt_name = KZT_RWLOCK_TEST4_NAME;
+ rwt[i].rwt_rc = 0;
+
+ /* The first thread will be a writer */
+ if (i == 0) {
+ /* We can reuse the test1 writer thread here */
+ pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
+ &rwt[i], 0);
+ } else {
+ pids[i] = kernel_thread(kzt_rwlock_test4_reader_thread,
+ &rwt[i], 0);
+ }
+
+ if (pids[i] >= 0) {
+ count++;
+ }
+ }
+
+ /* Once the writer has the lock, release the readers */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) <= 0)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+ wake_up_interruptible(&rwv.rw_waitq);
+
+ /* Make sure that the reader threads complete */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_completed) != KZT_RWLOCK_TEST_COUNT - 1)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+ /* Release the writer */
+ spin_lock(&rwv.rw_priv_lock);
+ atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
+ spin_unlock(&rwv.rw_priv_lock);
+ wake_up_interruptible(&rwv.rw_waitq);
+
+ /* Wait for the test to complete */
+ while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
+ atomic_read(&rwv.rw_acquired) != 0 ||
+ atomic_read(&rwv.rw_waiters) != 0)) {
+ kzt_rwlock_sleep(1 * HZ);
+ }
+
+ /* If any of the reader threads ever acquired the lock
+ * while another thread had it, make sure we return
+ * an error since the rw_tryenter() should have failed */
+ for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
+ if (rwt[i].rwt_rc) {
+ rc++;
+ }
+ }
+
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+static int
+kzt_rwlock_test5(struct file *file, void *arg)
+{
+ kthread_t *owner;
+ rw_priv_t rwv;
+ int rc = 0;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+ /* Take the rwlock for writing */
+ rw_enter(&rwv.rwl, RW_WRITER);
+ owner = rw_owner(&rwv.rwl);
+ if (current != owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should "
+ "be owned by pid %d but is owned by pid %d\n",
+ current->pid, owner ? owner->pid : -1);
+ rc = -EINVAL;
+ goto out;
+ }
+
+	/* Downgrade the write lock to a read lock and make sure
+	 * the owner was cleared, since readers are not tracked
+	 * as owners */
+	rw_downgrade(&rwv.rwl);
+
+ owner = rw_owner(&rwv.rwl);
+ if (owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should not "
+ "be owned but is owned by pid %d\n", owner->pid);
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+
+out:
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+static int
+kzt_rwlock_test6(struct file *file, void *arg)
+{
+ kthread_t *owner;
+ rw_priv_t rwv;
+ int rc = 0;
+
+ /* Initialize private data
+ * including the rwlock */
+ kzt_init_rw_priv(&rwv, file);
+
+ /* Take the rwlock for reading */
+ rw_enter(&rwv.rwl, RW_READER);
+ owner = rw_owner(&rwv.rwl);
+ if (owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should not "
+ "be owned but is owned by pid %d\n", owner->pid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+	/* Attempt to upgrade the read lock to a write lock;
+	 * rw_tryupgrade() returns non-zero on success so invert
+	 * it for our failure flag */
+	rc = !rw_tryupgrade(&rwv.rwl);
+
+ owner = rw_owner(&rwv.rwl);
+ if (rc || current != owner) {
+ kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should "
+ "be owned by pid %d but is owned by pid %d "
+ "trylock rc %d\n",
+ current->pid, owner ? owner->pid : -1, rc);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Release the rwlock */
+ rw_exit(&rwv.rwl);
+
+out:
+ rw_destroy(&rwv.rwl);
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_rwlock_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_RWLOCK_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_RWLOCK_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_RWLOCK;
+
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST1_NAME, KZT_RWLOCK_TEST1_DESC,
+ KZT_RWLOCK_TEST1_ID, kzt_rwlock_test1);
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST2_NAME, KZT_RWLOCK_TEST2_DESC,
+ KZT_RWLOCK_TEST2_ID, kzt_rwlock_test2);
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST3_NAME, KZT_RWLOCK_TEST3_DESC,
+ KZT_RWLOCK_TEST3_ID, kzt_rwlock_test3);
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST4_NAME, KZT_RWLOCK_TEST4_DESC,
+ KZT_RWLOCK_TEST4_ID, kzt_rwlock_test4);
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST5_NAME, KZT_RWLOCK_TEST5_DESC,
+ KZT_RWLOCK_TEST5_ID, kzt_rwlock_test5);
+ KZT_TEST_INIT(sub, KZT_RWLOCK_TEST6_NAME, KZT_RWLOCK_TEST6_DESC,
+ KZT_RWLOCK_TEST6_ID, kzt_rwlock_test6);
+
+ return sub;
+}
+
+void
+kzt_rwlock_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST6_ID);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST5_ID);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST4_ID);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST3_ID);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_RWLOCK_TEST1_ID);
+ kfree(sub);
+}
+
+int
+kzt_rwlock_id(void) {
+ return KZT_SUBSYSTEM_RWLOCK;
+}
--- /dev/null
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_TASKQ 0x0200
+#define KZT_TASKQ_NAME "taskq"
+#define KZT_TASKQ_DESC "Kernel Task Queue Tests"
+
+#define KZT_TASKQ_TEST1_ID 0x0201
+#define KZT_TASKQ_TEST1_NAME "single"
+#define KZT_TASKQ_TEST1_DESC "Single task queue, single task"
+
+#define KZT_TASKQ_TEST2_ID 0x0202
+#define KZT_TASKQ_TEST2_NAME "multiple"
+#define KZT_TASKQ_TEST2_DESC "Multiple task queues, multiple tasks"
+
+typedef struct kzt_taskq_arg {
+ int flag;
+ int id;
+ struct file *file;
+ const char *name;
+} kzt_taskq_arg_t;
+
+/* Validation Test 1 - Create a taskq, queue a task, wait until
+ * the task completes, ensure the task ran properly, then clean
+ * up the taskq.
+ */
+static void
+kzt_taskq_test1_func(void *arg)
+{
+ kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
+
+ ASSERT(tq_arg);
+ kzt_vprint(tq_arg->file, KZT_TASKQ_TEST1_NAME,
+ "Taskq '%s' function '%s' setting flag\n",
+ tq_arg->name, sym2str(kzt_taskq_test1_func));
+ tq_arg->flag = 1;
+}
+
+static int
+kzt_taskq_test1(struct file *file, void *arg)
+{
+ taskq_t *tq;
+ taskqid_t id;
+ kzt_taskq_arg_t tq_arg;
+
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' creating\n",
+ KZT_TASKQ_TEST1_NAME);
+ if ((tq = taskq_create(KZT_TASKQ_TEST1_NAME, 1, 0, 0, 0, 0)) == NULL) {
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
+ "Taskq '%s' create failed\n",
+ KZT_TASKQ_TEST1_NAME);
+ return -EINVAL;
+ }
+
+ tq_arg.flag = 0;
+ tq_arg.id = 0;
+ tq_arg.file = file;
+ tq_arg.name = KZT_TASKQ_TEST1_NAME;
+
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
+ "Taskq '%s' function '%s' dispatching\n",
+ tq_arg.name, sym2str(kzt_taskq_test1_func));
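+	/* taskq_dispatch() returns a non-zero taskqid on success and
+	 * 0 on failure, so a 0 return means the task was never queued */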
+ if ((id = taskq_dispatch(tq, kzt_taskq_test1_func, &tq_arg, 0)) == 0) {
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
+ "Taskq '%s' function '%s' dispatch failed\n",
+ tq_arg.name, sym2str(kzt_taskq_test1_func));
+ taskq_destory(tq);
+ return -EINVAL;
+ }
+
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
+ tq_arg.name);
+ taskq_wait(tq);
+ kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
+ tq_arg.name);
+ taskq_destory(tq);
+
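+	/* The dispatched function sets tq_arg.flag, so a zero flag here
+	 * means the task never actually ran */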
+ return (tq_arg.flag) ? 0 : -EINVAL;
+}
+
+/* Validation Test 2 - Create multiple taskqs, each with multiple tasks,
+ * wait until all tasks complete, ensure all tasks ran properly and in
+ * the correct order, then clean up the taskqs.
+ */
+static void
+kzt_taskq_test2_func1(void *arg)
+{
+ kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
+
+ ASSERT(tq_arg);
+ kzt_vprint(tq_arg->file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' flag = %d = %d * 2\n",
+ tq_arg->name, tq_arg->id,
+ sym2str(kzt_taskq_test2_func1),
+ tq_arg->flag * 2, tq_arg->flag);
+ tq_arg->flag *= 2;
+}
+
+static void
+kzt_taskq_test2_func2(void *arg)
+{
+ kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
+
+ ASSERT(tq_arg);
+ kzt_vprint(tq_arg->file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
+ tq_arg->name, tq_arg->id,
+ sym2str(kzt_taskq_test2_func2),
+ tq_arg->flag + 1, tq_arg->flag);
+ tq_arg->flag += 1;
+}
+
+#define TEST2_TASKQS 8
+static int
+kzt_taskq_test2(struct file *file, void *arg)
+{
+ taskq_t *tq[TEST2_TASKQS] = { NULL };
+ taskqid_t id;
+ kzt_taskq_arg_t tq_args[TEST2_TASKQS];
+ int i, rc = 0;
+
+ for (i = 0; i < TEST2_TASKQS; i++) {
+
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME, "Taskq '%s/%d' "
+ "creating\n", KZT_TASKQ_TEST2_NAME, i);
+ if ((tq[i] = taskq_create(KZT_TASKQ_TEST2_NAME,
+ 1, 0, 0, 0, 0)) == NULL) {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' create failed\n",
+ KZT_TASKQ_TEST2_NAME, i);
+ rc = -EINVAL;
+ break;
+ }
+
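+		/* Seed the flag with the queue index; func1 doubles it and
+		 * func2 then adds one, so in-order execution leaves the
+		 * flag equal to i * 2 + 1 */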
+ tq_args[i].flag = i;
+ tq_args[i].id = i;
+ tq_args[i].file = file;
+ tq_args[i].name = KZT_TASKQ_TEST2_NAME;
+
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' dispatching\n",
+ tq_args[i].name, tq_args[i].id,
+ sym2str(kzt_taskq_test2_func1));
+ if ((id = taskq_dispatch(
+ tq[i], kzt_taskq_test2_func1, &tq_args[i], 0)) == 0) {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' dispatch "
+ "failed\n", tq_args[i].name, tq_args[i].id,
+ sym2str(kzt_taskq_test2_func1));
+ rc = -EINVAL;
+ break;
+ }
+
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' dispatching\n",
+ tq_args[i].name, tq_args[i].id,
+ sym2str(kzt_taskq_test2_func2));
+ if ((id = taskq_dispatch(
+ tq[i], kzt_taskq_test2_func2, &tq_args[i], 0)) == 0) {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' function '%s' dispatch failed\n",
+ tq_args[i].name, tq_args[i].id,
+ sym2str(kzt_taskq_test2_func2));
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+ /* When rc is set we're effectively just doing cleanup here, so
+ * ignore new errors in that case. They just cause noise. */
+ for (i = 0; i < TEST2_TASKQS; i++) {
+ if (tq[i] != NULL) {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' waiting\n",
+ tq_args[i].name, tq_args[i].id);
+ taskq_wait(tq[i]);
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+				   "Taskq '%s/%d' destroying\n",
+ tq_args[i].name, tq_args[i].id);
+ taskq_destory(tq[i]);
+
+ if (!rc && tq_args[i].flag != ((i * 2) + 1)) {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' processed tasks "
+ "out of order; %d != %d\n",
+ tq_args[i].name, tq_args[i].id,
+ tq_args[i].flag, i * 2 + 1);
+ rc = -EINVAL;
+ } else {
+ kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
+ "Taskq '%s/%d' processed tasks "
+ "in the correct order; %d == %d\n",
+ tq_args[i].name, tq_args[i].id,
+ tq_args[i].flag, i * 2 + 1);
+ }
+ }
+ }
+
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_taskq_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_TASKQ_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_TASKQ_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_TASKQ;
+
+ KZT_TEST_INIT(sub, KZT_TASKQ_TEST1_NAME, KZT_TASKQ_TEST1_DESC,
+ KZT_TASKQ_TEST1_ID, kzt_taskq_test1);
+ KZT_TEST_INIT(sub, KZT_TASKQ_TEST2_NAME, KZT_TASKQ_TEST2_DESC,
+ KZT_TASKQ_TEST2_ID, kzt_taskq_test2);
+
+ return sub;
+}
+
+void
+kzt_taskq_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_TASKQ_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_TASKQ_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_taskq_id(void) {
+ return KZT_SUBSYSTEM_TASKQ;
+}
--- /dev/null
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_THREAD 0x0600
+#define KZT_THREAD_NAME "thread"
+#define KZT_THREAD_DESC "Kernel Thread Tests"
+
+#define KZT_THREAD_TEST1_ID 0x0601
+#define KZT_THREAD_TEST1_NAME "create"
+#define KZT_THREAD_TEST1_DESC "Validate thread creation and destruction"
+
+#define KZT_THREAD_TEST_MAGIC 0x4488CC00UL
+
+typedef struct thread_priv {
+ unsigned long tp_magic;
+ struct file *tp_file;
+ spinlock_t tp_lock;
+ wait_queue_head_t tp_waitq;
+ int tp_rc;
+} thread_priv_t;
+
+static void
+kzt_thread_work(void *priv)
+{
+ thread_priv_t *tp = (thread_priv_t *)priv;
+
+ spin_lock(&tp->tp_lock);
+ ASSERT(tp->tp_magic == KZT_THREAD_TEST_MAGIC);
+ tp->tp_rc = 1;
+
+ spin_unlock(&tp->tp_lock);
+ wake_up(&tp->tp_waitq);
+
+ thread_exit();
+}
+
+static int
+kzt_thread_test1(struct file *file, void *arg)
+{
+ thread_priv_t tp;
+ DEFINE_WAIT(wait);
+ kthread_t *thr;
+ int rc = 0;
+
+ tp.tp_magic = KZT_THREAD_TEST_MAGIC;
+ tp.tp_file = file;
+ spin_lock_init(&tp.tp_lock);
+ init_waitqueue_head(&tp.tp_waitq);
+ tp.tp_rc = 0;
+
+ spin_lock(&tp.tp_lock);
+
+ thr = (kthread_t *)thread_create(NULL, 0, kzt_thread_work, &tp, 0,
+ (proc_t *) &p0, TS_RUN, minclsyspri);
+ /* Must never fail under Solaris, but we check anyway so we can
+ * report an error when this impossible thing happens */
+ if (thr == NULL) {
+ rc = -ESRCH;
+ goto out;
+ }
+
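+	/* Sleep until the worker thread sets tp_rc; tp_lock is dropped
+	 * around schedule() so the worker can take it to signal us */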
+ for (;;) {
+ prepare_to_wait(&tp.tp_waitq, &wait, TASK_UNINTERRUPTIBLE);
+ if (tp.tp_rc)
+ break;
+
+ spin_unlock(&tp.tp_lock);
+ schedule();
+ spin_lock(&tp.tp_lock);
+	}
+	finish_wait(&tp.tp_waitq, &wait);
+
+ kzt_vprint(file, KZT_THREAD_TEST1_NAME, "%s",
+ "Thread successfully started and exited cleanly\n");
+out:
+ spin_unlock(&tp.tp_lock);
+
+ return rc;
+}
+
+kzt_subsystem_t *
+kzt_thread_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_THREAD_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_THREAD_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_THREAD;
+
+ KZT_TEST_INIT(sub, KZT_THREAD_TEST1_NAME, KZT_THREAD_TEST1_DESC,
+ KZT_THREAD_TEST1_ID, kzt_thread_test1);
+
+ return sub;
+}
+
+void
+kzt_thread_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+ KZT_TEST_FINI(sub, KZT_THREAD_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_thread_id(void) {
+ return KZT_SUBSYSTEM_THREAD;
+}
--- /dev/null
+#include <splat-ctl.h>
+
+#define KZT_SUBSYSTEM_TIME 0x0800
+#define KZT_TIME_NAME "time"
+#define KZT_TIME_DESC "Kernel Time Tests"
+
+#define KZT_TIME_TEST1_ID 0x0801
+#define KZT_TIME_TEST1_NAME "time1"
+#define KZT_TIME_TEST1_DESC "HZ Test"
+
+#define KZT_TIME_TEST2_ID 0x0802
+#define KZT_TIME_TEST2_NAME "time2"
+#define KZT_TIME_TEST2_DESC "Monotonic Test"
+
+static int
+kzt_time_test1(struct file *file, void *arg)
+{
+ int myhz = hz;
+ kzt_vprint(file, KZT_TIME_TEST1_NAME, "hz is %d\n", myhz);
+ return 0;
+}
+
+static int
+kzt_time_test2(struct file *file, void *arg)
+{
+ hrtime_t tm1, tm2;
+ int i;
+
+ tm1 = gethrtime();
+ kzt_vprint(file, KZT_TIME_TEST2_NAME, "time is %lld\n", tm1);
+
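+	/* Sample gethrtime() 100 times with a short sleep between
+	 * samples and fail if the value ever decreases */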
+	for (i = 0; i < 100; i++) {
+ tm2 = gethrtime();
+ kzt_vprint(file, KZT_TIME_TEST2_NAME, "time is %lld\n", tm2);
+
+		if (tm1 > tm2) {
+			kzt_print(file, "%s: gethrtime() is not giving "
+				  "monotonically increasing values\n",
+				  KZT_TIME_TEST2_NAME);
+ return 1;
+ }
+ tm1 = tm2;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(10);
+ }
+
+ return 0;
+}
+
+kzt_subsystem_t *
+kzt_time_init(void)
+{
+ kzt_subsystem_t *sub;
+
+ sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+ if (sub == NULL)
+ return NULL;
+
+ memset(sub, 0, sizeof(*sub));
+ strncpy(sub->desc.name, KZT_TIME_NAME, KZT_NAME_SIZE);
+ strncpy(sub->desc.desc, KZT_TIME_DESC, KZT_DESC_SIZE);
+ INIT_LIST_HEAD(&sub->subsystem_list);
+ INIT_LIST_HEAD(&sub->test_list);
+ spin_lock_init(&sub->test_lock);
+ sub->desc.id = KZT_SUBSYSTEM_TIME;
+
+ KZT_TEST_INIT(sub, KZT_TIME_TEST1_NAME, KZT_TIME_TEST1_DESC,
+ KZT_TIME_TEST1_ID, kzt_time_test1);
+ KZT_TEST_INIT(sub, KZT_TIME_TEST2_NAME, KZT_TIME_TEST2_DESC,
+ KZT_TIME_TEST2_ID, kzt_time_test2);
+
+ return sub;
+}
+
+void
+kzt_time_fini(kzt_subsystem_t *sub)
+{
+ ASSERT(sub);
+
+ KZT_TEST_FINI(sub, KZT_TIME_TEST2_ID);
+ KZT_TEST_FINI(sub, KZT_TIME_TEST1_ID);
+
+ kfree(sub);
+}
+
+int
+kzt_time_id(void)
+{
+ return KZT_SUBSYSTEM_TIME;
+}
+++ /dev/null
-# Makefile.in for splat kernel module
-
-MODULES := splat
-DISTFILES = Makefile.in \
- splat-kmem.c splat-random.c splat-taskq.c \
- splat-time.c splat-condvar.c splat-mutex.c \
- splat-rwlock.c splat-thread.c splat-ctl.c
-CPPFLAGS += @KERNELCPPFLAGS@
-
-# Solaris porting layer aggressive tests
-obj-m := splat.o
-
-splat-objs += splat-ctl.o
-splat-objs += splat-kmem.o
-splat-objs += splat-taskq.o
-splat-objs += splat-random.o
-splat-objs += splat-mutex.o
-splat-objs += splat-condvar.o
-splat-objs += splat-thread.o
-splat-objs += splat-rwlock.o
-splat-objs += splat-time.o
-
-splatmodule := splat.ko
-splatmoduledir := @kmoduledir@/kernel/lib/
-
-all: all-spec
-
-install: all
- mkdir -p $(DESTDIR)$(splatmoduledir)
- $(INSTALL) -m 644 $(splatmodule) $(DESTDIR)$(splatmoduledir)/$(splatmodule)
- -/sbin/depmod -a
-
-uninstall:
- rm -f $(DESTDIR)$(splatmoduledir)/$(splatmodule)
- -/sbin/depmod -a
-
-clean:
- -rm -f $(splmodule) *.o .*.cmd *.mod.c *.ko *.s */*.o
-
-distclean: clean
- rm -f Makefile
- rm -rf .tmp_versions
-
-maintainer-clean: distclean
-
-distdir: $(DISTFILES)
- cp -p $(DISTFILES) $(distdir)
-
-all-spec:
- $(MAKE) -C @kernelsrc@ SUBDIRS=`pwd` @KERNELMAKE_PARAMS@ modules
+++ /dev/null
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_CONDVAR 0x0500
-#define KZT_CONDVAR_NAME "condvar"
-#define KZT_CONDVAR_DESC "Kernel Condition Variable Tests"
-
-#define KZT_CONDVAR_TEST1_ID 0x0501
-#define KZT_CONDVAR_TEST1_NAME "signal1"
-#define KZT_CONDVAR_TEST1_DESC "Wake a single thread, cv_wait()/cv_signal()"
-
-#define KZT_CONDVAR_TEST2_ID 0x0502
-#define KZT_CONDVAR_TEST2_NAME "broadcast1"
-#define KZT_CONDVAR_TEST2_DESC "Wake all threads, cv_wait()/cv_broadcast()"
-
-#define KZT_CONDVAR_TEST3_ID 0x0503
-#define KZT_CONDVAR_TEST3_NAME "signal2"
-#define KZT_CONDVAR_TEST3_DESC "Wake a single thread, cv_wait_timeout()/cv_signal()"
-
-#define KZT_CONDVAR_TEST4_ID 0x0504
-#define KZT_CONDVAR_TEST4_NAME "broadcast2"
-#define KZT_CONDVAR_TEST4_DESC "Wake all threads, cv_wait_timeout()/cv_broadcast()"
-
-#define KZT_CONDVAR_TEST5_ID 0x0505
-#define KZT_CONDVAR_TEST5_NAME "timeout"
-#define KZT_CONDVAR_TEST5_DESC "Timeout thread, cv_wait_timeout()"
-
-#define KZT_CONDVAR_TEST_MAGIC 0x115599DDUL
-#define KZT_CONDVAR_TEST_NAME "condvar_test"
-#define KZT_CONDVAR_TEST_COUNT 8
-
-typedef struct condvar_priv {
- unsigned long cv_magic;
- struct file *cv_file;
- kcondvar_t cv_condvar;
- kmutex_t cv_mtx;
-} condvar_priv_t;
-
-typedef struct condvar_thr {
- int ct_id;
- const char *ct_name;
- condvar_priv_t *ct_cvp;
- int ct_rc;
-} condvar_thr_t;
-
-int
-kzt_condvar_test12_thread(void *arg)
-{
- condvar_thr_t *ct = (condvar_thr_t *)arg;
- condvar_priv_t *cv = ct->ct_cvp;
- char name[16];
-
- ASSERT(cv->cv_magic == KZT_CONDVAR_TEST_MAGIC);
- snprintf(name, sizeof(name), "%s%d", KZT_CONDVAR_TEST_NAME, ct->ct_id);
- daemonize(name);
-
- mutex_enter(&cv->cv_mtx);
- kzt_vprint(cv->cv_file, ct->ct_name,
- "%s thread sleeping with %d waiters\n",
- name, atomic_read(&cv->cv_condvar.cv_waiters));
- cv_wait(&cv->cv_condvar, &cv->cv_mtx);
- kzt_vprint(cv->cv_file, ct->ct_name,
- "%s thread woken %d waiters remain\n",
- name, atomic_read(&cv->cv_condvar.cv_waiters));
- mutex_exit(&cv->cv_mtx);
-
- return 0;
-}
-
-static int
-kzt_condvar_test1(struct file *file, void *arg)
-{
- int i, count = 0, rc = 0;
- long pids[KZT_CONDVAR_TEST_COUNT];
- condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
- condvar_priv_t cv;
-
- cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
- cv.cv_file = file;
- mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
- cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
-
- /* Create some threads, the exact number isn't important just as
- * long as we know how many we managed to create and should expect. */
- for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
- ct[i].ct_cvp = &cv;
- ct[i].ct_id = i;
- ct[i].ct_name = KZT_CONDVAR_TEST1_NAME;
- ct[i].ct_rc = 0;
-
- pids[i] = kernel_thread(kzt_condvar_test12_thread, &ct[i], 0);
- if (pids[i] >= 0)
- count++;
- }
-
- /* Wait until all threads are waiting on the condition variable */
- while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
- schedule();
-
- /* Wake a single thread at a time, wait until it exits */
- for (i = 1; i <= count; i++) {
- cv_signal(&cv.cv_condvar);
-
- while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
- schedule();
-
- /* Correct behavior 1 thread woken */
- if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
- continue;
-
- kzt_vprint(file, KZT_CONDVAR_TEST1_NAME, "Attempted to "
- "wake %d thread but work %d threads woke\n",
- 1, count - atomic_read(&cv.cv_condvar.cv_waiters));
- rc = -EINVAL;
- break;
- }
-
- if (!rc)
- kzt_vprint(file, KZT_CONDVAR_TEST1_NAME, "Correctly woke "
- "%d sleeping threads %d at a time\n", count, 1);
-
- /* Wait until that last nutex is dropped */
- while (mutex_owner(&cv.cv_mtx))
- schedule();
-
- /* Wake everything for the failure case */
- cv_broadcast(&cv.cv_condvar);
- cv_destroy(&cv.cv_condvar);
- mutex_destroy(&cv.cv_mtx);
-
- return rc;
-}
-
-static int
-kzt_condvar_test2(struct file *file, void *arg)
-{
- int i, count = 0, rc = 0;
- long pids[KZT_CONDVAR_TEST_COUNT];
- condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
- condvar_priv_t cv;
-
- cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
- cv.cv_file = file;
- mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
- cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
-
- /* Create some threads, the exact number isn't important just as
- * long as we know how many we managed to create and should expect. */
- for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
- ct[i].ct_cvp = &cv;
- ct[i].ct_id = i;
- ct[i].ct_name = KZT_CONDVAR_TEST2_NAME;
- ct[i].ct_rc = 0;
-
- pids[i] = kernel_thread(kzt_condvar_test12_thread, &ct[i], 0);
- if (pids[i] > 0)
- count++;
- }
-
- /* Wait until all threads are waiting on the condition variable */
- while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
- schedule();
-
- /* Wake all threads waiting on the condition variable */
- cv_broadcast(&cv.cv_condvar);
-
- /* Wait until all threads have exited */
- while ((atomic_read(&cv.cv_condvar.cv_waiters) > 0) || mutex_owner(&cv.cv_mtx))
- schedule();
-
- kzt_vprint(file, KZT_CONDVAR_TEST2_NAME, "Correctly woke all "
- "%d sleeping threads at once\n", count);
-
- /* Wake everything for the failure case */
- cv_destroy(&cv.cv_condvar);
- mutex_destroy(&cv.cv_mtx);
-
- return rc;
-}
-
-int
-kzt_condvar_test34_thread(void *arg)
-{
- condvar_thr_t *ct = (condvar_thr_t *)arg;
- condvar_priv_t *cv = ct->ct_cvp;
- char name[16];
- clock_t rc;
-
- ASSERT(cv->cv_magic == KZT_CONDVAR_TEST_MAGIC);
- snprintf(name, sizeof(name), "%s%d", KZT_CONDVAR_TEST_NAME, ct->ct_id);
- daemonize(name);
-
- mutex_enter(&cv->cv_mtx);
- kzt_vprint(cv->cv_file, ct->ct_name,
- "%s thread sleeping with %d waiters\n",
- name, atomic_read(&cv->cv_condvar.cv_waiters));
-
- /* Sleep no longer than 3 seconds, for this test we should
- * actually never sleep that long without being woken up. */
- rc = cv_timedwait(&cv->cv_condvar, &cv->cv_mtx, lbolt + HZ * 3);
- if (rc == -1) {
- ct->ct_rc = -ETIMEDOUT;
- kzt_vprint(cv->cv_file, ct->ct_name, "%s thread timed out, "
- "should have been woken\n", name);
- } else {
- kzt_vprint(cv->cv_file, ct->ct_name,
- "%s thread woken %d waiters remain\n",
- name, atomic_read(&cv->cv_condvar.cv_waiters));
- }
-
- mutex_exit(&cv->cv_mtx);
-
- return 0;
-}
-
-static int
-kzt_condvar_test3(struct file *file, void *arg)
-{
- int i, count = 0, rc = 0;
- long pids[KZT_CONDVAR_TEST_COUNT];
- condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
- condvar_priv_t cv;
-
- cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
- cv.cv_file = file;
- mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
- cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
-
- /* Create some threads, the exact number isn't important just as
- * long as we know how many we managed to create and should expect. */
- for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
- ct[i].ct_cvp = &cv;
- ct[i].ct_id = i;
- ct[i].ct_name = KZT_CONDVAR_TEST3_NAME;
- ct[i].ct_rc = 0;
-
- pids[i] = kernel_thread(kzt_condvar_test34_thread, &ct[i], 0);
- if (pids[i] >= 0)
- count++;
- }
-
- /* Wait until all threads are waiting on the condition variable */
- while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
- schedule();
-
- /* Wake a single thread at a time, wait until it exits */
- for (i = 1; i <= count; i++) {
- cv_signal(&cv.cv_condvar);
-
- while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
- schedule();
-
- /* Correct behavior 1 thread woken */
- if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
- continue;
-
- kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Attempted to "
- "wake %d thread but work %d threads woke\n",
- 1, count - atomic_read(&cv.cv_condvar.cv_waiters));
- rc = -EINVAL;
- break;
- }
-
- /* Validate no waiting thread timed out early */
- for (i = 0; i < count; i++)
- if (ct[i].ct_rc)
- rc = ct[i].ct_rc;
-
- if (!rc)
- kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Correctly woke "
- "%d sleeping threads %d at a time\n", count, 1);
-
- /* Wait until that last nutex is dropped */
- while (mutex_owner(&cv.cv_mtx))
- schedule();
-
- /* Wake everything for the failure case */
- cv_broadcast(&cv.cv_condvar);
- cv_destroy(&cv.cv_condvar);
- mutex_destroy(&cv.cv_mtx);
-
- return rc;
-}
-
-static int
-kzt_condvar_test4(struct file *file, void *arg)
-{
- int i, count = 0, rc = 0;
- long pids[KZT_CONDVAR_TEST_COUNT];
- condvar_thr_t ct[KZT_CONDVAR_TEST_COUNT];
- condvar_priv_t cv;
-
- cv.cv_magic = KZT_CONDVAR_TEST_MAGIC;
- cv.cv_file = file;
- mutex_init(&cv.cv_mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
- cv_init(&cv.cv_condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
-
- /* Create some threads, the exact number isn't important just as
- * long as we know how many we managed to create and should expect. */
- for (i = 0; i < KZT_CONDVAR_TEST_COUNT; i++) {
- ct[i].ct_cvp = &cv;
- ct[i].ct_id = i;
- ct[i].ct_name = KZT_CONDVAR_TEST3_NAME;
- ct[i].ct_rc = 0;
-
- pids[i] = kernel_thread(kzt_condvar_test34_thread, &ct[i], 0);
- if (pids[i] >= 0)
- count++;
- }
-
- /* Wait until all threads are waiting on the condition variable */
- while (atomic_read(&cv.cv_condvar.cv_waiters) != count)
- schedule();
-
- /* Wake a single thread at a time, wait until it exits */
- for (i = 1; i <= count; i++) {
- cv_signal(&cv.cv_condvar);
-
- while (atomic_read(&cv.cv_condvar.cv_waiters) > (count - i))
- schedule();
-
- /* Correct behavior 1 thread woken */
- if (atomic_read(&cv.cv_condvar.cv_waiters) == (count - i))
- continue;
-
- kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Attempted to "
- "wake %d thread but work %d threads woke\n",
- 1, count - atomic_read(&cv.cv_condvar.cv_waiters));
- rc = -EINVAL;
- break;
- }
-
- /* Validate no waiting thread timed out early */
- for (i = 0; i < count; i++)
- if (ct[i].ct_rc)
- rc = ct[i].ct_rc;
-
- if (!rc)
- kzt_vprint(file, KZT_CONDVAR_TEST3_NAME, "Correctly woke "
- "%d sleeping threads %d at a time\n", count, 1);
-
- /* Wait until that last nutex is dropped */
- while (mutex_owner(&cv.cv_mtx))
- schedule();
-
- /* Wake everything for the failure case */
- cv_broadcast(&cv.cv_condvar);
- cv_destroy(&cv.cv_condvar);
- mutex_destroy(&cv.cv_mtx);
-
- return rc;
-}
-
-static int
-kzt_condvar_test5(struct file *file, void *arg)
-{
- kcondvar_t condvar;
- kmutex_t mtx;
- clock_t time_left, time_before, time_after, time_delta;
- int64_t whole_delta;
- int32_t remain_delta;
- int rc = 0;
-
- mutex_init(&mtx, KZT_CONDVAR_TEST_NAME, MUTEX_DEFAULT, NULL);
- cv_init(&condvar, KZT_CONDVAR_TEST_NAME, CV_DEFAULT, NULL);
-
- kzt_vprint(file, KZT_CONDVAR_TEST5_NAME, "Thread going to sleep for "
- "%d second and expecting to be woken by timeout\n", 1);
-
- /* Allow a 1 second timeout, plenty long to validate correctness. */
- time_before = lbolt;
- mutex_enter(&mtx);
- time_left = cv_timedwait(&condvar, &mtx, lbolt + HZ);
- mutex_exit(&mtx);
- time_after = lbolt;
- time_delta = time_after - time_before; /* XXX - Handle jiffie wrap */
- whole_delta = time_delta;
- remain_delta = do_div(whole_delta, HZ);
-
- if (time_left == -1) {
- if (time_delta >= HZ) {
- kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
- "Thread correctly timed out and was asleep "
- "for %d.%d seconds (%d second min)\n",
- (int)whole_delta, remain_delta, 1);
- } else {
- kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
- "Thread correctly timed out but was only "
- "asleep for %d.%d seconds (%d second "
- "min)\n", (int)whole_delta, remain_delta, 1);
- rc = -ETIMEDOUT;
- }
- } else {
- kzt_vprint(file, KZT_CONDVAR_TEST5_NAME,
- "Thread exited after only %d.%d seconds, it "
- "did not hit the %d second timeout\n",
- (int)whole_delta, remain_delta, 1);
- rc = -ETIMEDOUT;
- }
-
- cv_destroy(&condvar);
- mutex_destroy(&mtx);
-
- return rc;
-}
-
-kzt_subsystem_t *
-kzt_condvar_init(void)
-{
- kzt_subsystem_t *sub;
-
- sub = kmalloc(sizeof(*sub), GFP_KERNEL);
- if (sub == NULL)
- return NULL;
-
- memset(sub, 0, sizeof(*sub));
- strncpy(sub->desc.name, KZT_CONDVAR_NAME, KZT_NAME_SIZE);
- strncpy(sub->desc.desc, KZT_CONDVAR_DESC, KZT_DESC_SIZE);
- INIT_LIST_HEAD(&sub->subsystem_list);
- INIT_LIST_HEAD(&sub->test_list);
- spin_lock_init(&sub->test_lock);
- sub->desc.id = KZT_SUBSYSTEM_CONDVAR;
-
- KZT_TEST_INIT(sub, KZT_CONDVAR_TEST1_NAME, KZT_CONDVAR_TEST1_DESC,
- KZT_CONDVAR_TEST1_ID, kzt_condvar_test1);
- KZT_TEST_INIT(sub, KZT_CONDVAR_TEST2_NAME, KZT_CONDVAR_TEST2_DESC,
- KZT_CONDVAR_TEST2_ID, kzt_condvar_test2);
- KZT_TEST_INIT(sub, KZT_CONDVAR_TEST3_NAME, KZT_CONDVAR_TEST3_DESC,
- KZT_CONDVAR_TEST3_ID, kzt_condvar_test3);
- KZT_TEST_INIT(sub, KZT_CONDVAR_TEST4_NAME, KZT_CONDVAR_TEST4_DESC,
- KZT_CONDVAR_TEST4_ID, kzt_condvar_test4);
- KZT_TEST_INIT(sub, KZT_CONDVAR_TEST5_NAME, KZT_CONDVAR_TEST5_DESC,
- KZT_CONDVAR_TEST5_ID, kzt_condvar_test5);
-
- return sub;
-}
-
-void
-kzt_condvar_fini(kzt_subsystem_t *sub)
-{
- ASSERT(sub);
- KZT_TEST_FINI(sub, KZT_CONDVAR_TEST5_ID);
- KZT_TEST_FINI(sub, KZT_CONDVAR_TEST4_ID);
- KZT_TEST_FINI(sub, KZT_CONDVAR_TEST3_ID);
- KZT_TEST_FINI(sub, KZT_CONDVAR_TEST2_ID);
- KZT_TEST_FINI(sub, KZT_CONDVAR_TEST1_ID);
-
- kfree(sub);
-}
-
-int
-kzt_condvar_id(void) {
- return KZT_SUBSYSTEM_CONDVAR;
-}
+++ /dev/null
-/*
- * My intent is the create a loadable kzt (kernel ZFS test) module
- * which can be used as an access point to run in kernel ZFS regression
- * tests. Why do we need this when we have ztest? Well ztest.c only
- * excersises the ZFS code proper, it cannot be used to validate the
- * linux kernel shim primatives. This also provides a nice hook for
- * any other in kernel regression tests we wish to run such as direct
- * in-kernel tests against the DMU.
- *
- * The basic design is the kzt module is that it is constructed of
- * various kzt_* source files each of which contains regression tests.
- * For example the kzt_linux_kmem.c file contains tests for validating
- * kmem correctness. When the kzt module is loaded kzt_*_init()
- * will be called for each subsystems tests, similarly kzt_*_fini() is
- * called when the kzt module is removed. Each test can then be
- * run by making an ioctl() call from a userspace control application
- * to pick the subsystem and test which should be run.
- *
- * Author: Brian Behlendorf
- */
-
-#include <splat-ctl.h>
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
-#include <linux/devfs_fs_kernel.h>
-#endif
-
-#include <linux/cdev.h>
-
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
-static struct class_simple *kzt_class;
-#else
-static struct class *kzt_class;
-#endif
-static struct list_head kzt_module_list;
-static spinlock_t kzt_module_lock;
-
-static int
-kzt_open(struct inode *inode, struct file *file)
-{
- unsigned int minor = iminor(inode);
- kzt_info_t *info;
-
- if (minor >= KZT_MINORS)
- return -ENXIO;
-
- info = (kzt_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
- if (info == NULL)
- return -ENOMEM;
-
- spin_lock_init(&info->info_lock);
- info->info_size = KZT_INFO_BUFFER_SIZE;
- info->info_buffer = (char *)vmalloc(KZT_INFO_BUFFER_SIZE);
- if (info->info_buffer == NULL) {
- kfree(info);
- return -ENOMEM;
- }
-
- info->info_head = info->info_buffer;
- file->private_data = (void *)info;
-
- kzt_print(file, "Kernel ZFS Tests %s\n", KZT_VERSION);
-
- return 0;
-}
-
-static int
-kzt_release(struct inode *inode, struct file *file)
-{
- unsigned int minor = iminor(inode);
- kzt_info_t *info = (kzt_info_t *)file->private_data;
-
- if (minor >= KZT_MINORS)
- return -ENXIO;
-
- ASSERT(info);
- ASSERT(info->info_buffer);
-
- vfree(info->info_buffer);
- kfree(info);
-
- return 0;
-}
-
-static int
-kzt_buffer_clear(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
-{
- kzt_info_t *info = (kzt_info_t *)file->private_data;
-
- ASSERT(info);
- ASSERT(info->info_buffer);
-
- spin_lock(&info->info_lock);
- memset(info->info_buffer, 0, info->info_size);
- info->info_head = info->info_buffer;
- spin_unlock(&info->info_lock);
-
- return 0;
-}
-
-static int
-kzt_buffer_size(struct file *file, kzt_cfg_t *kcfg, unsigned long arg)
-{
- kzt_info_t *info = (kzt_info_t *)file->private_data;
- char *buf;
- int min, size, rc = 0;
-
- ASSERT(info);
- ASSERT(info->info_buffer);
-
- spin_lock(&info->info_lock);
- if (kcfg->cfg_arg1 > 0) {
-
- size = kcfg->cfg_arg1;
- buf = (char *)vmalloc(size);
- if (buf == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* Zero fill and truncate contents when coping buffer */
- min = ((size < info->info_size) ? size : info->info_size);
- memset(buf, 0, size);
- memcpy(buf, info->info_buffer, min);
- vfree(info->info_buffer);
- info->info_size = size;
- info->info_buffer = buf;
- info->info_head = info->info_buffer;
- }
-
- kcfg->cfg_rc1 = info->info_size;
-
- if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
- rc = -EFAULT;
-out:
- spin_unlock(&info->info_lock);
-
- return rc;
-}
-
-
-static kzt_subsystem_t *
-kzt_subsystem_find(int id) {
- kzt_subsystem_t *sub;
-
- spin_lock(&kzt_module_lock);
- list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
- if (id == sub->desc.id) {
- spin_unlock(&kzt_module_lock);
- return sub;
- }
- }
- spin_unlock(&kzt_module_lock);
-
- return NULL;
-}
-
-static int
-kzt_subsystem_count(kzt_cfg_t *kcfg, unsigned long arg)
-{
- kzt_subsystem_t *sub;
- int i = 0;
-
- spin_lock(&kzt_module_lock);
- list_for_each_entry(sub, &kzt_module_list, subsystem_list)
- i++;
-
- spin_unlock(&kzt_module_lock);
- kcfg->cfg_rc1 = i;
-
- if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-kzt_subsystem_list(kzt_cfg_t *kcfg, unsigned long arg)
-{
- kzt_subsystem_t *sub;
- kzt_cfg_t *tmp;
- int size, i = 0;
-
- /* Structure will be sized large enough for N subsystem entries
- * which is passed in by the caller. On exit the number of
- * entries filled in with valid subsystems will be stored in
- * cfg_rc1. If the caller does not provide enough entries
- * for all subsystems we will truncate the list to avoid overrun.
- */
- size = sizeof(*tmp) + kcfg->cfg_data.kzt_subsystems.size *
- sizeof(kzt_user_t);
- tmp = kmalloc(size, GFP_KERNEL);
- if (tmp == NULL)
- return -ENOMEM;
-
- /* Local 'tmp' is used as the structure copied back to user space */
- memset(tmp, 0, size);
- memcpy(tmp, kcfg, sizeof(*kcfg));
-
- spin_lock(&kzt_module_lock);
- list_for_each_entry(sub, &kzt_module_list, subsystem_list) {
- strncpy(tmp->cfg_data.kzt_subsystems.descs[i].name,
- sub->desc.name, KZT_NAME_SIZE);
- strncpy(tmp->cfg_data.kzt_subsystems.descs[i].desc,
- sub->desc.desc, KZT_DESC_SIZE);
- tmp->cfg_data.kzt_subsystems.descs[i].id = sub->desc.id;
-
- /* Truncate list if we are about to overrun alloc'ed memory */
- if ((i++) == kcfg->cfg_data.kzt_subsystems.size)
- break;
- }
- spin_unlock(&kzt_module_lock);
- tmp->cfg_rc1 = i;
-
- if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
- kfree(tmp);
- return -EFAULT;
- }
-
- kfree(tmp);
- return 0;
-}
-
-static int
-kzt_test_count(kzt_cfg_t *kcfg, unsigned long arg)
-{
- kzt_subsystem_t *sub;
- kzt_test_t *test;
- int i = 0;
-
- /* Subsystem ID passed as arg1 */
- sub = kzt_subsystem_find(kcfg->cfg_arg1);
- if (sub == NULL)
- return -EINVAL;
-
- spin_lock(&(sub->test_lock));
- list_for_each_entry(test, &(sub->test_list), test_list)
- i++;
-
- spin_unlock(&(sub->test_lock));
- kcfg->cfg_rc1 = i;
-
- if (copy_to_user((struct kzt_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
- return -EFAULT;
-
- return 0;
-}
-
-static int
-kzt_test_list(kzt_cfg_t *kcfg, unsigned long arg)
-{
- kzt_subsystem_t *sub;
- kzt_test_t *test;
- kzt_cfg_t *tmp;
- int size, i = 0;
-
- /* Subsystem ID passed as arg1 */
- sub = kzt_subsystem_find(kcfg->cfg_arg1);
- if (sub == NULL)
- return -EINVAL;
-
- /* Structure will be sized large enough for N test entries
- * which is passed in by the caller. On exit the number of
- * entries filled in with valid tests will be stored in
- * cfg_rc1. If the caller does not provide enough entries
- * for all tests we will truncate the list to avoid overrun.
- */
- size = sizeof(*tmp)+kcfg->cfg_data.kzt_tests.size*sizeof(kzt_user_t);
- tmp = kmalloc(size, GFP_KERNEL);
- if (tmp == NULL)
- return -ENOMEM;
-
- /* Local 'tmp' is used as the structure copied back to user space */
- memset(tmp, 0, size);
- memcpy(tmp, kcfg, sizeof(*kcfg));
-
- spin_lock(&(sub->test_lock));
- list_for_each_entry(test, &(sub->test_list), test_list) {
- strncpy(tmp->cfg_data.kzt_tests.descs[i].name,
- test->desc.name, KZT_NAME_SIZE);
- strncpy(tmp->cfg_data.kzt_tests.descs[i].desc,
- test->desc.desc, KZT_DESC_SIZE);
- tmp->cfg_data.kzt_tests.descs[i].id = test->desc.id;
-
- /* Truncate list if we are about to overrun alloc'ed memory */
- if ((i++) == kcfg->cfg_data.kzt_tests.size)
- break;
- }
- spin_unlock(&(sub->test_lock));
- tmp->cfg_rc1 = i;
-
- if (copy_to_user((struct kzt_cfg_t __user *)arg, tmp, size)) {
- kfree(tmp);
- return -EFAULT;
- }
-
- kfree(tmp);
- return 0;
-}
-
-static int
-kzt_validate(struct file *file, kzt_subsystem_t *sub, int cmd, void *arg)
-{
- kzt_test_t *test;
-
- spin_lock(&(sub->test_lock));
- list_for_each_entry(test, &(sub->test_list), test_list) {
- if (test->desc.id == cmd) {
- spin_unlock(&(sub->test_lock));
- return test->test(file, arg);
- }
- }
- spin_unlock(&(sub->test_lock));
-
- return -EINVAL;
-}
-
-static int
-kzt_ioctl_cfg(struct file *file, unsigned long arg)
-{
- kzt_cfg_t kcfg;
- int rc = 0;
-
- if (copy_from_user(&kcfg, (kzt_cfg_t *)arg, sizeof(kcfg)))
- return -EFAULT;
-
- if (kcfg.cfg_magic != KZT_CFG_MAGIC) {
- kzt_print(file, "Bad config magic 0x%x != 0x%x\n",
- kcfg.cfg_magic, KZT_CFG_MAGIC);
- return -EINVAL;
- }
-
- switch (kcfg.cfg_cmd) {
- case KZT_CFG_BUFFER_CLEAR:
- /* cfg_arg1 - Unused
- * cfg_rc1 - Unused
- */
- rc = kzt_buffer_clear(file, &kcfg, arg);
- break;
- case KZT_CFG_BUFFER_SIZE:
- /* cfg_arg1 - 0 - query size; >0 resize
- * cfg_rc1 - Set to current buffer size
- */
- rc = kzt_buffer_size(file, &kcfg, arg);
- break;
- case KZT_CFG_SUBSYSTEM_COUNT:
- /* cfg_arg1 - Unused
- * cfg_rc1 - Set to number of subsystems
- */
- rc = kzt_subsystem_count(&kcfg, arg);
- break;
- case KZT_CFG_SUBSYSTEM_LIST:
- /* cfg_arg1 - Unused
- * cfg_rc1 - Set to number of subsystems
- * cfg_data.kzt_subsystems - Populated with subsystems
- */
- rc = kzt_subsystem_list(&kcfg, arg);
- break;
- case KZT_CFG_TEST_COUNT:
- /* cfg_arg1 - Set to a target subsystem
- * cfg_rc1 - Set to number of tests
- */
- rc = kzt_test_count(&kcfg, arg);
- break;
- case KZT_CFG_TEST_LIST:
- /* cfg_arg1 - Set to a target subsystem
- * cfg_rc1 - Set to number of tests
- * cfg_data.kzt_subsystems - Populated with tests
- */
- rc = kzt_test_list(&kcfg, arg);
- break;
- default:
- kzt_print(file, "Bad config command %d\n", kcfg.cfg_cmd);
- rc = -EINVAL;
- break;
- }
-
- return rc;
-}
-
-static int
-kzt_ioctl_cmd(struct file *file, unsigned long arg)
-{
- kzt_subsystem_t *sub;
- kzt_cmd_t kcmd;
- int rc = -EINVAL;
- void *data = NULL;
-
- if (copy_from_user(&kcmd, (kzt_cfg_t *)arg, sizeof(kcmd)))
- return -EFAULT;
-
- if (kcmd.cmd_magic != KZT_CMD_MAGIC) {
- kzt_print(file, "Bad command magic 0x%x != 0x%x\n",
- kcmd.cmd_magic, KZT_CFG_MAGIC);
- return -EINVAL;
- }
-
- /* Allocate memory for any opaque data the caller needed to pass on */
- if (kcmd.cmd_data_size > 0) {
- data = (void *)kmalloc(kcmd.cmd_data_size, GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- if (copy_from_user(data, (void *)(arg + offsetof(kzt_cmd_t,
- cmd_data_str)), kcmd.cmd_data_size)) {
- kfree(data);
- return -EFAULT;
- }
- }
-
- sub = kzt_subsystem_find(kcmd.cmd_subsystem);
- if (sub != NULL)
- rc = kzt_validate(file, sub, kcmd.cmd_test, data);
- else
- rc = -EINVAL;
-
- if (data != NULL)
- kfree(data);
-
- return rc;
-}
-
-static int
-kzt_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- unsigned int minor = iminor(file->f_dentry->d_inode);
- int rc = 0;
-
- /* Ignore tty ioctls */
- if ((cmd & 0xffffff00) == ((int)'T') << 8)
- return -ENOTTY;
-
- if (minor >= KZT_MINORS)
- return -ENXIO;
-
- switch (cmd) {
- case KZT_CFG:
- rc = kzt_ioctl_cfg(file, arg);
- break;
- case KZT_CMD:
- rc = kzt_ioctl_cmd(file, arg);
- break;
- default:
- kzt_print(file, "Bad ioctl command %d\n", cmd);
- rc = -EINVAL;
- break;
- }
-
- return rc;
-}
-
-/* I'm not sure why you would want to write in to this buffer from
- * user space since its principle use is to pass test status info
- * back to the user space, but I don't see any reason to prevent it.
- */
-static ssize_t kzt_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned int minor = iminor(file->f_dentry->d_inode);
- kzt_info_t *info = (kzt_info_t *)file->private_data;
- int rc = 0;
-
- if (minor >= KZT_MINORS)
- return -ENXIO;
-
- ASSERT(info);
- ASSERT(info->info_buffer);
-
- spin_lock(&info->info_lock);
-
- /* Write beyond EOF */
- if (*ppos >= info->info_size) {
- rc = -EFBIG;
- goto out;
- }
-
- /* Resize count if beyond EOF */
- if (*ppos + count > info->info_size)
- count = info->info_size - *ppos;
-
- if (copy_from_user(info->info_buffer, buf, count)) {
- rc = -EFAULT;
- goto out;
- }
-
- *ppos += count;
- rc = count;
-out:
- spin_unlock(&info->info_lock);
- return rc;
-}
-
-static ssize_t kzt_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned int minor = iminor(file->f_dentry->d_inode);
- kzt_info_t *info = (kzt_info_t *)file->private_data;
- int rc = 0;
-
- if (minor >= KZT_MINORS)
- return -ENXIO;
-
- ASSERT(info);
- ASSERT(info->info_buffer);
-
- spin_lock(&info->info_lock);
-
- /* Read beyond EOF */
- if (*ppos >= info->info_size)
- goto out;
-
- /* Resize count if beyond EOF */
- if (*ppos + count > info->info_size)
- count = info->info_size - *ppos;
-
- if (copy_to_user(buf, info->info_buffer + *ppos, count)) {
- rc = -EFAULT;
- goto out;
- }
-
- *ppos += count;
- rc = count;
-out:
- spin_unlock(&info->info_lock);
- return rc;
-}
-
-static loff_t kzt_seek(struct file *file, loff_t offset, int origin)
-{
- unsigned int minor = iminor(file->f_dentry->d_inode);
- kzt_info_t *info = (kzt_info_t *)file->private_data;
- int rc = -EINVAL;
-
- if (minor >= KZT_MINORS)
- return -ENXIO;
-
- ASSERT(info);
- ASSERT(info->info_buffer);
-
- spin_lock(&info->info_lock);
-
- switch (origin) {
- case 0: /* SEEK_SET - No-op just do it */
- break;
- case 1: /* SEEK_CUR - Seek from current */
- offset = file->f_pos + offset;
- break;
- case 2: /* SEEK_END - Seek from end */
- offset = info->info_size + offset;
- break;
- }
-
- if (offset >= 0) {
- file->f_pos = offset;
- file->f_version = 0;
- rc = offset;
- }
-
- spin_unlock(&info->info_lock);
-
- return rc;
-}
-
-static struct file_operations kzt_fops = {
- .owner = THIS_MODULE,
- .open = kzt_open,
- .release = kzt_release,
- .ioctl = kzt_ioctl,
- .read = kzt_read,
- .write = kzt_write,
- .llseek = kzt_seek,
-};
-
-static struct cdev kzt_cdev = {
- .owner = THIS_MODULE,
- .kobj = { .name = "kztctl", },
-};
-
-static int __init
-kzt_init(void)
-{
- dev_t dev;
- int rc;
-
- spin_lock_init(&kzt_module_lock);
- INIT_LIST_HEAD(&kzt_module_list);
-
- KZT_SUBSYSTEM_INIT(kmem);
- KZT_SUBSYSTEM_INIT(taskq);
- KZT_SUBSYSTEM_INIT(krng);
- KZT_SUBSYSTEM_INIT(mutex);
- KZT_SUBSYSTEM_INIT(condvar);
- KZT_SUBSYSTEM_INIT(thread);
- KZT_SUBSYSTEM_INIT(rwlock);
- KZT_SUBSYSTEM_INIT(time);
-
- dev = MKDEV(KZT_MAJOR, 0);
- if ((rc = register_chrdev_region(dev, KZT_MINORS, "kztctl")))
- goto error;
-
- /* Support for registering a character driver */
- cdev_init(&kzt_cdev, &kzt_fops);
- if ((rc = cdev_add(&kzt_cdev, dev, KZT_MINORS))) {
- printk(KERN_ERR "kzt: Error adding cdev, %d\n", rc);
- kobject_put(&kzt_cdev.kobj);
- unregister_chrdev_region(dev, KZT_MINORS);
- goto error;
- }
-
- /* Support for udev make driver info available in sysfs */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
- kzt_class = class_simple_create(THIS_MODULE, "kzt");
-#else
- kzt_class = class_create(THIS_MODULE, "kzt");
-#endif
- if (IS_ERR(kzt_class)) {
- rc = PTR_ERR(kzt_class);
- printk(KERN_ERR "kzt: Error creating kzt class, %d\n", rc);
- cdev_del(&kzt_cdev);
- unregister_chrdev_region(dev, KZT_MINORS);
- goto error;
- }
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
- class_simple_device_add(kzt_class, MKDEV(KZT_MAJOR, 0),
- NULL, "kztctl");
-#else
- class_device_create(kzt_class, NULL, MKDEV(KZT_MAJOR, 0),
- NULL, "kztctl");
-#endif
-
- printk(KERN_INFO "kzt: Kernel ZFS Tests %s Loaded\n", KZT_VERSION);
- return 0;
-error:
- printk(KERN_ERR "kzt: Error registering kzt device, %d\n", rc);
- return rc;
-}
-
-static void
-kzt_fini(void)
-{
- dev_t dev = MKDEV(KZT_MAJOR, 0);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
- class_simple_device_remove(dev);
- class_simple_destroy(kzt_class);
- devfs_remove("kzt/kztctl");
- devfs_remove("kzt");
-#else
- class_device_destroy(kzt_class, dev);
- class_destroy(kzt_class);
-#endif
- cdev_del(&kzt_cdev);
- unregister_chrdev_region(dev, KZT_MINORS);
-
- KZT_SUBSYSTEM_FINI(time);
- KZT_SUBSYSTEM_FINI(rwlock);
- KZT_SUBSYSTEM_FINI(thread);
- KZT_SUBSYSTEM_FINI(condvar);
- KZT_SUBSYSTEM_FINI(mutex);
- KZT_SUBSYSTEM_FINI(krng);
- KZT_SUBSYSTEM_FINI(taskq);
- KZT_SUBSYSTEM_FINI(kmem);
-
- ASSERT(list_empty(&kzt_module_list));
- printk(KERN_INFO "kzt: Kernel ZFS Tests %s Unloaded\n", KZT_VERSION);
-}
-
-module_init(kzt_init);
-module_exit(kzt_fini);
-
-MODULE_AUTHOR("Lawrence Livermore National Labs");
-MODULE_DESCRIPTION("Kernel ZFS Test");
-MODULE_LICENSE("GPL");
-
+++ /dev/null
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_KMEM 0x0100
-#define KZT_KMEM_NAME "kmem"
-#define KZT_KMEM_DESC "Kernel Malloc/Slab Tests"
-
-#define KZT_KMEM_TEST1_ID 0x0101
-#define KZT_KMEM_TEST1_NAME "kmem_alloc"
-#define KZT_KMEM_TEST1_DESC "Memory allocation test (kmem_alloc)"
-
-#define KZT_KMEM_TEST2_ID 0x0102
-#define KZT_KMEM_TEST2_NAME "kmem_zalloc"
-#define KZT_KMEM_TEST2_DESC "Memory allocation test (kmem_zalloc)"
-
-#define KZT_KMEM_TEST3_ID 0x0103
-#define KZT_KMEM_TEST3_NAME "slab_alloc"
-#define KZT_KMEM_TEST3_DESC "Slab constructor/destructor test"
-
-#define KZT_KMEM_TEST4_ID 0x0104
-#define KZT_KMEM_TEST4_NAME "slab_reap"
-#define KZT_KMEM_TEST4_DESC "Slab reaping test"
-
-#define KZT_KMEM_ALLOC_COUNT 10
-/* XXX - This test may fail under tight memory conditions */
-static int
-kzt_kmem_test1(struct file *file, void *arg)
-{
- void *ptr[KZT_KMEM_ALLOC_COUNT];
- int size = PAGE_SIZE;
- int i, count, rc = 0;
-
- while ((!rc) && (size < (PAGE_SIZE * 16))) {
- count = 0;
-
- for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_alloc(size, KM_SLEEP);
- if (ptr[i])
- count++;
- }
-
- for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
- if (ptr[i])
- kmem_free(ptr[i], size);
-
- kzt_vprint(file, KZT_KMEM_TEST1_NAME,
- "%d byte allocations, %d/%d successful\n",
- size, count, KZT_KMEM_ALLOC_COUNT);
- if (count != KZT_KMEM_ALLOC_COUNT)
- rc = -ENOMEM;
-
- size *= 2;
- }
-
- return rc;
-}
-
-static int
-kzt_kmem_test2(struct file *file, void *arg)
-{
- void *ptr[KZT_KMEM_ALLOC_COUNT];
- int size = PAGE_SIZE;
- int i, j, count, rc = 0;
-
- while ((!rc) && (size < (PAGE_SIZE * 16))) {
- count = 0;
-
- for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_zalloc(size, KM_SLEEP);
- if (ptr[i])
- count++;
- }
-
- /* Ensure buffer has been zero filled */
- for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
- for (j = 0; j < size; j++) {
- if (((char *)ptr[i])[j] != '\0') {
- kzt_vprint(file, KZT_KMEM_TEST2_NAME,
- "%d-byte allocation was "
- "not zeroed\n", size);
- rc = -EFAULT;
- }
- }
- }
-
- for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
- if (ptr[i])
- kmem_free(ptr[i], size);
-
- kzt_vprint(file, KZT_KMEM_TEST2_NAME,
- "%d byte allocations, %d/%d successful\n",
- size, count, KZT_KMEM_ALLOC_COUNT);
- if (count != KZT_KMEM_ALLOC_COUNT)
- rc = -ENOMEM;
-
- size *= 2;
- }
-
- return rc;
-}
-
-#define KZT_KMEM_TEST_MAGIC 0x004488CCUL
-#define KZT_KMEM_CACHE_NAME "kmem_test"
-#define KZT_KMEM_CACHE_SIZE 256
-#define KZT_KMEM_OBJ_COUNT 128
-#define KZT_KMEM_OBJ_RECLAIM 64
-
-typedef struct kmem_cache_data {
- char kcd_buf[KZT_KMEM_CACHE_SIZE];
- unsigned long kcd_magic;
- int kcd_flag;
-} kmem_cache_data_t;
-
-typedef struct kmem_cache_priv {
- unsigned long kcp_magic;
- struct file *kcp_file;
- kmem_cache_t *kcp_cache;
- kmem_cache_data_t *kcp_kcd[KZT_KMEM_OBJ_COUNT];
- int kcp_count;
- int kcp_rc;
-} kmem_cache_priv_t;
-
-static int
-kzt_kmem_test34_constructor(void *ptr, void *priv, int flags)
-{
- kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
- kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
-
- if (kcd) {
- memset(kcd->kcd_buf, 0xaa, KZT_KMEM_CACHE_SIZE);
- kcd->kcd_flag = 1;
-
- if (kcp) {
- kcd->kcd_magic = kcp->kcp_magic;
- kcp->kcp_count++;
- }
- }
-
- return 0;
-}
-
-static void
-kzt_kmem_test34_destructor(void *ptr, void *priv)
-{
- kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
- kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
-
- if (kcd) {
- memset(kcd->kcd_buf, 0xbb, KZT_KMEM_CACHE_SIZE);
- kcd->kcd_flag = 0;
-
- if (kcp)
- kcp->kcp_count--;
- }
-
- return;
-}
-
-static int
-kzt_kmem_test3(struct file *file, void *arg)
-{
- kmem_cache_t *cache = NULL;
- kmem_cache_data_t *kcd = NULL;
- kmem_cache_priv_t kcp;
- int rc = 0, max;
-
- kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
- kcp.kcp_file = file;
- kcp.kcp_count = 0;
- kcp.kcp_rc = 0;
-
- cache = kmem_cache_create(KZT_KMEM_CACHE_NAME, sizeof(*kcd), 0,
- kzt_kmem_test34_constructor,
- kzt_kmem_test34_destructor,
- NULL, &kcp, NULL, 0);
- if (!cache) {
- kzt_vprint(file, KZT_KMEM_TEST3_NAME,
- "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
- return -ENOMEM;
- }
-
- kcd = kmem_cache_alloc(cache, 0);
- if (!kcd) {
- kzt_vprint(file, KZT_KMEM_TEST3_NAME,
- "Unable to allocate from '%s'\n",
- KZT_KMEM_CACHE_NAME);
- rc = -EINVAL;
- goto out_free;
- }
-
- if (!kcd->kcd_flag) {
- kzt_vprint(file, KZT_KMEM_TEST3_NAME,
- "Failed to run contructor for '%s'\n",
- KZT_KMEM_CACHE_NAME);
- rc = -EINVAL;
- goto out_free;
- }
-
- if (kcd->kcd_magic != kcp.kcp_magic) {
- kzt_vprint(file, KZT_KMEM_TEST3_NAME,
- "Failed to pass private data to constructor "
- "for '%s'\n", KZT_KMEM_CACHE_NAME);
- rc = -EINVAL;
- goto out_free;
- }
-
- max = kcp.kcp_count;
-
- /* Destructors run lazily so it is hard to check correctness here.
- * We assume the free worked properly if it does not crash. */
- kmem_cache_free(cache, kcd);
-
- /* Destroy the entire cache, which forces the destructors to
- * run, so we can verify one was called for every object */
- kmem_cache_destroy(cache);
- if (kcp.kcp_count) {
- kzt_vprint(file, KZT_KMEM_TEST3_NAME,
- "Failed to run destructor on all slab objects "
- "for '%s'\n", KZT_KMEM_CACHE_NAME);
- rc = -EINVAL;
- }
-
- kzt_vprint(file, KZT_KMEM_TEST3_NAME,
- "%d allocated/destroyed objects for '%s'\n",
- max, KZT_KMEM_CACHE_NAME);
-
- return rc;
-
-out_free:
- if (kcd)
- kmem_cache_free(cache, kcd);
-
- kmem_cache_destroy(cache);
- return rc;
-}
-
-static void
-kzt_kmem_test4_reclaim(void *priv)
-{
- kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
- int i;
-
- kzt_vprint(kcp->kcp_file, KZT_KMEM_TEST4_NAME,
- "Reaping %d objects from '%s'\n",
- KZT_KMEM_OBJ_RECLAIM, KZT_KMEM_CACHE_NAME);
- for (i = 0; i < KZT_KMEM_OBJ_RECLAIM; i++) {
- if (kcp->kcp_kcd[i]) {
- kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
- kcp->kcp_kcd[i] = NULL;
- }
- }
-
- return;
-}
-
-static int
-kzt_kmem_test4(struct file *file, void *arg)
-{
- kmem_cache_t *cache;
- kmem_cache_priv_t kcp;
- int i, rc = 0, max, reclaim_percent, target_percent;
-
- kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
- kcp.kcp_file = file;
- kcp.kcp_count = 0;
- kcp.kcp_rc = 0;
-
- cache = kmem_cache_create(KZT_KMEM_CACHE_NAME,
- sizeof(kmem_cache_data_t), 0,
- kzt_kmem_test34_constructor,
- kzt_kmem_test34_destructor,
- kzt_kmem_test4_reclaim, &kcp, NULL, 0);
- if (!cache) {
- kzt_vprint(file, KZT_KMEM_TEST4_NAME,
- "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
- return -ENOMEM;
- }
-
- kcp.kcp_cache = cache;
-
- for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++) {
- /* Not all allocations need to succeed */
- kcp.kcp_kcd[i] = kmem_cache_alloc(cache, 0);
- if (!kcp.kcp_kcd[i]) {
- kzt_vprint(file, KZT_KMEM_TEST4_NAME,
- "Unable to allocate from '%s'\n",
- KZT_KMEM_CACHE_NAME);
- }
- }
-
- max = kcp.kcp_count;
-
- /* Force shrinker to run */
- kmem_reap();
-
- /* Reap the reclaimed objects; this ensures the destructors are run */
- kmem_cache_reap_now(cache);
-
- reclaim_percent = ((kcp.kcp_count * 100) / max);
- target_percent = (((KZT_KMEM_OBJ_COUNT - KZT_KMEM_OBJ_RECLAIM) * 100) /
- KZT_KMEM_OBJ_COUNT);
- kzt_vprint(file, KZT_KMEM_TEST4_NAME,
- "%d%% (%d/%d) of previous size, target of "
- "%d%%-%d%% for '%s'\n", reclaim_percent, kcp.kcp_count,
- max, target_percent - 10, target_percent + 10,
- KZT_KMEM_CACHE_NAME);
- if ((reclaim_percent < target_percent - 10) ||
- (reclaim_percent > target_percent + 10))
- rc = -EINVAL;
-
- /* Cleanup our mess */
- for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++)
- if (kcp.kcp_kcd[i])
- kmem_cache_free(cache, kcp.kcp_kcd[i]);
-
- kmem_cache_destroy(cache);
-
- return rc;
-}
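As a side note on the pass criteria above: the reclaim callback frees the first KZT_KMEM_OBJ_RECLAIM (64) of the KZT_KMEM_OBJ_COUNT (128) cached objects, so roughly half of the constructed objects should survive the reap. The sketch below is illustrative only, it is not part of the deleted file, and simply replays that arithmetic in user space assuming every allocation succeeded so max == 128.

/* Illustrative only -- not part of the deleted splat-kmem.c. */
#include <stdio.h>

#define OBJ_COUNT   128   /* KZT_KMEM_OBJ_COUNT */
#define OBJ_RECLAIM  64   /* KZT_KMEM_OBJ_RECLAIM */

int main(void)
{
	/* The reclaim callback frees the first 64 of the 128 cached
	 * objects, so about half of them should remain. */
	int target = ((OBJ_COUNT - OBJ_RECLAIM) * 100) / OBJ_COUNT;

	/* The test accepts anything within +/- 10%, i.e. 40%..60%. */
	printf("target %d%%, pass window %d%%-%d%%\n",
	       target, target - 10, target + 10);
	return 0;
}

With these constants the kernel test passes only when between 40% and 60% of the objects remain after kmem_reap().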
-
-kzt_subsystem_t *
-kzt_kmem_init(void)
-{
- kzt_subsystem_t *sub;
-
- sub = kmalloc(sizeof(*sub), GFP_KERNEL);
- if (sub == NULL)
- return NULL;
-
- memset(sub, 0, sizeof(*sub));
- strncpy(sub->desc.name, KZT_KMEM_NAME, KZT_NAME_SIZE);
- strncpy(sub->desc.desc, KZT_KMEM_DESC, KZT_DESC_SIZE);
- INIT_LIST_HEAD(&sub->subsystem_list);
- INIT_LIST_HEAD(&sub->test_list);
- spin_lock_init(&sub->test_lock);
- sub->desc.id = KZT_SUBSYSTEM_KMEM;
-
- KZT_TEST_INIT(sub, KZT_KMEM_TEST1_NAME, KZT_KMEM_TEST1_DESC,
- KZT_KMEM_TEST1_ID, kzt_kmem_test1);
- KZT_TEST_INIT(sub, KZT_KMEM_TEST2_NAME, KZT_KMEM_TEST2_DESC,
- KZT_KMEM_TEST2_ID, kzt_kmem_test2);
- KZT_TEST_INIT(sub, KZT_KMEM_TEST3_NAME, KZT_KMEM_TEST3_DESC,
- KZT_KMEM_TEST3_ID, kzt_kmem_test3);
- KZT_TEST_INIT(sub, KZT_KMEM_TEST4_NAME, KZT_KMEM_TEST4_DESC,
- KZT_KMEM_TEST4_ID, kzt_kmem_test4);
-
- return sub;
-}
-
-void
-kzt_kmem_fini(kzt_subsystem_t *sub)
-{
- ASSERT(sub);
- KZT_TEST_FINI(sub, KZT_KMEM_TEST4_ID);
- KZT_TEST_FINI(sub, KZT_KMEM_TEST3_ID);
- KZT_TEST_FINI(sub, KZT_KMEM_TEST2_ID);
- KZT_TEST_FINI(sub, KZT_KMEM_TEST1_ID);
-
- kfree(sub);
-}
-
-int
-kzt_kmem_id(void) {
- return KZT_SUBSYSTEM_KMEM;
-}
+++ /dev/null
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_MUTEX 0x0400
-#define KZT_MUTEX_NAME "mutex"
-#define KZT_MUTEX_DESC "Kernel Mutex Tests"
-
-#define KZT_MUTEX_TEST1_ID 0x0401
-#define KZT_MUTEX_TEST1_NAME "tryenter"
-#define KZT_MUTEX_TEST1_DESC "Validate mutex_tryenter() correctness"
-
-#define KZT_MUTEX_TEST2_ID 0x0402
-#define KZT_MUTEX_TEST2_NAME "race"
-#define KZT_MUTEX_TEST2_DESC "Many threads entering/exiting the mutex"
-
-#define KZT_MUTEX_TEST3_ID 0x0403
-#define KZT_MUTEX_TEST3_NAME "owned"
-#define KZT_MUTEX_TEST3_DESC "Validate mutex_owned() correctness"
-
-#define KZT_MUTEX_TEST4_ID 0x0404
-#define KZT_MUTEX_TEST4_NAME "owner"
-#define KZT_MUTEX_TEST4_DESC "Validate mutex_owner() correctness"
-
-#define KZT_MUTEX_TEST_MAGIC 0x115599DDUL
-#define KZT_MUTEX_TEST_NAME "mutex_test"
-#define KZT_MUTEX_TEST_WORKQ "mutex_wq"
-#define KZT_MUTEX_TEST_COUNT 128
-
-typedef struct mutex_priv {
- unsigned long mp_magic;
- struct file *mp_file;
- struct work_struct mp_work[KZT_MUTEX_TEST_COUNT];
- kmutex_t mp_mtx;
- int mp_rc;
-} mutex_priv_t;
-
-
-static void
-kzt_mutex_test1_work(void *priv)
-{
- mutex_priv_t *mp = (mutex_priv_t *)priv;
-
- ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
- mp->mp_rc = 0;
-
- if (!mutex_tryenter(&mp->mp_mtx))
- mp->mp_rc = -EBUSY;
-}
-
-static int
-kzt_mutex_test1(struct file *file, void *arg)
-{
- struct workqueue_struct *wq;
- struct work_struct work;
- mutex_priv_t *mp;
- int rc = 0;
-
- mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
- if (mp == NULL)
- return -ENOMEM;
-
- wq = create_singlethread_workqueue(KZT_MUTEX_TEST_WORKQ);
- if (wq == NULL) {
- rc = -ENOMEM;
- goto out2;
- }
-
- mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
- mutex_enter(&(mp->mp_mtx));
-
- mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
- mp->mp_file = file;
- INIT_WORK(&work, kzt_mutex_test1_work, mp);
-
- /* Schedule a work item which will try to acquire the mutex via
- * mutex_tryenter() while it is held. This should fail and the work
- * item will indicate this status in the passed private data. */
- if (!queue_work(wq, &work)) {
- mutex_exit(&(mp->mp_mtx));
- rc = -EINVAL;
- goto out;
- }
-
- flush_workqueue(wq);
- mutex_exit(&(mp->mp_mtx));
-
- /* Work item successfully acquired the mutex, very bad! */
- if (mp->mp_rc != -EBUSY) {
- rc = -EINVAL;
- goto out;
- }
-
- kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
- "mutex_trylock() correctly failed when mutex held\n");
-
- /* Schedule a work item which will try to acquire the mutex via
- * mutex_tryenter() while it is not held. This should succeed and
- * the item will indicate this status in the passed private data. */
- if (!queue_work(wq, &work)) {
- rc = -EINVAL;
- goto out;
- }
-
- flush_workqueue(wq);
-
- /* Work item failed to acquire the mutex, very bad! */
- if (mp->mp_rc != 0) {
- rc = -EINVAL;
- goto out;
- }
-
- kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
- "mutex_trylock() correctly succeeded when mutex unheld\n");
-out:
- mutex_destroy(&(mp->mp_mtx));
- destroy_workqueue(wq);
-out2:
- kfree(mp);
-
- return rc;
-}
-
-static void
-kzt_mutex_test2_work(void *priv)
-{
- mutex_priv_t *mp = (mutex_priv_t *)priv;
- int rc;
-
- ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
-
- /* Read the value before sleeping and write it after we wake up to
- * maximize the chance of a race if mutexes are not working properly */
- mutex_enter(&mp->mp_mtx);
- rc = mp->mp_rc;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 100); /* 1/100 of a second */
- mp->mp_rc = rc + 1;
- mutex_exit(&mp->mp_mtx);
-}
-
-static int
-kzt_mutex_test2(struct file *file, void *arg)
-{
- struct workqueue_struct *wq;
- mutex_priv_t *mp;
- int i, rc = 0;
-
- mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
- if (mp == NULL)
- return -ENOMEM;
-
- /* Create a thread per CPU; items on the queue will race */
- wq = create_workqueue(KZT_MUTEX_TEST_WORKQ);
- if (wq == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
-
- mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
- mp->mp_file = file;
- mp->mp_rc = 0;
-
- /* Schedule N work items to the work queue, each of which enters the
- * mutex, sleeps briefly, then exits the mutex. On a multiprocessor
- * box these work items will be handled by all available CPUs. The
- * mutex is instrumented such that if any two processors are in the
- * critical region at the same time the system will panic. If the
- * mutex is implemented right this will never happen; that's a pass. */
- for (i = 0; i < KZT_MUTEX_TEST_COUNT; i++) {
- INIT_WORK(&(mp->mp_work[i]), kzt_mutex_test2_work, mp);
-
- if (!queue_work(wq, &(mp->mp_work[i]))) {
- kzt_vprint(file, KZT_MUTEX_TEST2_NAME,
- "Failed to queue work id %d\n", i);
- rc = -EINVAL;
- }
- }
-
- flush_workqueue(wq);
-
- if (mp->mp_rc == KZT_MUTEX_TEST_COUNT) {
- kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
- "correctly entered/exited the mutex %d times\n",
- num_online_cpus(), mp->mp_rc);
- } else {
- kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
- "only processed %d/%d mutex work items\n",
- num_online_cpus(), mp->mp_rc, KZT_MUTEX_TEST_COUNT);
- rc = -EINVAL;
- }
-
- mutex_destroy(&(mp->mp_mtx));
- destroy_workqueue(wq);
-out:
- kfree(mp);
-
- return rc;
-}
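The race detection in kzt_mutex_test2 relies on each work item reading the shared counter, sleeping, and writing back the incremented value while holding the mutex; only if every increment is serialized does mp_rc reach KZT_MUTEX_TEST_COUNT. A minimal user-space analogue of the same pattern, using POSIX threads rather than the SPL API, might look like the sketch below (illustrative only, not part of the deleted file):

/* Illustrative user-space analogue of the read/sleep/write pattern. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 128

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void *worker(void *arg)
{
	int v;

	(void)arg;
	pthread_mutex_lock(&lock);
	v = counter;              /* read before sleeping ... */
	usleep(10000);            /* ... to widen the race window */
	counter = v + 1;          /* write after waking up */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];
	int i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);

	/* With a correct mutex this always prints 128/128. */
	printf("counter = %d/%d\n", counter, NTHREADS);
	return 0;
}

Removing the lock from the sketch typically loses updates and prints a value smaller than 128, which is exactly the failure mode the kernel test is designed to catch.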
-
-static int
-kzt_mutex_test3(struct file *file, void *arg)
-{
- kmutex_t mtx;
- int rc = 0;
-
- mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
-
- mutex_enter(&mtx);
-
- /* Mutex should be owned by current */
- if (!mutex_owned(&mtx)) {
- kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
- "be owned by pid %d but is owned by pid %d\n",
- current->pid, mtx.km_owner ? mtx.km_owner->pid : -1);
- rc = -EINVAL;
- goto out;
- }
-
- mutex_exit(&mtx);
-
- /* Mutex should not be owned by any task */
- if (mutex_owned(&mtx)) {
- kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
- "not be owned but is owned by pid %d\n",
- mtx.km_owner ? mtx.km_owner->pid : -1);
- rc = -EINVAL;
- goto out;
- }
-
- kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "%s",
- "Correct mutex_owned() behavior\n");
-out:
- mutex_destroy(&mtx);
-
- return rc;
-}
-
-static int
-kzt_mutex_test4(struct file *file, void *arg)
-{
- kmutex_t mtx;
- kthread_t *owner;
- int rc = 0;
-
- mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
-
- mutex_enter(&mtx);
-
- /* Mutex should be owned by current */
- owner = mutex_owner(&mtx);
- if (current != owner) {
- kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
- "be owned by pid %d but is owned by pid %d\n",
- current->pid, owner ? owner->pid : -1);
- rc = -EINVAL;
- goto out;
- }
-
- mutex_exit(&mtx);
-
- /* Mutex should not be owned by any task */
- owner = mutex_owner(&mtx);
- if (owner) {
- kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should not "
- "be owned but is owned by pid %d\n", owner->pid);
- rc = -EINVAL;
- goto out;
- }
-
- kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "%s",
- "Correct mutex_owner() behavior\n");
-out:
- mutex_destroy(&mtx);
-
- return rc;
-}
-
-kzt_subsystem_t *
-kzt_mutex_init(void)
-{
- kzt_subsystem_t *sub;
-
- sub = kmalloc(sizeof(*sub), GFP_KERNEL);
- if (sub == NULL)
- return NULL;
-
- memset(sub, 0, sizeof(*sub));
- strncpy(sub->desc.name, KZT_MUTEX_NAME, KZT_NAME_SIZE);
- strncpy(sub->desc.desc, KZT_MUTEX_DESC, KZT_DESC_SIZE);
- INIT_LIST_HEAD(&sub->subsystem_list);
- INIT_LIST_HEAD(&sub->test_list);
- spin_lock_init(&sub->test_lock);
- sub->desc.id = KZT_SUBSYSTEM_MUTEX;
-
- KZT_TEST_INIT(sub, KZT_MUTEX_TEST1_NAME, KZT_MUTEX_TEST1_DESC,
- KZT_MUTEX_TEST1_ID, kzt_mutex_test1);
- KZT_TEST_INIT(sub, KZT_MUTEX_TEST2_NAME, KZT_MUTEX_TEST2_DESC,
- KZT_MUTEX_TEST2_ID, kzt_mutex_test2);
- KZT_TEST_INIT(sub, KZT_MUTEX_TEST3_NAME, KZT_MUTEX_TEST3_DESC,
- KZT_MUTEX_TEST3_ID, kzt_mutex_test3);
- KZT_TEST_INIT(sub, KZT_MUTEX_TEST4_NAME, KZT_MUTEX_TEST4_DESC,
- KZT_MUTEX_TEST4_ID, kzt_mutex_test4);
-
- return sub;
-}
-
-void
-kzt_mutex_fini(kzt_subsystem_t *sub)
-{
- ASSERT(sub);
- KZT_TEST_FINI(sub, KZT_MUTEX_TEST4_ID);
- KZT_TEST_FINI(sub, KZT_MUTEX_TEST3_ID);
- KZT_TEST_FINI(sub, KZT_MUTEX_TEST2_ID);
- KZT_TEST_FINI(sub, KZT_MUTEX_TEST1_ID);
-
- kfree(sub);
-}
-
-int
-kzt_mutex_id(void) {
- return KZT_SUBSYSTEM_MUTEX;
-}
+++ /dev/null
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_KRNG 0x0300
-#define KZT_KRNG_NAME "krng"
-#define KZT_KRNG_DESC "Kernel Random Number Generator Tests"
-
-#define KZT_KRNG_TEST1_ID 0x0301
-#define KZT_KRNG_TEST1_NAME "freq"
-#define KZT_KRNG_TEST1_DESC "Frequency Test"
-
-#define KRNG_NUM_BITS 1048576
-#define KRNG_NUM_BYTES (KRNG_NUM_BITS >> 3)
-#define KRNG_NUM_BITS_DIV2 (KRNG_NUM_BITS >> 1)
-#define KRNG_ERROR_RANGE 2097
-
-/* Random Number Generator Tests
-   There could be many more tests on the quality of the
-   random number generator. For now we are only
-   testing the frequency of particular bits.
-   We could also test consecutive sequences,
-   randomness within a particular block, etc.,
-   but that is probably not necessary for our purposes. */
-
-static int
-kzt_krng_test1(struct file *file, void *arg)
-{
- uint8_t *buf;
- int i, j, diff, num = 0, rc = 0;
-
- buf = kmalloc(sizeof(*buf) * KRNG_NUM_BYTES, GFP_KERNEL);
- if (buf == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- memset(buf, 0, sizeof(*buf) * KRNG_NUM_BYTES);
-
- /* Always succeeds */
- random_get_pseudo_bytes(buf, sizeof(uint8_t) * KRNG_NUM_BYTES);
-
- for (i = 0; i < KRNG_NUM_BYTES; i++) {
- uint8_t tmp = buf[i];
- for (j = 0; j < 8; j++) {
- uint8_t tmp2 = ((tmp >> j) & 0x01);
- if (tmp2 == 1) {
- num++;
- }
- }
- }
-
- kfree(buf);
-
- diff = KRNG_NUM_BITS_DIV2 - num;
- if (diff < 0)
- diff *= -1;
-
- kzt_print(file, "Test 1 Number of ones: %d\n", num);
- kzt_print(file, "Test 1 Difference from expected: %d Allowed: %d\n",
- diff, KRNG_ERROR_RANGE);
-
- if (diff > KRNG_ERROR_RANGE)
- rc = -ERANGE;
-out:
- return rc;
-}
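For context, KRNG_ERROR_RANGE (2097) can be read as a tolerance of roughly four standard deviations: with KRNG_NUM_BITS = 1,048,576 independent fair bits the expected number of ones is 524,288, the standard deviation is sqrt(n)/2 = 512, and 2097 / 512 is about 4.1. The snippet below is my own arithmetic, not part of the deleted file:

/* Illustrative only -- how KRNG_ERROR_RANGE relates to the expected
 * spread of a fair bit source. */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double n = 1048576.0;           /* KRNG_NUM_BITS */
	double mean = n / 2.0;          /* expected number of ones */
	double sigma = sqrt(n) / 2.0;   /* std dev of a binomial(n, 1/2) */

	/* 2097 is roughly 4.1 standard deviations, so a correct
	 * generator exceeds it only with negligible probability. */
	printf("mean %.0f, sigma %.0f, 2097 = %.2f sigma\n",
	       mean, sigma, 2097.0 / sigma);
	return 0;
}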
-
-kzt_subsystem_t *
-kzt_krng_init(void)
-{
- kzt_subsystem_t *sub;
-
- sub = kmalloc(sizeof(*sub), GFP_KERNEL);
- if (sub == NULL)
- return NULL;
-
- memset(sub, 0, sizeof(*sub));
- strncpy(sub->desc.name, KZT_KRNG_NAME, KZT_NAME_SIZE);
- strncpy(sub->desc.desc, KZT_KRNG_DESC, KZT_DESC_SIZE);
- INIT_LIST_HEAD(&sub->subsystem_list);
- INIT_LIST_HEAD(&sub->test_list);
- spin_lock_init(&sub->test_lock);
- sub->desc.id = KZT_SUBSYSTEM_KRNG;
-
- KZT_TEST_INIT(sub, KZT_KRNG_TEST1_NAME, KZT_KRNG_TEST1_DESC,
- KZT_KRNG_TEST1_ID, kzt_krng_test1);
-
- return sub;
-}
-
-void
-kzt_krng_fini(kzt_subsystem_t *sub)
-{
- ASSERT(sub);
-
- KZT_TEST_FINI(sub, KZT_KRNG_TEST1_ID);
-
- kfree(sub);
-}
-
-int
-kzt_krng_id(void) {
- return KZT_SUBSYSTEM_KRNG;
-}
+++ /dev/null
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_RWLOCK 0x0700
-#define KZT_RWLOCK_NAME "rwlock"
-#define KZT_RWLOCK_DESC "Kernel RW Lock Tests"
-
-#define KZT_RWLOCK_TEST1_ID 0x0701
-#define KZT_RWLOCK_TEST1_NAME "rwtest1"
-#define KZT_RWLOCK_TEST1_DESC "Multiple Readers One Writer"
-
-#define KZT_RWLOCK_TEST2_ID 0x0702
-#define KZT_RWLOCK_TEST2_NAME "rwtest2"
-#define KZT_RWLOCK_TEST2_DESC "Multiple Writers"
-
-#define KZT_RWLOCK_TEST3_ID 0x0703
-#define KZT_RWLOCK_TEST3_NAME "rwtest3"
-#define KZT_RWLOCK_TEST3_DESC "Owner Verification"
-
-#define KZT_RWLOCK_TEST4_ID 0x0704
-#define KZT_RWLOCK_TEST4_NAME "rwtest4"
-#define KZT_RWLOCK_TEST4_DESC "Trylock Test"
-
-#define KZT_RWLOCK_TEST5_ID 0x0705
-#define KZT_RWLOCK_TEST5_NAME "rwtest5"
-#define KZT_RWLOCK_TEST5_DESC "Write Downgrade Test"
-
-#define KZT_RWLOCK_TEST6_ID 0x0706
-#define KZT_RWLOCK_TEST6_NAME "rwtest6"
-#define KZT_RWLOCK_TEST6_DESC "Read Upgrade Test"
-
-#define KZT_RWLOCK_TEST_MAGIC 0x115599DDUL
-#define KZT_RWLOCK_TEST_NAME "rwlock_test"
-#define KZT_RWLOCK_TEST_COUNT 8
-
-#define KZT_RWLOCK_RELEASE_INIT 0
-#define KZT_RWLOCK_RELEASE_WRITERS 1
-#define KZT_RWLOCK_RELEASE_READERS 2
-
-typedef struct rw_priv {
- unsigned long rw_magic;
- struct file *rw_file;
- krwlock_t rwl;
- spinlock_t rw_priv_lock;
- wait_queue_head_t rw_waitq;
- atomic_t rw_completed;
- atomic_t rw_acquired;
- atomic_t rw_waiters;
- atomic_t rw_release;
-} rw_priv_t;
-
-typedef struct rw_thr {
- int rwt_id;
- const char *rwt_name;
- rw_priv_t *rwt_rwp;
- int rwt_rc;
-} rw_thr_t;
-
-static inline void
-kzt_rwlock_sleep(signed long delay)
-{
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(delay);
-}
-
-#define kzt_rwlock_lock_and_test(lock,test) \
-({ \
- int ret = 0; \
- \
- spin_lock(lock); \
- ret = (test) ? 1 : 0; \
- spin_unlock(lock); \
- ret; \
-})
-
-void kzt_init_rw_priv(rw_priv_t *rwv, struct file *file)
-{
- rwv->rw_magic = KZT_RWLOCK_TEST_MAGIC;
- rwv->rw_file = file;
- spin_lock_init(&rwv->rw_priv_lock);
- init_waitqueue_head(&rwv->rw_waitq);
- atomic_set(&rwv->rw_completed, 0);
- atomic_set(&rwv->rw_acquired, 0);
- atomic_set(&rwv->rw_waiters, 0);
- atomic_set(&rwv->rw_release, KZT_RWLOCK_RELEASE_INIT);
-
- /* Initialize the read/write lock */
- rw_init(&rwv->rwl, KZT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL);
-}
-
-int
-kzt_rwlock_test1_writer_thread(void *arg)
-{
- rw_thr_t *rwt = (rw_thr_t *)arg;
- rw_priv_t *rwv = rwt->rwt_rwp;
- uint8_t rnd = 0;
- char name[16];
-
- ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
- snprintf(name, sizeof(name), "%s%d",
- KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
- daemonize(name);
- get_random_bytes((void *)&rnd, 1);
- kzt_rwlock_sleep(rnd * HZ / 1000);
-
- spin_lock(&rwv->rw_priv_lock);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s writer thread trying to acquire rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- atomic_inc(&rwv->rw_waiters);
- spin_unlock(&rwv->rw_priv_lock);
-
- /* Take the semaphore for writing and
- * release it when we are told to */
- rw_enter(&rwv->rwl, RW_WRITER);
-
- spin_lock(&rwv->rw_priv_lock);
- atomic_dec(&rwv->rw_waiters);
- atomic_inc(&rwv->rw_acquired);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s writer thread acquired rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- spin_unlock(&rwv->rw_priv_lock);
-
- /* Wait here until the control thread
- * says we can release the write lock */
- wait_event_interruptible(rwv->rw_waitq,
- kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
- atomic_read(&rwv->rw_release) ==
- KZT_RWLOCK_RELEASE_WRITERS));
- spin_lock(&rwv->rw_priv_lock);
- atomic_inc(&rwv->rw_completed);
- atomic_dec(&rwv->rw_acquired);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s writer thread dropped rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- spin_unlock(&rwv->rw_priv_lock);
-
- /* Release the semaphore */
- rw_exit(&rwv->rwl);
- return 0;
-}
-
-int
-kzt_rwlock_test1_reader_thread(void *arg)
-{
- rw_thr_t *rwt = (rw_thr_t *)arg;
- rw_priv_t *rwv = rwt->rwt_rwp;
- uint8_t rnd = 0;
- char name[16];
-
- ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
- snprintf(name, sizeof(name), "%s%d",
- KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
- daemonize(name);
- get_random_bytes((void *)&rnd, 1);
- kzt_rwlock_sleep(rnd * HZ / 1000);
-
- /* Don't try to take the semaphore until
- * someone else has already acquired it */
- wait_event_interruptible(rwv->rw_waitq,
- kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
- atomic_read(&rwv->rw_acquired) > 0));
-
- spin_lock(&rwv->rw_priv_lock);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s reader thread trying to acquire rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- atomic_inc(&rwv->rw_waiters);
- spin_unlock(&rwv->rw_priv_lock);
-
- /* Take the semaphore for reading and
- * release it when we are told to */
- rw_enter(&rwv->rwl, RW_READER);
-
- spin_lock(&rwv->rw_priv_lock);
- atomic_dec(&rwv->rw_waiters);
- atomic_inc(&rwv->rw_acquired);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s reader thread acquired rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- spin_unlock(&rwv->rw_priv_lock);
-
- /* Wait here until the control thread
- * says we can release the read lock */
- wait_event_interruptible(rwv->rw_waitq,
- kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
- atomic_read(&rwv->rw_release) ==
- KZT_RWLOCK_RELEASE_READERS));
-
- spin_lock(&rwv->rw_priv_lock);
- atomic_inc(&rwv->rw_completed);
- atomic_dec(&rwv->rw_acquired);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s reader thread dropped rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- spin_unlock(&rwv->rw_priv_lock);
-
- /* Release the semaphore */
- rw_exit(&rwv->rwl);
- return 0;
-}
-
-static int
-kzt_rwlock_test1(struct file *file, void *arg)
-{
- int i, count = 0, rc = 0;
- long pids[KZT_RWLOCK_TEST_COUNT];
- rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
- rw_priv_t rwv;
-
- /* Initialize private data
- * including the rwlock */
- kzt_init_rw_priv(&rwv, file);
-
- /* Create some threads, the exact number isn't important just as
- * long as we know how many we managed to create and should expect. */
- for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
- rwt[i].rwt_rwp = &rwv;
- rwt[i].rwt_id = i;
- rwt[i].rwt_name = KZT_RWLOCK_TEST1_NAME;
- rwt[i].rwt_rc = 0;
-
- /* The first thread will be a writer */
- if (i == 0) {
- pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
- &rwt[i], 0);
- } else {
- pids[i] = kernel_thread(kzt_rwlock_test1_reader_thread,
- &rwt[i], 0);
- }
-
- if (pids[i] >= 0) {
- count++;
- }
- }
-
- /* Once the writer has the lock, release the readers */
- while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
- atomic_read(&rwv.rw_acquired) <= 0)) {
- kzt_rwlock_sleep(1 * HZ);
- }
- wake_up_interruptible(&rwv.rw_waitq);
-
- /* Ensure that there is only 1 writer and all readers are waiting */
- while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
- atomic_read(&rwv.rw_acquired) != 1 ||
- atomic_read(&rwv.rw_waiters) !=
- KZT_RWLOCK_TEST_COUNT - 1)) {
-
- kzt_rwlock_sleep(1 * HZ);
- }
- /* Release the writer */
- spin_lock(&rwv.rw_priv_lock);
- atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
- spin_unlock(&rwv.rw_priv_lock);
- wake_up_interruptible(&rwv.rw_waitq);
-
- /* Now ensure that there are multiple reader threads holding the lock */
- while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
- atomic_read(&rwv.rw_acquired) <= 1)) {
- kzt_rwlock_sleep(1 * HZ);
- }
- /* Release the readers */
- spin_lock(&rwv.rw_priv_lock);
- atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_READERS);
- spin_unlock(&rwv.rw_priv_lock);
- wake_up_interruptible(&rwv.rw_waitq);
-
- /* Wait for the test to complete */
- while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
- atomic_read(&rwv.rw_acquired) != 0 ||
- atomic_read(&rwv.rw_waiters) != 0)) {
- kzt_rwlock_sleep(1 * HZ);
- }
-
- rw_destroy(&rwv.rwl);
- return rc;
-}
-
-int
-kzt_rwlock_test2_writer_thread(void *arg)
-{
- rw_thr_t *rwt = (rw_thr_t *)arg;
- rw_priv_t *rwv = rwt->rwt_rwp;
- uint8_t rnd = 0;
- char name[16];
-
- ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
- snprintf(name, sizeof(name), "%s%d",
- KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
- daemonize(name);
- get_random_bytes((void *)&rnd, 1);
- kzt_rwlock_sleep(rnd * HZ / 1000);
-
- /* Just increment the waiters count here even though we are not
- * quite about to call rw_enter(). Not really a big deal since it
- * will more than likely be true when we simulate work later on */
- spin_lock(&rwv->rw_priv_lock);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s writer thread trying to acquire rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- atomic_inc(&rwv->rw_waiters);
- spin_unlock(&rwv->rw_priv_lock);
-
- /* Wait here until the control thread
- * says we can acquire the write lock */
- wait_event_interruptible(rwv->rw_waitq,
- kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
- atomic_read(&rwv->rw_release) ==
- KZT_RWLOCK_RELEASE_WRITERS));
-
- /* Take the semaphore for writing */
- rw_enter(&rwv->rwl, RW_WRITER);
-
- spin_lock(&rwv->rw_priv_lock);
- atomic_dec(&rwv->rw_waiters);
- atomic_inc(&rwv->rw_acquired);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s writer thread acquired rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- spin_unlock(&rwv->rw_priv_lock);
-
- /* Give up the processor for a bit to simulate
- * doing some work while holding the write lock */
- kzt_rwlock_sleep(rnd * HZ / 1000);
-
- /* Ensure that we are the only one writing */
- if (atomic_read(&rwv->rw_acquired) > 1) {
- rwt->rwt_rc = 1;
- } else {
- rwt->rwt_rc = 0;
- }
-
- spin_lock(&rwv->rw_priv_lock);
- atomic_inc(&rwv->rw_completed);
- atomic_dec(&rwv->rw_acquired);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s writer thread dropped rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- spin_unlock(&rwv->rw_priv_lock);
-
- rw_exit(&rwv->rwl);
-
- return 0;
-}
-
-static int
-kzt_rwlock_test2(struct file *file, void *arg)
-{
- int i, count = 0, rc = 0;
- long pids[KZT_RWLOCK_TEST_COUNT];
- rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
- rw_priv_t rwv;
-
- /* Initialize private data
- * including the rwlock */
- kzt_init_rw_priv(&rwv, file);
-
- /* Create some threads, the exact number isn't important just as
- * long as we know how many we managed to create and should expect. */
- for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
- rwt[i].rwt_rwp = &rwv;
- rwt[i].rwt_id = i;
- rwt[i].rwt_name = KZT_RWLOCK_TEST2_NAME;
- rwt[i].rwt_rc = 0;
-
- /* Every thread will be a writer */
- pids[i] = kernel_thread(kzt_rwlock_test2_writer_thread,
- &rwt[i], 0);
-
- if (pids[i] >= 0) {
- count++;
- }
- }
-
- /* Wait for writers to get queued up */
- while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
- atomic_read(&rwv.rw_waiters) < KZT_RWLOCK_TEST_COUNT)) {
- kzt_rwlock_sleep(1 * HZ);
- }
- /* Release the writers */
- spin_lock(&rwv.rw_priv_lock);
- atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
- spin_unlock(&rwv.rw_priv_lock);
- wake_up_interruptible(&rwv.rw_waitq);
-
- /* Wait for the test to complete */
- while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
- atomic_read(&rwv.rw_acquired) != 0 ||
- atomic_read(&rwv.rw_waiters) != 0)) {
- kzt_rwlock_sleep(1 * HZ);
- }
-
- /* If any of the write threads ever acquired the lock
- * while another thread had it, make sure we return
- * an error */
- for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
- if (rwt[i].rwt_rc) {
- rc++;
- }
- }
-
- rw_destroy(&rwv.rwl);
- return rc;
-}
-
-static int
-kzt_rwlock_test3(struct file *file, void *arg)
-{
- kthread_t *owner;
- rw_priv_t rwv;
- int rc = 0;
-
- /* Initialize private data
- * including the rwlock */
- kzt_init_rw_priv(&rwv, file);
-
- /* Take the rwlock for writing */
- rw_enter(&rwv.rwl, RW_WRITER);
- owner = rw_owner(&rwv.rwl);
- if (current != owner) {
- kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should "
- "be owned by pid %d but is owned by pid %d\n",
- current->pid, owner ? owner->pid : -1);
- rc = -EINVAL;
- goto out;
- }
-
- /* Release the rwlock */
- rw_exit(&rwv.rwl);
- owner = rw_owner(&rwv.rwl);
- if (owner) {
- kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
- "be owned but is owned by pid %d\n", owner->pid);
- rc = -EINVAL;
- goto out;
- }
-
- /* Take the rwlock for reading.
- * Should not have an owner */
- rw_enter(&rwv.rwl, RW_READER);
- owner = rw_owner(&rwv.rwl);
- if (owner) {
- kzt_vprint(file, KZT_RWLOCK_TEST3_NAME, "rwlock should not "
- "be owned but is owned by pid %d\n", owner->pid);
- /* Release the rwlock */
- rw_exit(&rwv.rwl);
- rc = -EINVAL;
- goto out;
- }
-
- /* Release the rwlock */
- rw_exit(&rwv.rwl);
-
-out:
- rw_destroy(&rwv.rwl);
- return rc;
-}
-
-int
-kzt_rwlock_test4_reader_thread(void *arg)
-{
- rw_thr_t *rwt = (rw_thr_t *)arg;
- rw_priv_t *rwv = rwt->rwt_rwp;
- uint8_t rnd = 0;
- char name[16];
-
- ASSERT(rwv->rw_magic == KZT_RWLOCK_TEST_MAGIC);
- snprintf(name, sizeof(name), "%s%d",
- KZT_RWLOCK_TEST_NAME, rwt->rwt_id);
- daemonize(name);
- get_random_bytes((void *)&rnd, 1);
- kzt_rwlock_sleep(rnd * HZ / 1000);
-
- /* Don't try to take the semaphore until
- * someone else has already acquired it */
- wait_event_interruptible(rwv->rw_waitq,
- kzt_rwlock_lock_and_test(&rwv->rw_priv_lock,
- atomic_read(&rwv->rw_acquired) > 0));
-
- spin_lock(&rwv->rw_priv_lock);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s reader thread trying to acquire rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- spin_unlock(&rwv->rw_priv_lock);
-
- /* Try to take the semaphore for reading; the writer
- * should already hold it so this should fail */
- rwt->rwt_rc = rw_tryenter(&rwv->rwl, RW_READER);
-
- /* If we acquired the lock here it is a
- * failure, since the writer should be
- * holding the lock */
- if (rwt->rwt_rc == 1) {
- spin_lock(&rwv->rw_priv_lock);
- atomic_inc(&rwv->rw_acquired);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s reader thread acquired rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- spin_unlock(&rwv->rw_priv_lock);
-
- spin_lock(&rwv->rw_priv_lock);
- atomic_dec(&rwv->rw_acquired);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s reader thread dropped rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- spin_unlock(&rwv->rw_priv_lock);
-
- /* Release the semaphore */
- rw_exit(&rwv->rwl);
- }
- /* Here we know we didn't block and didn't
- * acquire the rwlock for reading */
- else {
- spin_lock(&rwv->rw_priv_lock);
- atomic_inc(&rwv->rw_completed);
- kzt_vprint(rwv->rw_file, rwt->rwt_name,
- "%s reader thread could not acquire rwlock with "
- "%d holding lock and %d waiting\n",
- name, atomic_read(&rwv->rw_acquired),
- atomic_read(&rwv->rw_waiters));
- spin_unlock(&rwv->rw_priv_lock);
- }
-
- return 0;
-}
-
-static int
-kzt_rwlock_test4(struct file *file, void *arg)
-{
- int i, count = 0, rc = 0;
- long pids[KZT_RWLOCK_TEST_COUNT];
- rw_thr_t rwt[KZT_RWLOCK_TEST_COUNT];
- rw_priv_t rwv;
-
- /* Initialize private data
- * including the rwlock */
- kzt_init_rw_priv(&rwv, file);
-
- /* Create some threads, the exact number isn't important just as
- * long as we know how many we managed to create and should expect. */
- for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
- rwt[i].rwt_rwp = &rwv;
- rwt[i].rwt_id = i;
- rwt[i].rwt_name = KZT_RWLOCK_TEST4_NAME;
- rwt[i].rwt_rc = 0;
-
- /* The first thread will be a writer */
- if (i == 0) {
- /* We can reuse the test1 writer thread here */
- pids[i] = kernel_thread(kzt_rwlock_test1_writer_thread,
- &rwt[i], 0);
- } else {
- pids[i] = kernel_thread(kzt_rwlock_test4_reader_thread,
- &rwt[i], 0);
- }
-
- if (pids[i] >= 0) {
- count++;
- }
- }
-
- /* Once the writer has the lock, release the readers */
- while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
- atomic_read(&rwv.rw_acquired) <= 0)) {
- kzt_rwlock_sleep(1 * HZ);
- }
- wake_up_interruptible(&rwv.rw_waitq);
-
- /* Make sure that the reader threads complete */
- while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
- atomic_read(&rwv.rw_completed) != KZT_RWLOCK_TEST_COUNT - 1)) {
- kzt_rwlock_sleep(1 * HZ);
- }
- /* Release the writer */
- spin_lock(&rwv.rw_priv_lock);
- atomic_set(&rwv.rw_release, KZT_RWLOCK_RELEASE_WRITERS);
- spin_unlock(&rwv.rw_priv_lock);
- wake_up_interruptible(&rwv.rw_waitq);
-
- /* Wait for the test to complete */
- while (kzt_rwlock_lock_and_test(&rwv.rw_priv_lock,
- atomic_read(&rwv.rw_acquired) != 0 ||
- atomic_read(&rwv.rw_waiters) != 0)) {
- kzt_rwlock_sleep(1 * HZ);
- }
-
- /* If any of the reader threads ever acquired the lock
- * while another thread had it, make sure we return
- * an error since the rw_tryenter() should have failed */
- for (i = 0; i < KZT_RWLOCK_TEST_COUNT; i++) {
- if (rwt[i].rwt_rc) {
- rc++;
- }
- }
-
- rw_destroy(&rwv.rwl);
- return rc;
-}
-
-static int
-kzt_rwlock_test5(struct file *file, void *arg)
-{
- kthread_t *owner;
- rw_priv_t rwv;
- int rc = 0;
-
- /* Initialize private data
- * including the rwlock */
- kzt_init_rw_priv(&rwv, file);
-
- /* Take the rwlock for writing */
- rw_enter(&rwv.rwl, RW_WRITER);
- owner = rw_owner(&rwv.rwl);
- if (current != owner) {
- kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should "
- "be owned by pid %d but is owned by pid %d\n",
- current->pid, owner ? owner->pid : -1);
- rc = -EINVAL;
- goto out;
- }
-
- /* Make sure that the downgrade
- * worked properly */
- rw_downgrade(&rwv.rwl);
-
- owner = rw_owner(&rwv.rwl);
- if (owner) {
- kzt_vprint(file, KZT_RWLOCK_TEST5_NAME, "rwlock should not "
- "be owned but is owned by pid %d\n", owner->pid);
- /* Release the rwlock */
- rw_exit(&rwv.rwl);
- rc = -EINVAL;
- goto out;
- }
-
- /* Release the rwlock */
- rw_exit(&rwv.rwl);
-
-out:
- rw_destroy(&rwv.rwl);
- return rc;
-}
-
-static int
-kzt_rwlock_test6(struct file *file, void *arg)
-{
- kthread_t *owner;
- rw_priv_t rwv;
- int rc = 0;
-
- /* Initialize private data
- * including the rwlock */
- kzt_init_rw_priv(&rwv, file);
-
- /* Take the rwlock for reading */
- rw_enter(&rwv.rwl, RW_READER);
- owner = rw_owner(&rwv.rwl);
- if (owner) {
- kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should not "
- "be owned but is owned by pid %d\n", owner->pid);
- rc = -EINVAL;
- goto out;
- }
-
- /* Make sure that the upgrade
- * worked properly */
- rc = !rw_tryupgrade(&rwv.rwl);
-
- owner = rw_owner(&rwv.rwl);
- if (rc || current != owner) {
- kzt_vprint(file, KZT_RWLOCK_TEST6_NAME, "rwlock should "
- "be owned by pid %d but is owned by pid %d "
- "trylock rc %d\n",
- current->pid, owner ? owner->pid : -1, rc);
- rc = -EINVAL;
- goto out;
- }
-
- /* Release the rwlock */
- rw_exit(&rwv.rwl);
-
-out:
- rw_destroy(&rwv.rwl);
- return rc;
-}
-
-kzt_subsystem_t *
-kzt_rwlock_init(void)
-{
- kzt_subsystem_t *sub;
-
- sub = kmalloc(sizeof(*sub), GFP_KERNEL);
- if (sub == NULL)
- return NULL;
-
- memset(sub, 0, sizeof(*sub));
- strncpy(sub->desc.name, KZT_RWLOCK_NAME, KZT_NAME_SIZE);
- strncpy(sub->desc.desc, KZT_RWLOCK_DESC, KZT_DESC_SIZE);
- INIT_LIST_HEAD(&sub->subsystem_list);
- INIT_LIST_HEAD(&sub->test_list);
- spin_lock_init(&sub->test_lock);
- sub->desc.id = KZT_SUBSYSTEM_RWLOCK;
-
- KZT_TEST_INIT(sub, KZT_RWLOCK_TEST1_NAME, KZT_RWLOCK_TEST1_DESC,
- KZT_RWLOCK_TEST1_ID, kzt_rwlock_test1);
- KZT_TEST_INIT(sub, KZT_RWLOCK_TEST2_NAME, KZT_RWLOCK_TEST2_DESC,
- KZT_RWLOCK_TEST2_ID, kzt_rwlock_test2);
- KZT_TEST_INIT(sub, KZT_RWLOCK_TEST3_NAME, KZT_RWLOCK_TEST3_DESC,
- KZT_RWLOCK_TEST3_ID, kzt_rwlock_test3);
- KZT_TEST_INIT(sub, KZT_RWLOCK_TEST4_NAME, KZT_RWLOCK_TEST4_DESC,
- KZT_RWLOCK_TEST4_ID, kzt_rwlock_test4);
- KZT_TEST_INIT(sub, KZT_RWLOCK_TEST5_NAME, KZT_RWLOCK_TEST5_DESC,
- KZT_RWLOCK_TEST5_ID, kzt_rwlock_test5);
- KZT_TEST_INIT(sub, KZT_RWLOCK_TEST6_NAME, KZT_RWLOCK_TEST6_DESC,
- KZT_RWLOCK_TEST6_ID, kzt_rwlock_test6);
-
- return sub;
-}
-
-void
-kzt_rwlock_fini(kzt_subsystem_t *sub)
-{
- ASSERT(sub);
- KZT_TEST_FINI(sub, KZT_RWLOCK_TEST6_ID);
- KZT_TEST_FINI(sub, KZT_RWLOCK_TEST5_ID);
- KZT_TEST_FINI(sub, KZT_RWLOCK_TEST4_ID);
- KZT_TEST_FINI(sub, KZT_RWLOCK_TEST3_ID);
- KZT_TEST_FINI(sub, KZT_RWLOCK_TEST2_ID);
- KZT_TEST_FINI(sub, KZT_RWLOCK_TEST1_ID);
- kfree(sub);
-}
-
-int
-kzt_rwlock_id(void) {
- return KZT_SUBSYSTEM_RWLOCK;
-}
+++ /dev/null
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_TASKQ 0x0200
-#define KZT_TASKQ_NAME "taskq"
-#define KZT_TASKQ_DESC "Kernel Task Queue Tests"
-
-#define KZT_TASKQ_TEST1_ID 0x0201
-#define KZT_TASKQ_TEST1_NAME "single"
-#define KZT_TASKQ_TEST1_DESC "Single task queue, single task"
-
-#define KZT_TASKQ_TEST2_ID 0x0202
-#define KZT_TASKQ_TEST2_NAME "multiple"
-#define KZT_TASKQ_TEST2_DESC "Multiple task queues, multiple tasks"
-
-typedef struct kzt_taskq_arg {
- int flag;
- int id;
- struct file *file;
- const char *name;
-} kzt_taskq_arg_t;
-
-/* Validation Test 1 - Create a taskq, queue a task, wait until
- * task completes, ensure task ran properly, cleanup taskq.
- */
-static void
-kzt_taskq_test1_func(void *arg)
-{
- kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
-
- ASSERT(tq_arg);
- kzt_vprint(tq_arg->file, KZT_TASKQ_TEST1_NAME,
- "Taskq '%s' function '%s' setting flag\n",
- tq_arg->name, sym2str(kzt_taskq_test1_func));
- tq_arg->flag = 1;
-}
-
-static int
-kzt_taskq_test1(struct file *file, void *arg)
-{
- taskq_t *tq;
- taskqid_t id;
- kzt_taskq_arg_t tq_arg;
-
- kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' creating\n",
- KZT_TASKQ_TEST1_NAME);
- if ((tq = taskq_create(KZT_TASKQ_TEST1_NAME, 1, 0, 0, 0, 0)) == NULL) {
- kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
- "Taskq '%s' create failed\n",
- KZT_TASKQ_TEST1_NAME);
- return -EINVAL;
- }
-
- tq_arg.flag = 0;
- tq_arg.id = 0;
- tq_arg.file = file;
- tq_arg.name = KZT_TASKQ_TEST1_NAME;
-
- kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
- "Taskq '%s' function '%s' dispatching\n",
- tq_arg.name, sym2str(kzt_taskq_test1_func));
- if ((id = taskq_dispatch(tq, kzt_taskq_test1_func, &tq_arg, 0)) == 0) {
- kzt_vprint(file, KZT_TASKQ_TEST1_NAME,
- "Taskq '%s' function '%s' dispatch failed\n",
- tq_arg.name, sym2str(kzt_taskq_test1_func));
- taskq_destory(tq);
- return -EINVAL;
- }
-
- kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
- tq_arg.name);
- taskq_wait(tq);
- kzt_vprint(file, KZT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
- tq_arg.name);
- taskq_destory(tq);
-
- return (tq_arg.flag) ? 0 : -EINVAL;
-}
-
-/* Validation Test 2 - Create multiple taskq's, each with multiple tasks,
- * wait until all tasks complete, ensure all tasks ran properly and in
- * the correct order, cleanup taskq's
- */
-static void
-kzt_taskq_test2_func1(void *arg)
-{
- kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
-
- ASSERT(tq_arg);
- kzt_vprint(tq_arg->file, KZT_TASKQ_TEST2_NAME,
- "Taskq '%s/%d' function '%s' flag = %d = %d * 2\n",
- tq_arg->name, tq_arg->id,
- sym2str(kzt_taskq_test2_func1),
- tq_arg->flag * 2, tq_arg->flag);
- tq_arg->flag *= 2;
-}
-
-static void
-kzt_taskq_test2_func2(void *arg)
-{
- kzt_taskq_arg_t *tq_arg = (kzt_taskq_arg_t *)arg;
-
- ASSERT(tq_arg);
- kzt_vprint(tq_arg->file, KZT_TASKQ_TEST2_NAME,
- "Taskq '%s/%d' function '%s' flag = %d = %d + 1\n",
- tq_arg->name, tq_arg->id,
- sym2str(kzt_taskq_test2_func2),
- tq_arg->flag + 1, tq_arg->flag);
- tq_arg->flag += 1;
-}
-
-#define TEST2_TASKQS 8
-static int
-kzt_taskq_test2(struct file *file, void *arg)
-{
- taskq_t *tq[TEST2_TASKQS] = { NULL };
- taskqid_t id;
- kzt_taskq_arg_t tq_args[TEST2_TASKQS];
- int i, rc = 0;
-
- for (i = 0; i < TEST2_TASKQS; i++) {
-
- kzt_vprint(file, KZT_TASKQ_TEST2_NAME, "Taskq '%s/%d' "
- "creating\n", KZT_TASKQ_TEST2_NAME, i);
- if ((tq[i] = taskq_create(KZT_TASKQ_TEST2_NAME,
- 1, 0, 0, 0, 0)) == NULL) {
- kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
- "Taskq '%s/%d' create failed\n",
- KZT_TASKQ_TEST2_NAME, i);
- rc = -EINVAL;
- break;
- }
-
- tq_args[i].flag = i;
- tq_args[i].id = i;
- tq_args[i].file = file;
- tq_args[i].name = KZT_TASKQ_TEST2_NAME;
-
- kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
- "Taskq '%s/%d' function '%s' dispatching\n",
- tq_args[i].name, tq_args[i].id,
- sym2str(kzt_taskq_test2_func1));
- if ((id = taskq_dispatch(
- tq[i], kzt_taskq_test2_func1, &tq_args[i], 0)) == 0) {
- kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
- "Taskq '%s/%d' function '%s' dispatch "
- "failed\n", tq_args[i].name, tq_args[i].id,
- sym2str(kzt_taskq_test2_func1));
- rc = -EINVAL;
- break;
- }
-
- kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
- "Taskq '%s/%d' function '%s' dispatching\n",
- tq_args[i].name, tq_args[i].id,
- sym2str(kzt_taskq_test2_func2));
- if ((id = taskq_dispatch(
- tq[i], kzt_taskq_test2_func2, &tq_args[i], 0)) == 0) {
- kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
- "Taskq '%s/%d' function '%s' dispatch failed\n",
- tq_args[i].name, tq_args[i].id,
- sym2str(kzt_taskq_test2_func2));
- rc = -EINVAL;
- break;
- }
- }
-
- /* When rc is set we're effectively just doing cleanup here, so
- * ignore new errors in that case. They just cause noise. */
- for (i = 0; i < TEST2_TASKQS; i++) {
- if (tq[i] != NULL) {
- kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
- "Taskq '%s/%d' waiting\n",
- tq_args[i].name, tq_args[i].id);
- taskq_wait(tq[i]);
- kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
- "Taskq '%s/%d; destroying\n",
- tq_args[i].name, tq_args[i].id);
- taskq_destory(tq[i]);
-
- if (!rc && tq_args[i].flag != ((i * 2) + 1)) {
- kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
- "Taskq '%s/%d' processed tasks "
- "out of order; %d != %d\n",
- tq_args[i].name, tq_args[i].id,
- tq_args[i].flag, i * 2 + 1);
- rc = -EINVAL;
- } else {
- kzt_vprint(file, KZT_TASKQ_TEST2_NAME,
- "Taskq '%s/%d' processed tasks "
- "in the correct order; %d == %d\n",
- tq_args[i].name, tq_args[i].id,
- tq_args[i].flag, i * 2 + 1);
- }
- }
- }
-
- return rc;
-}
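The ordering check in kzt_taskq_test2 works because each taskq is created with a single thread, so func1 (which doubles the flag) must run before func2 (which adds one); starting from flag = i the expected result is i * 2 + 1, e.g. 3 becomes 6 and then 7. The sketch below just replays that arithmetic and is not part of the deleted file:

/* Illustrative only: the flag value kzt_taskq_test2 expects when the
 * two tasks run in dispatch order. */
#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		int flag = i;

		flag *= 2;      /* kzt_taskq_test2_func1 */
		flag += 1;      /* kzt_taskq_test2_func2 */
		/* Matches the (i * 2) + 1 check in the test. */
		printf("queue %d: flag %d (expected %d)\n",
		       i, flag, i * 2 + 1);
	}
	return 0;
}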
-
-kzt_subsystem_t *
-kzt_taskq_init(void)
-{
- kzt_subsystem_t *sub;
-
- sub = kmalloc(sizeof(*sub), GFP_KERNEL);
- if (sub == NULL)
- return NULL;
-
- memset(sub, 0, sizeof(*sub));
- strncpy(sub->desc.name, KZT_TASKQ_NAME, KZT_NAME_SIZE);
- strncpy(sub->desc.desc, KZT_TASKQ_DESC, KZT_DESC_SIZE);
- INIT_LIST_HEAD(&sub->subsystem_list);
- INIT_LIST_HEAD(&sub->test_list);
- spin_lock_init(&sub->test_lock);
- sub->desc.id = KZT_SUBSYSTEM_TASKQ;
-
- KZT_TEST_INIT(sub, KZT_TASKQ_TEST1_NAME, KZT_TASKQ_TEST1_DESC,
- KZT_TASKQ_TEST1_ID, kzt_taskq_test1);
- KZT_TEST_INIT(sub, KZT_TASKQ_TEST2_NAME, KZT_TASKQ_TEST2_DESC,
- KZT_TASKQ_TEST2_ID, kzt_taskq_test2);
-
- return sub;
-}
-
-void
-kzt_taskq_fini(kzt_subsystem_t *sub)
-{
- ASSERT(sub);
- KZT_TEST_FINI(sub, KZT_TASKQ_TEST2_ID);
- KZT_TEST_FINI(sub, KZT_TASKQ_TEST1_ID);
-
- kfree(sub);
-}
-
-int
-kzt_taskq_id(void) {
- return KZT_SUBSYSTEM_TASKQ;
-}
+++ /dev/null
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_THREAD 0x0600
-#define KZT_THREAD_NAME "thread"
-#define KZT_THREAD_DESC "Kernel Thread Tests"
-
-#define KZT_THREAD_TEST1_ID 0x0601
-#define KZT_THREAD_TEST1_NAME "create"
-#define KZT_THREAD_TEST1_DESC "Validate thread creation and destruction"
-
-#define KZT_THREAD_TEST_MAGIC 0x4488CC00UL
-
-typedef struct thread_priv {
- unsigned long tp_magic;
- struct file *tp_file;
- spinlock_t tp_lock;
- wait_queue_head_t tp_waitq;
- int tp_rc;
-} thread_priv_t;
-
-
-static void
-kzt_thread_work(void *priv)
-{
- thread_priv_t *tp = (thread_priv_t *)priv;
-
- spin_lock(&tp->tp_lock);
- ASSERT(tp->tp_magic == KZT_THREAD_TEST_MAGIC);
- tp->tp_rc = 1;
-
- spin_unlock(&tp->tp_lock);
- wake_up(&tp->tp_waitq);
-
- thread_exit();
-}
-
-static int
-kzt_thread_test1(struct file *file, void *arg)
-{
- thread_priv_t tp;
- DEFINE_WAIT(wait);
- kthread_t *thr;
- int rc = 0;
-
- tp.tp_magic = KZT_THREAD_TEST_MAGIC;
- tp.tp_file = file;
- spin_lock_init(&tp.tp_lock);
- init_waitqueue_head(&tp.tp_waitq);
- tp.tp_rc = 0;
-
- spin_lock(&tp.tp_lock);
-
- thr = (kthread_t *)thread_create(NULL, 0, kzt_thread_work, &tp, 0,
- (proc_t *) &p0, TS_RUN, minclsyspri);
- /* Must never fail under Solaris, but we check anyway so we can
- * report an error when this impossible thing happens */
- if (thr == NULL) {
- rc = -ESRCH;
- goto out;
- }
-
- for (;;) {
- prepare_to_wait(&tp.tp_waitq, &wait, TASK_UNINTERRUPTIBLE);
- if (tp.tp_rc)
- break;
-
- spin_unlock(&tp.tp_lock);
- schedule();
- spin_lock(&tp.tp_lock);
- }
- finish_wait(&tp.tp_waitq, &wait);
-
- kzt_vprint(file, KZT_THREAD_TEST1_NAME, "%s",
- "Thread successfully started and exited cleanly\n");
-out:
- spin_unlock(&tp.tp_lock);
-
- return rc;
-}
-
-kzt_subsystem_t *
-kzt_thread_init(void)
-{
- kzt_subsystem_t *sub;
-
- sub = kmalloc(sizeof(*sub), GFP_KERNEL);
- if (sub == NULL)
- return NULL;
-
- memset(sub, 0, sizeof(*sub));
- strncpy(sub->desc.name, KZT_THREAD_NAME, KZT_NAME_SIZE);
- strncpy(sub->desc.desc, KZT_THREAD_DESC, KZT_DESC_SIZE);
- INIT_LIST_HEAD(&sub->subsystem_list);
- INIT_LIST_HEAD(&sub->test_list);
- spin_lock_init(&sub->test_lock);
- sub->desc.id = KZT_SUBSYSTEM_THREAD;
-
- KZT_TEST_INIT(sub, KZT_THREAD_TEST1_NAME, KZT_THREAD_TEST1_DESC,
- KZT_THREAD_TEST1_ID, kzt_thread_test1);
-
- return sub;
-}
-
-void
-kzt_thread_fini(kzt_subsystem_t *sub)
-{
- ASSERT(sub);
- KZT_TEST_FINI(sub, KZT_THREAD_TEST1_ID);
-
- kfree(sub);
-}
-
-int
-kzt_thread_id(void) {
- return KZT_SUBSYSTEM_THREAD;
-}
+++ /dev/null
-#include <splat-ctl.h>
-
-#define KZT_SUBSYSTEM_TIME 0x0800
-#define KZT_TIME_NAME "time"
-#define KZT_TIME_DESC "Kernel Time Tests"
-
-#define KZT_TIME_TEST1_ID 0x0801
-#define KZT_TIME_TEST1_NAME "time1"
-#define KZT_TIME_TEST1_DESC "HZ Test"
-
-#define KZT_TIME_TEST2_ID 0x0802
-#define KZT_TIME_TEST2_NAME "time2"
-#define KZT_TIME_TEST2_DESC "Monotonic Test"
-
-static int
-kzt_time_test1(struct file *file, void *arg)
-{
- int myhz = hz;
- kzt_vprint(file, KZT_TIME_TEST1_NAME, "hz is %d\n", myhz);
- return 0;
-}
-
-static int
-kzt_time_test2(struct file *file, void *arg)
-{
- hrtime_t tm1, tm2;
- int i;
-
- tm1 = gethrtime();
- kzt_vprint(file, KZT_TIME_TEST2_NAME, "time is %lld\n", tm1);
-
- for (i = 0; i < 100; i++) {
- tm2 = gethrtime();
- kzt_vprint(file, KZT_TIME_TEST2_NAME, "time is %lld\n", tm2);
-
- if (tm1 > tm2) {
- kzt_print(file, "%s: gethrtime() is not giving "
- "monotonically increasing values\n",
- KZT_TIME_TEST2_NAME);
- return 1;
- }
- tm1 = tm2;
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(10);
- }
-
- return 0;
-}
-
-kzt_subsystem_t *
-kzt_time_init(void)
-{
- kzt_subsystem_t *sub;
-
- sub = kmalloc(sizeof(*sub), GFP_KERNEL);
- if (sub == NULL)
- return NULL;
-
- memset(sub, 0, sizeof(*sub));
- strncpy(sub->desc.name, KZT_TIME_NAME, KZT_NAME_SIZE);
- strncpy(sub->desc.desc, KZT_TIME_DESC, KZT_DESC_SIZE);
- INIT_LIST_HEAD(&sub->subsystem_list);
- INIT_LIST_HEAD(&sub->test_list);
- spin_lock_init(&sub->test_lock);
- sub->desc.id = KZT_SUBSYSTEM_TIME;
-
- KZT_TEST_INIT(sub, KZT_TIME_TEST1_NAME, KZT_TIME_TEST1_DESC,
- KZT_TIME_TEST1_ID, kzt_time_test1);
- KZT_TEST_INIT(sub, KZT_TIME_TEST2_NAME, KZT_TIME_TEST2_DESC,
- KZT_TIME_TEST2_ID, kzt_time_test2);
-
- return sub;
-}
-
-void
-kzt_time_fini(kzt_subsystem_t *sub)
-{
- ASSERT(sub);
-
- KZT_TEST_FINI(sub, KZT_TIME_TEST2_ID);
- KZT_TEST_FINI(sub, KZT_TIME_TEST1_ID);
-
- kfree(sub);
-}
-
-int
-kzt_time_id(void)
-{
- return KZT_SUBSYSTEM_TIME;
-}