Correctly functioning 64-bit atomic shim layer.  It's not
what I would call efficient but it does have the advantage
of being correct which is all I need right now.  I added
a regression test as well.

author    behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
          Fri, 28 Mar 2008 18:21:09 +0000 (18:21 +0000)
committer behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
          Fri, 28 Mar 2008 18:21:09 +0000 (18:21 +0000)

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@57 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
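
The approach is simply to funnel every 64-bit read-modify-write through one
global lock, trading throughput for correctness.  A userspace analogue of the
same pattern, using a pthread mutex in place of the kernel spinlock
(illustration only, not part of the patch; compile with -lpthread):

/* A plain 64-bit read-modify-write is not guaranteed to be atomic,
 * so every access is funneled through a single lock.  Correct, but
 * every caller serializes on the same lock. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t atomic64_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile uint64_t counter;

static uint64_t
add_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t nv;

	pthread_mutex_lock(&atomic64_lock);
	*target += delta;
	nv = *target;
	pthread_mutex_unlock(&atomic64_lock);

	return nv;
}

static void *
worker(void *arg)
{
	int i;

	for (i = 0; i < 1000000; i++)
		add_64_nv(&counter, 1);

	return NULL;
}

int main(void)
{
	pthread_t t[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* Always 4000000; without the lock the total could come up short. */
	printf("counter = %llu\n", (unsigned long long)counter);
	return 0;
}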

include/sys/atomic.h
modules/spl/Makefile.in
modules/spl/spl-atomic.c [new file with mode: 0644]
modules/splat/Makefile.in
modules/splat/splat-atomic.c [new file with mode: 0644]
modules/splat/splat-ctl.c
modules/splat/splat-internal.h

diff --git a/include/sys/atomic.h b/include/sys/atomic.h
index 1f2a4780b51ea8e66bb73220e28d7f3781cf89e8..647d0db9a037f85e869252bdc620b4a1c84a055a 100644 (file)
@@ -6,43 +6,90 @@ extern "C" {
 #endif
 
 #include <linux/module.h>
-/* FIXME - NONE OF THIS IS ATOMIC, IT SHOULD BE.  I think we can
- * get by for now since I'm only working on real 64bit systems but
- * this will need to be addressed properly.
+#include <linux/spinlock.h>
+
+/* XXX: Serialize everything through global locks.  This is
+ * going to be bad for performance, but for now it's the easiest
+ * way to ensure correct behavior.  I don't like it at all.
+ * It would be nicer to map these functions onto the native Linux
+ * atomic functions, but the generic uint64_t type complicates this.
  */
+extern spinlock_t atomic64_lock;
+extern spinlock_t atomic32_lock;
+extern spinlock_t atomic_lock;
+
+static __inline__ uint32_t
+atomic_add_32(volatile uint32_t *target, int32_t delta)
+{
+       uint32_t rc;
+
+       spin_lock(&atomic32_lock);
+       rc = *target;
+       *target += delta;
+       spin_unlock(&atomic32_lock);
+
+       return rc;
+}
 
 static __inline__ void
 atomic_inc_64(volatile uint64_t *target)
 {
+       spin_lock(&atomic64_lock);
        (*target)++;
+       spin_unlock(&atomic64_lock);
 }
 
 static __inline__ void
 atomic_dec_64(volatile uint64_t *target)
 {
+       spin_lock(&atomic64_lock);
        (*target)--;
+       spin_unlock(&atomic64_lock);
 }
 
-static __inline__ uint32_t
-atomic_add_32(volatile uint32_t *target, int32_t delta)
+static __inline__ uint64_t
+atomic_add_64(volatile uint64_t *target, uint64_t delta)
 {
-       uint32_t rc = *target;
+       uint64_t rc;
+
+       spin_lock(&atomic64_lock);
+       rc = *target;
        *target += delta;
+       spin_unlock(&atomic64_lock);
+
        return rc;
 }
 
 static __inline__ uint64_t
-atomic_add_64(volatile uint64_t *target, uint64_t delta)
+atomic_sub_64(volatile uint64_t *target, uint64_t delta)
 {
-       uint64_t rc = *target;
-       *target += delta;
+       uint64_t rc;
+
+       spin_lock(&atomic64_lock);
+       rc = *target;
+       *target -= delta;
+       spin_unlock(&atomic64_lock);
+
        return rc;
 }
 
 static __inline__ uint64_t
 atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
 {
+       spin_lock(&atomic64_lock);
        *target += delta;
+       spin_unlock(&atomic64_lock);
+
+       return *target;
+}
+
+static __inline__ uint64_t
+atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
+{
+       spin_lock(&atomic64_lock);
+       *target -= delta;
+       spin_unlock(&atomic64_lock);
+
        return *target;
 }
 
@@ -50,10 +97,13 @@ static __inline__ uint64_t
 atomic_cas_64(volatile uint64_t  *target,  uint64_t cmp,
                uint64_t newval)
 {
-       uint64_t rc = *target;
+       uint64_t rc;
 
+       spin_lock(&atomic64_lock);
+       rc = *target;
        if (*target == cmp)
                *target = newval;
+       spin_unlock(&atomic64_lock);
 
        return rc;
 }
@@ -61,10 +111,13 @@ atomic_cas_64(volatile uint64_t  *target,  uint64_t cmp,
 static __inline__ void *
 atomic_cas_ptr(volatile void  *target,  void *cmp, void *newval)
 {
-       void *rc = (void *)target;
+       void *rc;
 
+       spin_lock(&atomic_lock);
+       rc = (void *)target;
        if (target == cmp)
                target = newval;
+       spin_unlock(&atomic_lock);
 
        return rc;
 }
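
A hypothetical caller-side sketch of the compare-and-swap shim above (the
atomic_max_64() helper is assumed for illustration and is not part of this
commit): atomic_cas_64() returns the prior value of *target, so the swap
succeeded only when the return value equals the expected old value.

#include <sys/atomic.h>

/* Atomically raise *max to value using a CAS retry loop. */
static void
atomic_max_64(volatile uint64_t *max, uint64_t value)
{
	uint64_t old;

	do {
		old = *max;
		if (value <= old)
			break;          /* nothing to do */
	} while (atomic_cas_64(max, old, value) != old);
}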
diff --git a/modules/spl/Makefile.in b/modules/spl/Makefile.in
index bcc25be9731cf148d2bd9405cec7e6c270c7facd..7f16904ef67a791d716c29fb8c4a8ebface2e251 100644 (file)
@@ -19,6 +19,7 @@ spl-objs += spl-time.o
 spl-objs += spl-kobj.o
 spl-objs += spl-module.o
 spl-objs += spl-generic.o
+spl-objs += spl-atomic.o
 
 splmodule := spl.ko
 splmoduledir := @kmoduledir@/kernel/lib/
diff --git a/modules/spl/spl-atomic.c b/modules/spl/spl-atomic.c
new file mode 100644 (file)
index 0000000..fb161c5
--- /dev/null
@@ -0,0 +1,10 @@
+#include <sys/atomic.h>
+
+/* Global atomic lock declarations */
+spinlock_t atomic64_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t atomic32_lock = SPIN_LOCK_UNLOCKED;
+spinlock_t atomic_lock   = SPIN_LOCK_UNLOCKED;
+
+EXPORT_SYMBOL(atomic64_lock);
+EXPORT_SYMBOL(atomic32_lock);
+EXPORT_SYMBOL(atomic_lock);
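
On later kernels that no longer provide the SPIN_LOCK_UNLOCKED initializer,
an equivalent set of definitions would presumably use DEFINE_SPINLOCK()
instead (assumption, not part of this commit):

#include <linux/spinlock.h>

/* Hypothetical equivalent for kernels without SPIN_LOCK_UNLOCKED */
DEFINE_SPINLOCK(atomic64_lock);
DEFINE_SPINLOCK(atomic32_lock);
DEFINE_SPINLOCK(atomic_lock);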
diff --git a/modules/splat/Makefile.in b/modules/splat/Makefile.in
index 69f38b1f577d1173d5ea1214ca5b6067f3cf0df8..8c814d3e2fdc9fd33693f7bac754807d33eaa94d 100644 (file)
@@ -23,6 +23,7 @@ splat-objs += splat-rwlock.o
 splat-objs += splat-time.o
 splat-objs += splat-vnode.o
 splat-objs += splat-kobj.o
+splat-objs += splat-atomic.o
 
 splatmodule := splat.ko
 splatmoduledir := @kmoduledir@/kernel/lib/
diff --git a/modules/splat/splat-atomic.c b/modules/splat/splat-atomic.c
new file mode 100644 (file)
index 0000000..c170cc0
--- /dev/null
@@ -0,0 +1,190 @@
+#include "splat-internal.h"
+
+#define SPLAT_SUBSYSTEM_ATOMIC         0x0b00
+#define SPLAT_ATOMIC_NAME              "atomic"
+#define SPLAT_ATOMIC_DESC              "Kernel Atomic Tests"
+
+#define SPLAT_ATOMIC_TEST1_ID          0x0b01
+#define SPLAT_ATOMIC_TEST1_NAME                "64-bit"
+#define SPLAT_ATOMIC_TEST1_DESC                "Validate 64-bit atomic ops"
+
+#define SPLAT_ATOMIC_TEST_MAGIC                0x43435454UL
+#define SPLAT_ATOMIC_INIT_VALUE                10000000UL
+
+typedef enum {
+       SPLAT_ATOMIC_INC_64    = 0,
+       SPLAT_ATOMIC_DEC_64    = 1,
+       SPLAT_ATOMIC_ADD_64    = 2,
+       SPLAT_ATOMIC_SUB_64    = 3,
+       SPLAT_ATOMIC_ADD_64_NV = 4,
+       SPLAT_ATOMIC_SUB_64_NV = 5,
+       SPLAT_ATOMIC_COUNT_64  = 6
+} atomic_op_t;
+
+typedef struct atomic_priv {
+        unsigned long ap_magic;
+        struct file *ap_file;
+       spinlock_t ap_lock;
+        wait_queue_head_t ap_waitq;
+       volatile uint64_t ap_atomic;
+       volatile uint64_t ap_atomic_exited;
+       atomic_op_t ap_op;
+
+} atomic_priv_t;
+
+static void
+splat_atomic_work(void *priv)
+{
+       atomic_priv_t *ap;
+       atomic_op_t op;
+       int i;
+
+       ap = (atomic_priv_t *)priv;
+       ASSERT(ap->ap_magic == SPLAT_ATOMIC_TEST_MAGIC);
+
+       spin_lock(&ap->ap_lock);
+       op = ap->ap_op;
+       wake_up(&ap->ap_waitq);
+       spin_unlock(&ap->ap_lock);
+
+        splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
+                    "Thread %d successfully started: %lu/%lu\n", op,
+                    (long unsigned)ap->ap_atomic,
+                    (long unsigned)ap->ap_atomic_exited);
+
+       for (i = 0; i < SPLAT_ATOMIC_INIT_VALUE / 10; i++) {
+
+               /* Periodically sleep to mix up the ordering */
+               if ((i % (SPLAT_ATOMIC_INIT_VALUE / 100)) == 0) {
+                       splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
+                            "Thread %d sleeping: %lu/%lu\n", op,
+                            (long unsigned)ap->ap_atomic,
+                            (long unsigned)ap->ap_atomic_exited);
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       schedule_timeout(HZ / 100);
+               }
+
+               switch (op) {
+                       case SPLAT_ATOMIC_INC_64:
+                               atomic_inc_64(&ap->ap_atomic);
+                               break;
+                       case SPLAT_ATOMIC_DEC_64:
+                               atomic_dec_64(&ap->ap_atomic);
+                               break;
+                       case SPLAT_ATOMIC_ADD_64:
+                               atomic_add_64(&ap->ap_atomic, 3);
+                               break;
+                       case SPLAT_ATOMIC_SUB_64:
+                               atomic_sub_64(&ap->ap_atomic, 3);
+                               break;
+                       case SPLAT_ATOMIC_ADD_64_NV:
+                               atomic_add_64_nv(&ap->ap_atomic, 5);
+                               break;
+                       case SPLAT_ATOMIC_SUB_64_NV:
+                               atomic_sub_64_nv(&ap->ap_atomic, 5);
+                               break;
+                       default:
+                               BUG_ON(1);
+               }
+       }
+
+       atomic_inc_64(&ap->ap_atomic_exited);
+
+        splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
+                    "Thread %d successfully exited: %lu/%lu\n", op,
+                    (long unsigned)ap->ap_atomic,
+                    (long unsigned)ap->ap_atomic_exited);
+
+       thread_exit();
+       wake_up(&ap->ap_waitq);
+}
+
+static int
+splat_atomic_test1_cond(atomic_priv_t *ap)
+{
+       return (ap->ap_atomic_exited == SPLAT_ATOMIC_COUNT_64);
+}
+
+static int
+splat_atomic_test1(struct file *file, void *arg)
+{
+       atomic_priv_t ap;
+        DEFINE_WAIT(wait);
+       kthread_t *thr;
+       int i;
+
+       ap.ap_magic = SPLAT_ATOMIC_TEST_MAGIC;
+       ap.ap_file = file;
+       spin_lock_init(&ap.ap_lock);
+       init_waitqueue_head(&ap.ap_waitq);
+       ap.ap_atomic = SPLAT_ATOMIC_INIT_VALUE;
+       ap.ap_atomic_exited = 0;
+
+       for (i = 0; i < SPLAT_ATOMIC_COUNT_64; i++) {
+               spin_lock(&ap.ap_lock);
+               ap.ap_op = i;
+
+               thr = (kthread_t *)thread_create(NULL, 0, splat_atomic_work,
+                                                &ap, 0, &p0, TS_RUN,
+                                                minclsyspri);
+               BUG_ON(thr == NULL);
+
+               /* Prepare to wait, the new thread will wake us once it
+                * has made a copy of the unique private passed data */
+                prepare_to_wait(&ap.ap_waitq, &wait, TASK_UNINTERRUPTIBLE);
+               spin_unlock(&ap.ap_lock);
+               schedule();
+       }
+
+       wait_event_interruptible(ap.ap_waitq, splat_atomic_test1_cond(&ap));
+
+       if (ap.ap_atomic != SPLAT_ATOMIC_INIT_VALUE) {
+               splat_vprint(file, SPLAT_ATOMIC_TEST1_NAME,
+                            "Final value %lu does not match initial value %lu\n",
+                            (long unsigned)ap.ap_atomic, SPLAT_ATOMIC_INIT_VALUE);
+               return -EINVAL;
+       }
+
+        splat_vprint(file, SPLAT_ATOMIC_TEST1_NAME,
+                  "Success initial and final values match, %lu == %lu\n",
+                  (long unsigned)ap.ap_atomic, SPLAT_ATOMIC_INIT_VALUE);
+
+       return 0;
+}
+
+splat_subsystem_t *
+splat_atomic_init(void)
+{
+        splat_subsystem_t *sub;
+
+        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
+        if (sub == NULL)
+                return NULL;
+
+        memset(sub, 0, sizeof(*sub));
+        strncpy(sub->desc.name, SPLAT_ATOMIC_NAME, SPLAT_NAME_SIZE);
+        strncpy(sub->desc.desc, SPLAT_ATOMIC_DESC, SPLAT_DESC_SIZE);
+        INIT_LIST_HEAD(&sub->subsystem_list);
+        INIT_LIST_HEAD(&sub->test_list);
+        spin_lock_init(&sub->test_lock);
+        sub->desc.id = SPLAT_SUBSYSTEM_ATOMIC;
+
+        SPLAT_TEST_INIT(sub, SPLAT_ATOMIC_TEST1_NAME, SPLAT_ATOMIC_TEST1_DESC,
+                      SPLAT_ATOMIC_TEST1_ID, splat_atomic_test1);
+
+        return sub;
+}
+
+void
+splat_atomic_fini(splat_subsystem_t *sub)
+{
+        ASSERT(sub);
+        SPLAT_TEST_FINI(sub, SPLAT_ATOMIC_TEST1_ID);
+
+        kfree(sub);
+}
+
+int
+splat_atomic_id(void) {
+        return SPLAT_SUBSYSTEM_ATOMIC;
+}
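
The test passes because the six threads' deltas cancel pairwise: each thread
performs N = SPLAT_ATOMIC_INIT_VALUE / 10 operations, and inc/dec, add 3/sub 3,
and add-nv 5/sub-nv 5 sum to zero, so the counter returns to its initial value
exactly when the shims are truly atomic.  A standalone sketch of that
arithmetic (illustration only, not part of the patch):

/* Why the test expects final == initial: the per-thread deltas pair
 * off and cancel.  Each thread does N = SPLAT_ATOMIC_INIT_VALUE / 10
 * operations of one kind. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t N = 10000000UL / 10;    /* iterations per thread */
	int64_t net = 0;

	net += (int64_t)N * (+1);  /* SPLAT_ATOMIC_INC_64    */
	net += (int64_t)N * (-1);  /* SPLAT_ATOMIC_DEC_64    */
	net += (int64_t)N * (+3);  /* SPLAT_ATOMIC_ADD_64    */
	net += (int64_t)N * (-3);  /* SPLAT_ATOMIC_SUB_64    */
	net += (int64_t)N * (+5);  /* SPLAT_ATOMIC_ADD_64_NV */
	net += (int64_t)N * (-5);  /* SPLAT_ATOMIC_SUB_64_NV */

	printf("net delta = %lld\n", (long long)net);  /* prints 0 */
	return 0;
}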
diff --git a/modules/splat/splat-ctl.c b/modules/splat/splat-ctl.c
index e9026cd8d87b6763efc1ab500543b17b02a7a386..d8245737a0c21141e7750290301facf171ddfd47 100644 (file)
@@ -593,6 +593,7 @@ splat_init(void)
        SPLAT_SUBSYSTEM_INIT(time);
        SPLAT_SUBSYSTEM_INIT(vnode);
        SPLAT_SUBSYSTEM_INIT(kobj);
+       SPLAT_SUBSYSTEM_INIT(atomic);
 
        dev = MKDEV(SPLAT_MAJOR, 0);
         if ((rc = register_chrdev_region(dev, SPLAT_MINORS, "splatctl")))
@@ -654,6 +655,7 @@ splat_fini(void)
         cdev_del(&splat_cdev);
         unregister_chrdev_region(dev, SPLAT_MINORS);
 
+       SPLAT_SUBSYSTEM_FINI(atomic);
        SPLAT_SUBSYSTEM_FINI(kobj);
        SPLAT_SUBSYSTEM_FINI(vnode);
        SPLAT_SUBSYSTEM_FINI(time);
diff --git a/modules/splat/splat-internal.h b/modules/splat/splat-internal.h
index aca4b3d3810129b4e064f88cf86d4fe84498edc2..61d0fd286fae8e184af297a87619aa36828a2b7d 100644 (file)
@@ -33,6 +33,7 @@
 #include <sys/timer.h>
 #include <sys/types.h>
 #include <sys/kobj.h>
+#include <sys/atomic.h>
 
 #include "splat-ctl.h"
 
@@ -172,6 +173,7 @@ splat_subsystem_t * splat_thread_init(void);
 splat_subsystem_t * splat_time_init(void);
 splat_subsystem_t * splat_vnode_init(void);
 splat_subsystem_t * splat_kobj_init(void);
+splat_subsystem_t * splat_atomic_init(void);
 
 void splat_condvar_fini(splat_subsystem_t *);
 void splat_kmem_fini(splat_subsystem_t *);
@@ -183,6 +185,7 @@ void splat_thread_fini(splat_subsystem_t *);
 void splat_time_fini(splat_subsystem_t *);
 void splat_vnode_fini(splat_subsystem_t *);
 void splat_kobj_fini(splat_subsystem_t *);
+void splat_atomic_fini(splat_subsystem_t *);
 
 int splat_condvar_id(void);
 int splat_kmem_id(void);
@@ -194,5 +197,6 @@ int splat_thread_id(void);
 int splat_time_id(void);
 int splat_vnode_id(void);
 int splat_kobj_id(void);
+int splat_atomic_id(void);
 
 #endif /* _SPLAT_INTERNAL_H */