git.proxmox.com Git - mirror_spl.git/commitdiff
Pull in initial 32-bit support patches.
author    behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
Mon, 11 Aug 2008 22:42:04 +0000 (22:42 +0000)
committer behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
Mon, 11 Aug 2008 22:42:04 +0000 (22:42 +0000)
git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@156 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c

autoconf/spl-build.m4
configure.ac
include/sys/atomic.h
include/sys/div64.h [new file with mode: 0644]
modules/spl/spl-generic.c

diff --git a/autoconf/spl-build.m4 b/autoconf/spl-build.m4
index e12a2e3ebb279055462f38f6aac7f4ef4be9f2af..33465b6bb7bed64e273ba2e24d02db248cbe10d0 100644 (file)
@@ -665,3 +665,21 @@ AC_DEFUN([SPL_AC_INODE_I_MUTEX], [
                AC_MSG_RESULT(no)
        ])
 ])
+
+dnl #
+dnl # 2.6.14 API change,
+dnl # check whether 'div64_64()' is available
+dnl #
+AC_DEFUN([SPL_AC_DIV64_64], [
+       AC_MSG_CHECKING([whether div64_64() is available])
+       SPL_LINUX_TRY_COMPILE([
+               #include <asm/div64.h>
+       ],[
+               uint64_t i = div64_64(1ULL, 1ULL);
+       ],[
+               AC_MSG_RESULT(yes)
+               AC_DEFINE(HAVE_DIV64_64, 1, [div64_64() is available])
+       ],[
+               AC_MSG_RESULT(no)
+       ])
+])
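
The new check above hands SPL_LINUX_TRY_COMPILE an include list and a test body; conceptually it compiles a throwaway kernel source file along the lines of the sketch below against the target kernel's headers, and defines HAVE_DIV64_64 only if that build succeeds. This is a sketch only -- the real conftest wraps the body in module boilerplate, and the function name here is hypothetical:

        /* Sketch of what the div64_64() probe compiles; illustrative only. */
        #include <asm/div64.h>

        static void spl_conftest_div64_64(void)   /* hypothetical name */
        {
                uint64_t i = div64_64(1ULL, 1ULL);
                (void) i;                         /* only needs to compile */
        }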
diff --git a/configure.ac b/configure.ac
index 74b043b326096650d90fc29bea2bd61f6c78d19d..f32db79da6be307357dc524dfcef3cd27469bd65 100644 (file)
@@ -64,6 +64,7 @@ SPL_AC_UACCESS_HEADER
 SPL_AC_KMALLOC_NODE
 SPL_AC_MONOTONIC_CLOCK
 SPL_AC_INODE_I_MUTEX
+SPL_AC_DIV64_64
 
 TOPDIR=`/bin/pwd`
 
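With SPL_AC_DIV64_64 hooked into the check list above, a kernel that already provides div64_64() ends up with roughly the following in the generated autoconf config header (header name and comment text depend on the build setup); include/sys/div64.h below keys its fallback off this symbol:

        /* Emitted by the AC_DEFINE in SPL_AC_DIV64_64 when the probe links. */
        #define HAVE_DIV64_64 1
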
diff --git a/include/sys/atomic.h b/include/sys/atomic.h
index 7bb9156113d3f96a09ff0e69e4437433fce9b6d0..cd0eb3b0a897a7333dbb28cb7179a3eebad6d833 100644 (file)
@@ -33,6 +33,7 @@ extern "C" {
 
 #include <linux/module.h>
 #include <linux/spinlock.h>
+#include <sys/isa_defs.h>
 
 /* XXX: Serialize everything through global locks.  This is
  * going to be bad for performance, but for now it's the easiest
@@ -133,7 +134,23 @@ atomic_cas_64(volatile uint64_t *target,  uint64_t cmp,
        return rc;
 }
 
-#if defined(__x86_64__)
+static __inline__ uint32_t
+atomic_cas_32(volatile uint32_t *target,  uint32_t cmp,
+               uint32_t newval)
+{
+       uint32_t rc;
+
+       spin_lock(&atomic32_lock);
+       rc = *target;
+       if (*target == cmp)
+               *target = newval;
+
+       spin_unlock(&atomic32_lock);
+
+       return rc;
+}
+
+#ifdef _LP64
 /* XXX: Implement atomic_cas_ptr() in terms of uint64'ts.  This
  * is of course only safe and correct for 64 bit arches...  but
  * for now I'm OK with that.
@@ -144,6 +161,13 @@ atomic_cas_ptr(volatile void *target,  void *cmp, void *newval)
        return (void *)atomic_cas_64((volatile uint64_t *)target,
                                     (uint64_t)cmp, (uint64_t)newval);
 }
+#else
+static __inline__ void *
+atomic_cas_ptr(volatile void *target,  void *cmp, void *newval)
+{
+       return (void *)atomic_cas_32((volatile uint32_t *)target,
+                                    (uint32_t)cmp, (uint32_t)newval);
+}
 #endif
 
 #ifdef  __cplusplus
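
For context on how the new 32-bit path is meant to be used: atomic_cas_32() returns the value it found at *target, and the store happened only if that value equals cmp, so callers typically sit in a read/compare-and-swap retry loop. A minimal sketch with a hypothetical caller (not part of this patch):

        /* Hypothetical caller, illustration only: an increment built on
         * atomic_cas_32().  Retry until the value we read is still the
         * value in memory when the swap is attempted. */
        static void counter_inc(volatile uint32_t *counter)
        {
                uint32_t old;

                do {
                        old = *counter;
                } while (atomic_cas_32(counter, old, old + 1) != old);
        }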
diff --git a/include/sys/div64.h b/include/sys/div64.h
new file mode 100644 (file)
index 0000000..cb62cd3
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ *  This file is part of the SPL: Solaris Porting Layer.
+ *
+ *  Copyright (c) 2008 Sun Microsystems, Inc.
+ *
+ *  This is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
+ */
+
+#ifndef _SPL_DIV64_H
+#define _SPL_DIV64_H
+
+#include <asm/div64.h>
+
+#ifndef HAVE_DIV64_64
+#if BITS_PER_LONG == 32
+
+extern uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor);
+#define div64_64(a,b) spl_div64_64(a,b)
+
+#else /* BITS_PER_LONG == 32 */
+
+static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+{
+       return dividend / divisor;
+}
+
+#endif /* BITS_PER_LONG == 32 */
+#endif /* HAVE_DIV64_64 */
+
+#define roundup64(x, y) (div64_64((x) + ((y) - 1), (y)) * (y))
+
+#endif /* _SPL_DIV64_H */
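
A quick worked example of the roundup64() macro above, with illustrative values: rounding 1000 up to a multiple of 512 computes div64_64(1000 + 511, 512) = 2, then 2 * 512 = 1024.

        /* Illustrative use of roundup64(); the values are arbitrary. */
        uint64_t aligned = roundup64(1000ULL, 512ULL);   /* == 1024 */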
diff --git a/modules/spl/spl-generic.c b/modules/spl/spl-generic.c
index 7a818add8389c98f0df64ccbea173c796b357faa..afaefb6b47176a77f1f068313982b14e0855f68c 100644 (file)
@@ -87,6 +87,30 @@ highbit(unsigned long i)
 }
 EXPORT_SYMBOL(highbit);
 
+/*
+ * Implementation of div64_64(), for kernels that don't have it.
+ *
+ * Taken from a 2.6.24 kernel.
+ */
+uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor)
+{
+       uint32_t high, d;
+
+       high = divisor >> 32;
+       if (high) {
+               unsigned int shift = fls(high);
+
+               d = divisor >> shift;
+               dividend >>= shift;
+       } else
+               d = divisor;
+
+       do_div(dividend, d);
+
+       return dividend;
+}
+EXPORT_SYMBOL(spl_div64_64);
+
 int
 ddi_strtoul(const char *str, char **nptr, int base, unsigned long *result)
 {
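
A note on the fallback above: because it shifts both operands right by fls() of the divisor's high word before handing a now 32-bit divisor to do_div(), the quotient is exact whenever the divisor fits in 32 bits and only approximate otherwise (the shifted-off low divisor bits are discarded). A user-space sketch of the same shift-then-divide idea, with fls32() standing in for the kernel's fls():

        /* User-space sketch of the shift-then-divide fallback; fls32() is a
         * stand-in for the kernel's fls().  Exact for 32-bit divisors,
         * approximate when the divisor needs more than 32 bits. */
        #include <stdint.h>
        #include <stdio.h>

        static unsigned int fls32(uint32_t x)
        {
                unsigned int r = 0;

                while (x) {             /* 1-based index of the highest set bit */
                        x >>= 1;
                        r++;
                }
                return r;
        }

        static uint64_t approx_div64_64(uint64_t dividend, uint64_t divisor)
        {
                uint32_t high = divisor >> 32;

                if (high) {
                        unsigned int shift = fls32(high);

                        divisor >>= shift;      /* divisor now fits in 32 bits */
                        dividend >>= shift;
                }
                return dividend / (uint32_t)divisor;
        }

        int main(void)
        {
                /* 1000000000 / 3 = 333333333; exact, since 3 fits in 32 bits. */
                printf("%llu\n", (unsigned long long)approx_div64_64(1000000000ULL, 3ULL));
                return 0;
        }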