git.proxmox.com Git - mirror_spl-debian.git/commitdiff
Imported Upstream version 0.6.3+git20140731
author: Liang Guo <guoliang@debian.org>
Thu, 31 Jul 2014 07:09:08 +0000 (15:09 +0800)
committer: Liang Guo <guoliang@debian.org>
Thu, 31 Jul 2014 07:09:08 +0000 (15:09 +0800)
31 files changed:
META
config/rpm.am
config/spl-build.m4
config/spl-meta.m4
include/sys/atomic.h
include/sys/debug.h
include/sys/isa_defs.h
include/sys/kmem.h
include/sys/sysmacros.h
include/sys/systeminfo.h
include/sys/thread.h
include/sys/timer.h
man/man5/spl-module-parameters.5
module/spl/spl-cred.c
module/spl/spl-debug.c
module/spl/spl-err.c
module/spl/spl-generic.c
module/spl/spl-kmem.c
module/spl/spl-proc.c
module/spl/spl-taskq.c
module/spl/spl-thread.c
module/spl/spl-vnode.c
module/splat/splat-condvar.c
module/splat/splat-cred.c
module/splat/splat-kmem.c
module/splat/splat-rwlock.c
module/splat/splat-taskq.c
rpm/generic/spl-dkms.spec.in
rpm/generic/spl-kmod.spec.in
rpm/generic/spl.spec.in
scripts/kmodtool

diff --git a/META b/META
index d83c536445dd73422ee1d1361b788d368189f278..117c3e336caee169c1f98e4675051aaa85b21b35 100644 (file)
--- a/META
+++ b/META
@@ -1,6 +1,6 @@
 Meta:         1
 Name:         spl
 Branch:       1.0
-Version:      0.6.2
+Version:      0.6.3
 Release:      1
 Release-Tags: relext
index 8b33bb5b690361d392b42fe69a164235290fb6ba..311c754d429d4dbc00eb35e5fc312dc42abf7db4 100644 (file)
@@ -63,7 +63,7 @@ srpm-common: dist
                rpmbuild="$$rpmbuild" \
                rpmspec="$$rpmspec" \
                rpm-local || exit 1; \
-       $(RPMBUILD) \
+       LANG=C $(RPMBUILD) \
                --define "_tmppath $$rpmbuild/TMP" \
                --define "_topdir $$rpmbuild" \
                $(def) -bs $$rpmbuild/SPECS/$$rpmspec || exit 1; \
@@ -79,7 +79,7 @@ rpm-common:
                rpmbuild="$$rpmbuild" \
                rpmspec="$$rpmspec" \
                rpm-local || exit 1; \
-       ${RPMBUILD} \
+       LANG=C ${RPMBUILD} \
                --define "_tmppath $$rpmbuild/TMP" \
                --define "_topdir $$rpmbuild" \
                $(def) --rebuild $$rpmpkg || exit 1; \
index 84267807db1ca4df51a58fcf9217d15ef7f2740a..eef52334fb10532d00351607990534e50be0777e 100644 (file)
@@ -93,6 +93,7 @@ AC_DEFUN([SPL_AC_CONFIG_KERNEL], [
        SPL_AC_SCHED_RT_HEADER
        SPL_AC_2ARGS_VFS_GETATTR
        SPL_AC_USLEEP_RANGE
+       SPL_AC_KMEM_CACHE_ALLOCFLAGS
 ])
 
 AC_DEFUN([SPL_AC_MODULE_SYMVERS], [
@@ -1907,7 +1908,28 @@ AC_DEFUN([SPL_AC_4ARGS_VFS_RENAME],
                        AC_DEFINE(HAVE_5ARGS_VFS_RENAME, 1,
                                  [vfs_rename() wants 5 args])
                ],[
-                       AC_MSG_ERROR(no)
+                       AC_MSG_RESULT(no)
+                       dnl #
+                       dnl # Linux 3.15 API change
+                       dnl # Added flags
+                       dnl #
+                       AC_MSG_CHECKING([whether vfs_rename() wants 6 args])
+                       SPL_LINUX_TRY_COMPILE([
+                               #include <linux/fs.h>
+                       ],[
+                               vfs_rename((struct inode *) NULL,
+                                       (struct dentry *) NULL,
+                                       (struct inode *) NULL,
+                                       (struct dentry *) NULL,
+                                       (struct inode **) NULL,
+                                       (unsigned int) 0);
+                       ],[
+                               AC_MSG_RESULT(yes)
+                               AC_DEFINE(HAVE_6ARGS_VFS_RENAME, 1,
+                                         [vfs_rename() wants 6 args])
+                       ],[
+                               AC_MSG_ERROR(no)
+                       ])
                ])
        ])
 ])
@@ -2511,3 +2533,40 @@ AC_DEFUN([SPL_AC_USLEEP_RANGE], [
                AC_MSG_RESULT(no)
        ])
 ])
+
+dnl #
+dnl # 2.6.35 API change,
+dnl # The cachep->gfpflags member was renamed cachep->allocflags.  These are
+dnl # private allocation flags which are applied when allocating a new slab
+dnl # in kmem_getpages().  Unfortunately there is no public API for setting
+dnl # non-default flags.
+dnl #
+AC_DEFUN([SPL_AC_KMEM_CACHE_ALLOCFLAGS], [
+       AC_MSG_CHECKING([whether struct kmem_cache has allocflags])
+       SPL_LINUX_TRY_COMPILE([
+               #include <linux/slab.h>
+       ],[
+               struct kmem_cache cachep __attribute__ ((unused));
+               cachep.allocflags = GFP_KERNEL;
+       ],[
+               AC_MSG_RESULT(yes)
+               AC_DEFINE(HAVE_KMEM_CACHE_ALLOCFLAGS, 1,
+                       [struct kmem_cache has allocflags])
+       ],[
+               AC_MSG_RESULT(no)
+
+               AC_MSG_CHECKING([whether struct kmem_cache has gfpflags])
+               SPL_LINUX_TRY_COMPILE([
+                       #include <linux/slab.h>
+               ],[
+                       struct kmem_cache cachep __attribute__ ((unused));
+                       cachep.gfpflags = GFP_KERNEL;
+               ],[
+                       AC_MSG_RESULT(yes)
+                       AC_DEFINE(HAVE_KMEM_CACHE_GFPFLAGS, 1,
+                               [struct kmem_cache has gfpflags])
+               ],[
+                       AC_MSG_RESULT(no)
+               ])
+       ])
+])
index a51fa797eefb7f89e9eb0478dbfde37313f0a23a..e98cc2ea6378d5b247821d691f4faf1c3017264c 100644 (file)
@@ -57,7 +57,7 @@ AC_DEFUN([SPL_AC_META], [
 
                SPL_META_RELEASE=_SPL_AC_META_GETVAL([Release]);
                if test ! -f ".nogitrelease" && git rev-parse --git-dir > /dev/null 2>&1; then
-                       _match="${SPL_META_NAME}-${SPL_META_VERSION}*"
+                       _match="${SPL_META_NAME}-${SPL_META_VERSION}"
                        _alias=$(git describe --match=${_match} 2>/dev/null)
                        _release=$(echo ${_alias}|cut -f3- -d'-'|sed 's/-/_/g')
                        if test -n "${_release}"; then
index 31d35eb1437d9ff5c9c6d8b7fb3777b01a905dce..e034f2e2c836e55a24b16547e426266175ec85b5 100644 (file)
@@ -156,6 +156,19 @@ atomic_cas_32(volatile uint32_t *target,  uint32_t cmp,
        return rc;
 }
 
+static __inline__ uint32_t
+atomic_swap_32(volatile uint32_t *target,  uint32_t newval)
+{
+       uint32_t rc;
+
+       spin_lock(&atomic32_lock);
+       rc = *target;
+       *target = newval;
+       spin_unlock(&atomic32_lock);
+
+       return rc;
+}
+
 static __inline__ void
 atomic_inc_64(volatile uint64_t *target)
 {
@@ -253,6 +266,18 @@ atomic_cas_64(volatile uint64_t *target,  uint64_t cmp,
        return rc;
 }
 
+static __inline__ uint64_t
+atomic_swap_64(volatile uint64_t *target,  uint64_t newval)
+{
+       uint64_t rc;
+
+       spin_lock(&atomic64_lock);
+       rc = *target;
+       *target = newval;
+       spin_unlock(&atomic64_lock);
+
+       return rc;
+}
 
 #else /* ATOMIC_SPINLOCK */
 
@@ -265,6 +290,7 @@ atomic_cas_64(volatile uint64_t *target,  uint64_t cmp,
 #define atomic_add_32_nv(v, i) atomic_add_return((i), (atomic_t *)(v))
 #define atomic_sub_32_nv(v, i) atomic_sub_return((i), (atomic_t *)(v))
 #define atomic_cas_32(v, x, y) atomic_cmpxchg((atomic_t *)(v), x, y)
+#define atomic_swap_32(v, x)   atomic_xchg((atomic_t *)(v), x)
 #define atomic_inc_64(v)       atomic64_inc((atomic64_t *)(v))
 #define atomic_dec_64(v)       atomic64_dec((atomic64_t *)(v))
 #define atomic_add_64(v, i)    atomic64_add((i), (atomic64_t *)(v))
@@ -274,6 +300,7 @@ atomic_cas_64(volatile uint64_t *target,  uint64_t cmp,
 #define atomic_add_64_nv(v, i) atomic64_add_return((i), (atomic64_t *)(v))
 #define atomic_sub_64_nv(v, i) atomic64_sub_return((i), (atomic64_t *)(v))
 #define atomic_cas_64(v, x, y) atomic64_cmpxchg((atomic64_t *)(v), x, y)
+#define atomic_swap_64(v, x)   atomic64_xchg((atomic64_t *)(v), x)
 
 #endif /* ATOMIC_SPINLOCK */
 
index f3f3529a9cb331335e1dfa9b4fd778eab01286c9..3a4b1352fbfd915521d87268fa2bf323893b10fa 100644 (file)
 #define SPL_DEBUG_STR  ""
 
 #define PANIC(fmt, a...)                                               \
-do {                                                                   \
-       printk(KERN_EMERG fmt, ## a);                                   \
-       spl_debug_bug(__FILE__, __FUNCTION__, __LINE__, 0);             \
-} while (0)
+       spl_PANIC(__FILE__, __FUNCTION__, __LINE__, fmt, ## a)
 
 #define __ASSERT(x)                    ((void)0)
 #define ASSERT(x)                      ((void)0)
 #define ASSERTF(x, y, z...)            ((void)0)
 #define ASSERTV(x)
 #define VERIFY(cond)                                                   \
-do {                                                                   \
-       if (unlikely(!(cond)))                                          \
-               PANIC("VERIFY(" #cond ") failed\n");                    \
-} while (0)
+       (void)(unlikely(!(cond)) &&                                     \
+               spl_PANIC(__FILE__, __FUNCTION__, __LINE__,             \
+               "%s", "VERIFY(" #cond ") failed\n"))
 
 #define VERIFY3_IMPL(LEFT, OP, RIGHT, TYPE, FMT, CAST)                 \
-do {                                                                   \
-       if (!((TYPE)(LEFT) OP (TYPE)(RIGHT)))                           \
-               PANIC("VERIFY3(" #LEFT " " #OP " " #RIGHT ") "          \
+       (void)((!((TYPE)(LEFT) OP (TYPE)(RIGHT))) &&                    \
+               spl_PANIC(__FILE__, __FUNCTION__, __LINE__,             \
+               "VERIFY3(" #LEFT " " #OP " " #RIGHT ") "                \
                    "failed (" FMT " " #OP " " FMT ")\n",               \
-                   CAST (LEFT), CAST (RIGHT));                         \
-} while (0)
+                   CAST (LEFT), CAST (RIGHT)))
 
 #define VERIFY3S(x,y,z)        VERIFY3_IMPL(x, y, z, int64_t, "%lld", (long long))
 #define VERIFY3U(x,y,z)        VERIFY3_IMPL(x, y, z, uint64_t, "%llu",         \
@@ -94,11 +89,7 @@ do {                                                                 \
 #define SPL_DEBUG_STR  " (DEBUG mode)"
 
 #define PANIC(fmt, a...)                                               \
-do {                                                                   \
-       spl_debug_msg(NULL, 0, 0,                                       \
-            __FILE__, __FUNCTION__, __LINE__,  fmt, ## a);             \
-       spl_debug_bug(__FILE__, __FUNCTION__, __LINE__, 0);             \
-} while (0)
+       spl_PANIC(__FILE__, __FUNCTION__, __LINE__, fmt, ## a)
 
 /* ASSERTION that is safe to use within the debug system */
 #define __ASSERT(cond)                                                 \
@@ -111,24 +102,21 @@ do {                                                                      \
 
 /* ASSERTION that will debug log used outside the debug sysytem */
 #define ASSERT(cond)                                                   \
-do {                                                                   \
-       if (unlikely(!(cond)))                                          \
-               PANIC("ASSERTION(" #cond ") failed\n");                 \
-} while (0)
+       (void)(unlikely(!(cond)) &&                                     \
+               spl_PANIC(__FILE__, __FUNCTION__, __LINE__,             \
+               "%s", "ASSERTION(" #cond ") failed\n"))
 
 #define ASSERTF(cond, fmt, a...)                                       \
-do {                                                                   \
-       if (unlikely(!(cond)))                                          \
-               PANIC("ASSERTION(" #cond ") failed: " fmt, ## a);       \
-} while (0)
+       (void)(unlikely(!(cond)) &&                                     \
+               spl_PANIC(__FILE__, __FUNCTION__, __LINE__,             \
+               "ASSERTION(" #cond ") failed: " fmt, ## a))
 
 #define VERIFY3_IMPL(LEFT, OP, RIGHT, TYPE, FMT, CAST)                 \
-do {                                                                   \
-       if (!((TYPE)(LEFT) OP (TYPE)(RIGHT)))                           \
-               PANIC("VERIFY3(" #LEFT " " #OP " " #RIGHT ") "          \
+       (void)((!((TYPE)(LEFT) OP (TYPE)(RIGHT))) &&                    \
+               spl_PANIC(__FILE__, __FUNCTION__, __LINE__,             \
+               "VERIFY3(" #LEFT " " #OP " " #RIGHT ") "                \
                    "failed (" FMT " " #OP " " FMT ")\n",               \
-                   CAST (LEFT), CAST (RIGHT));                         \
-} while (0)
+                   CAST (LEFT), CAST (RIGHT)))
 
 #define VERIFY3S(x,y,z)        VERIFY3_IMPL(x, y, z, int64_t, "%lld", (long long))
 #define VERIFY3U(x,y,z)        VERIFY3_IMPL(x, y, z, uint64_t, "%llu",         \
@@ -145,4 +133,21 @@ do {                                                                       \
 #define VERIFY(x)      ASSERT(x)
 
 #endif /* NDEBUG */
+
+/*
+ * Helpers for the Solaris debug macros above
+ */
+extern int spl_PANIC(char *filename, const char *functionname,
+       int lineno, const char *fmt, ...);
+
+/*
+ * Compile-time assertion. The condition 'x' must be constant.
+ */
+#define        CTASSERT_GLOBAL(x)              _CTASSERT(x, __LINE__)
+#define        CTASSERT(x)                     { _CTASSERT(x, __LINE__); }
+#define        _CTASSERT(x, y)                 __CTASSERT(x, y)
+#define        __CTASSERT(x, y)                        \
+       typedef char __attribute__ ((unused))   \
+       __compile_time_assertion__ ## y[(x) ? 1 : -1]
+
 #endif /* SPL_DEBUG_H */
index cc59a3aab44780e86b91b9605a7f4ff1c20efef1..aa5ae43c2683224e2bd912cbd5a449038e42a07f 100644 (file)
@@ -75,7 +75,7 @@
 #endif
 
 /* arm arch specific defines */
-#elif defined(__arm) || defined(__arm__)
+#elif defined(__arm) || defined(__arm__) || defined(__aarch64__)
 
 #if !defined(__arm)
 #define __arm
@@ -85,7 +85,7 @@
 #define __arm__
 #endif
 
-#if defined(__ARMEL__)
+#if defined(__ARMEL__) || defined(__AARCH64EL__)
 #define _LITTLE_ENDIAN
 #else
 #define _BIG_ENDIAN
index 516114fd7d118daf80778d73bdc7d89ab32d1155..3418d3dd41103f5c233a5648e2656cd82cac22d4 100644 (file)
@@ -340,8 +340,9 @@ enum {
        KMC_BIT_QCACHE          = 4,    /* XXX: Unsupported */
        KMC_BIT_KMEM            = 5,    /* Use kmem cache */
        KMC_BIT_VMEM            = 6,    /* Use vmem cache */
-       KMC_BIT_OFFSLAB         = 7,    /* Objects not on slab */
-       KMC_BIT_NOEMERGENCY     = 8,    /* Disable emergency objects */
+       KMC_BIT_SLAB            = 7,    /* Use Linux slab cache */
+       KMC_BIT_OFFSLAB         = 8,    /* Objects not on slab */
+       KMC_BIT_NOEMERGENCY     = 9,    /* Disable emergency objects */
        KMC_BIT_DEADLOCKED      = 14,   /* Deadlock detected */
        KMC_BIT_GROWING         = 15,   /* Growing in progress */
        KMC_BIT_REAPING         = 16,   /* Reaping in progress */
@@ -367,6 +368,7 @@ typedef enum kmem_cbrc {
 #define KMC_QCACHE             (1 << KMC_BIT_QCACHE)
 #define KMC_KMEM               (1 << KMC_BIT_KMEM)
 #define KMC_VMEM               (1 << KMC_BIT_VMEM)
+#define KMC_SLAB               (1 << KMC_BIT_SLAB)
 #define KMC_OFFSLAB            (1 << KMC_BIT_OFFSLAB)
 #define KMC_NOEMERGENCY                (1 << KMC_BIT_NOEMERGENCY)
 #define KMC_DEADLOCKED         (1 << KMC_BIT_DEADLOCKED)
@@ -383,6 +385,8 @@ typedef enum kmem_cbrc {
 #define KMC_EXPIRE_AGE         0x1     /* Due to age */
 #define KMC_EXPIRE_MEM         0x2     /* Due to low memory */
 
+#define        KMC_RECLAIM_ONCE        0x1     /* Force a single shrinker pass */
+
 extern unsigned int spl_kmem_cache_expire;
 extern struct list_head spl_kmem_cache_list;
 extern struct rw_semaphore spl_kmem_cache_sem;
@@ -456,6 +460,7 @@ typedef struct spl_kmem_cache {
        spl_kmem_reclaim_t      skc_reclaim;    /* Reclaimator */
        void                    *skc_private;   /* Private data */
        void                    *skc_vmp;       /* Unused */
+       struct kmem_cache       *skc_linux_cache; /* Linux slab cache if used */
        unsigned long           skc_flags;      /* Flags */
        uint32_t                skc_obj_size;   /* Object size */
        uint32_t                skc_obj_align;  /* Object alignment */
@@ -513,4 +518,24 @@ void spl_kmem_fini(void);
 #define kmem_virt(ptr)                 (((ptr) >= (void *)VMALLOC_START) && \
                                         ((ptr) <  (void *)VMALLOC_END))
 
+/*
+ * Allow custom slab allocation flags to be set for KMC_SLAB based caches.
+ * One use for this function is to ensure the __GFP_COMP flag is part of
+ * the default allocation mask which ensures higher order allocations are
+ * properly refcounted.  This flag was added to the default ->allocflags
+ * as of Linux 3.11.
+ */
+static inline void
+kmem_cache_set_allocflags(spl_kmem_cache_t *skc, gfp_t flags)
+{
+       if (skc->skc_linux_cache == NULL)
+               return;
+
+#if defined(HAVE_KMEM_CACHE_ALLOCFLAGS)
+       skc->skc_linux_cache->allocflags |= flags;
+#elif defined(HAVE_KMEM_CACHE_GFPFLAGS)
+       skc->skc_linux_cache->gfpflags |= flags;
+#endif
+}
+
 #endif /* _SPL_KMEM_H */
index b4778b70e83fc0437879f456edc2c893ada64792..565fc5600043c7264de121425d3667d3970b9865 100644 (file)
 #define minclsyspri                    (MAX_RT_PRIO)
 #define maxclsyspri                    (MAX_PRIO-1)
 
+#ifndef NICE_TO_PRIO
 #define NICE_TO_PRIO(nice)             (MAX_RT_PRIO + (nice) + 20)
+#endif
+#ifndef PRIO_TO_NICE
 #define PRIO_TO_NICE(prio)             ((prio) - MAX_RT_PRIO - 20)
+#endif
 
 /* Missing macros
  */
 /* Missing globals */
 extern char spl_version[32];
 extern unsigned long spl_hostid;
-extern char hw_serial[11];
 
 /* Missing misc functions */
 extern int highbit(unsigned long i);
+extern int highbit64(uint64_t i);
 extern uint32_t zone_get_hostid(void *zone);
 extern void spl_setup(void);
 extern void spl_cleanup(void);
@@ -169,6 +173,9 @@ extern void spl_cleanup(void);
 #ifndef roundup
 #define roundup(x, y)          ((((x) + ((y) - 1)) / (y)) * (y))
 #endif
+#ifndef howmany
+#define howmany(x, y)          (((x) + ((y) - 1)) / (y))
+#endif
 
 /*
  * Compatibility macros/typedefs needed for Solaris -> Linux port
index e22a08530d79d46ec6a8ae1398714e96ea43caef..5c0cc4663d8d39baea2e27a79b792a5b5cab03aa 100644 (file)
@@ -25,7 +25,6 @@
 #ifndef _SPL_SYSTEMINFO_H
 #define _SPL_SYSTEMINFO_H
 
-#define HW_INVALID_HOSTID      0xFFFFFFFF      /* an invalid hostid */
 #define HW_HOSTID_LEN          11              /* minimum buffer size needed */
                                                /* to hold a decimal or hex */
                                                /* hostid string */
index 864a74bba99d2199d498aff56cfd0f153aec2354..433a0761d165028cfb8762403cae57f9bf7eab85 100644 (file)
@@ -59,5 +59,7 @@ extern kthread_t *__thread_create(caddr_t stk, size_t  stksize,
                                   void *args, size_t len, proc_t *pp,
                                   int state, pri_t pri);
 extern void __thread_exit(void);
+extern struct task_struct *spl_kthread_create(int (*func)(void *),
+                       void *data, const char namefmt[], ...);
 
 #endif  /* _SPL_THREAD_H */
index 2542510dd299574a9d4e1138cd58ca1433be871d..33d577e719aa63643dc5abc655d3d3e85fcb0bdd 100644 (file)
 #define ddi_get_lbolt()                        ((clock_t)jiffies)
 #define ddi_get_lbolt64()              ((int64_t)get_jiffies_64())
 
+#define ddi_time_before(a, b)          (typecheck(clock_t, a) && \
+                                       typecheck(clock_t, b) && \
+                                       ((a) - (b) < 0))
+#define ddi_time_after(a, b)           ddi_time_before(b, a)
+#define ddi_time_before_eq(a, b)       (!ddi_time_after(a, b))
+#define ddi_time_after_eq(a, b)                ddi_time_before_eq(b, a)
+
+#define ddi_time_before64(a, b)                (typecheck(int64_t, a) && \
+                                       typecheck(int64_t, b) && \
+                                       ((a) - (b) < 0))
+#define ddi_time_after64(a, b)         ddi_time_before64(b, a)
+#define ddi_time_before_eq64(a, b)     (!ddi_time_after64(a, b))
+#define ddi_time_after_eq64(a, b)      ddi_time_before_eq64(b, a)
+
 #define delay(ticks)                   schedule_timeout_uninterruptible(ticks)
 
 #define SEC_TO_TICK(sec)               ((sec) * HZ)
index 3c134f7757ad349da89dd43591438d7df2020917..9b351762cbc9281e0bba466a8541456489e765f9 100644 (file)
@@ -124,3 +124,15 @@ Spin a maximum of N times to acquire lock
 .sp
 .ne -4
 Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBspl_taskq_thread_bind\fR (int)
+.ad
+.RS 12n
+Bind taskq thread to CPU
+.sp
+Default value: \fB0\fR.
+.RE
index 0ed65725eb53a9dcab10e77dc47b0bf49c6051a0..602bd74e883d24720af58f422a370e8500f569e7 100644 (file)
@@ -44,7 +44,8 @@ cr_groups_search(const struct group_info *group_info, kgid_t grp)
 cr_groups_search(const struct group_info *group_info, gid_t grp)
 #endif
 {
-       unsigned int left, right;
+       unsigned int left, right, mid;
+       int cmp;
 
        if (!group_info)
                return 0;
@@ -52,8 +53,10 @@ cr_groups_search(const struct group_info *group_info, gid_t grp)
        left = 0;
        right = group_info->ngroups;
        while (left < right) {
-               unsigned int mid = (left+right)/2;
-               int cmp = KGID_TO_SGID(grp) - KGID_TO_SGID(GROUP_AT(group_info, mid));
+               mid = (left + right) / 2;
+               cmp = KGID_TO_SGID(grp) -
+                   KGID_TO_SGID(GROUP_AT(group_info, mid));
+
                if (cmp > 0)
                        left = mid + 1;
                else if (cmp < 0)
@@ -120,7 +123,7 @@ crgetgroups(const cred_t *cr)
        return gids;
 }
 
-/* Check if the passed gid is available is in supplied credential. */
+/* Check if the passed gid is available in supplied credential. */
 int
 groupmember(gid_t gid, const cred_t *cr)
 {
@@ -128,7 +131,7 @@ groupmember(gid_t gid, const cred_t *cr)
        int rc;
 
        gi = get_group_info(cr->group_info);
-       rc = cr_groups_search(cr->group_info, SGID_TO_KGID(gid));
+       rc = cr_groups_search(gi, SGID_TO_KGID(gid));
        put_group_info(gi);
 
        return rc;
index d450368b1038084646e6068a375f2bc9990e9eee..6c4e043f05da70b0d872fab1fa840d0448f696c4 100644 (file)
@@ -37,7 +37,9 @@
 #include <linux/proc_compat.h>
 #include <linux/file_compat.h>
 #include <linux/swap.h>
+#include <linux/ratelimit.h>
 #include <sys/sysmacros.h>
+#include <sys/thread.h>
 #include <spl-debug.h>
 #include <spl-trace.h>
 #include <spl-ctl.h>
@@ -415,7 +417,7 @@ spl_debug_dumplog(int flags)
                 spl_debug_dumplog_internal(&dp);
         } else {
 
-                tsk = kthread_create(spl_debug_dumplog_thread,(void *)&dp,"spl_debug");
+                tsk = spl_kthread_create(spl_debug_dumplog_thread,(void *)&dp,"spl_debug");
                 if (tsk == NULL)
                         return -ENOMEM;
 
@@ -1072,15 +1074,22 @@ spl_debug_get_mb(void)
 }
 EXPORT_SYMBOL(spl_debug_get_mb);
 
-void spl_debug_dumpstack(struct task_struct *tsk)
-{
-        extern void show_task(struct task_struct *);
+/*
+ * Limit the number of stack traces dumped to not more than 5 every
+ * 60 seconds to prevent denial-of-service attacks from debug code.
+ */
+DEFINE_RATELIMIT_STATE(dumpstack_ratelimit_state, 60 * HZ, 5);
 
-        if (tsk == NULL)
-                tsk = current;
+void
+spl_debug_dumpstack(struct task_struct *tsk)
+{
+       if (__ratelimit(&dumpstack_ratelimit_state)) {
+               if (tsk == NULL)
+                       tsk = current;
 
-        printk("SPL: Showing stack for process %d\n", tsk->pid);
-        dump_stack();
+               printk("SPL: Showing stack for process %d\n", tsk->pid);
+               dump_stack();
+       }
 }
 EXPORT_SYMBOL(spl_debug_dumpstack);
 
index b6d15f01918f1ff68e25f0150a0939a0c504c9d6..2706f9bd13996fc0955d0f69aeb85538fcd905ec 100644 (file)
@@ -39,6 +39,27 @@ static char ce_prefix[CE_IGNORE][10] = { "", "NOTICE: ", "WARNING: ", "" };
 static char ce_suffix[CE_IGNORE][2] = { "", "\n", "\n", "" };
 #endif
 
+int
+spl_PANIC(char *filename, const char *functionname,
+    int lineno, const char *fmt, ...) {
+       char msg[MAXMSGLEN];
+       va_list ap;
+
+       va_start(ap, fmt);
+       if (vsnprintf(msg, sizeof (msg), fmt, ap) == sizeof (msg))
+               msg[sizeof (msg) - 1] = '\0';
+       va_end(ap);
+#ifdef NDEBUG
+       printk(KERN_EMERG "%s", msg);
+#else
+       spl_debug_msg(NULL, 0, 0,
+            filename, functionname, lineno, "%s", msg);
+#endif
+       spl_debug_bug(filename, functionname, lineno, 0);
+       return 1;
+}
+EXPORT_SYMBOL(spl_PANIC);
+
 void
 vpanic(const char *fmt, va_list ap)
 {
index 351f53670b8b50f973e26d6439adc59c63c1d45e..6cb73ddb70e3d991adb37db5385218bf6b6634fc 100644 (file)
 char spl_version[32] = "SPL v" SPL_META_VERSION "-" SPL_META_RELEASE;
 EXPORT_SYMBOL(spl_version);
 
-unsigned long spl_hostid = HW_INVALID_HOSTID;
+unsigned long spl_hostid = 0;
 EXPORT_SYMBOL(spl_hostid);
 module_param(spl_hostid, ulong, 0644);
 MODULE_PARM_DESC(spl_hostid, "The system hostid.");
 
-char hw_serial[HW_HOSTID_LEN] = "<none>";
-EXPORT_SYMBOL(hw_serial);
-
 proc_t p0 = { 0 };
 EXPORT_SYMBOL(p0);
 
@@ -100,6 +97,36 @@ highbit(unsigned long i)
 }
 EXPORT_SYMBOL(highbit);
 
+int
+highbit64(uint64_t i)
+{
+        register int h = 1;
+        SENTRY;
+
+        if (i == 0)
+                SRETURN(0);
+        if (i & 0xffffffff00000000ull) {
+                h += 32; i >>= 32;
+        }
+        if (i & 0xffff0000) {
+                h += 16; i >>= 16;
+        }
+        if (i & 0xff00) {
+                h += 8; i >>= 8;
+        }
+        if (i & 0xf0) {
+                h += 4; i >>= 4;
+        }
+        if (i & 0xc) {
+                h += 2; i >>= 2;
+        }
+        if (i & 0x2) {
+                h += 1;
+        }
+        SRETURN(h);
+}
+EXPORT_SYMBOL(highbit64);
+
 #if BITS_PER_LONG == 32
 /*
  * Support 64/64 => 64 division on a 32-bit platform.  While the kernel
@@ -467,7 +494,7 @@ hostid_read(void)
        int result;
        uint64_t size;
        struct _buf *file;
-       unsigned long hostid = 0;
+       uint32_t hostid = 0;
 
        file = kobj_open_file(spl_hostid_path);
 
@@ -511,45 +538,10 @@ hostid_read(void)
        return 0;
 }
 
-#define GET_HOSTID_CMD \
-       "exec 0</dev/null " \
-       "     1>/proc/sys/kernel/spl/hostid " \
-       "     2>/dev/null; " \
-       "hostid"
-
-static int
-hostid_exec(void)
-{
-       char *argv[] = { "/bin/sh",
-                        "-c",
-                        GET_HOSTID_CMD,
-                        NULL };
-       char *envp[] = { "HOME=/",
-                        "TERM=linux",
-                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
-                        NULL };
-       int rc;
-
-       /* Doing address resolution in the kernel is tricky and just
-        * not a good idea in general.  So to set the proper 'hw_serial'
-        * use the usermodehelper support to ask '/bin/sh' to run
-        * '/usr/bin/hostid' and redirect the result to /proc/sys/spl/hostid
-        * for us to use.  It's a horrific solution but it will do for now.
-        */
-       rc = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
-       if (rc)
-               printk("SPL: Failed user helper '%s %s %s', rc = %d\n",
-                      argv[0], argv[1], argv[2], rc);
-
-       return rc;
-}
-
 uint32_t
 zone_get_hostid(void *zone)
 {
        static int first = 1;
-       unsigned long hostid;
-       int rc;
 
        /* Only the global zone is supported */
        ASSERT(zone == NULL);
@@ -559,21 +551,16 @@ zone_get_hostid(void *zone)
 
                /*
                 * Get the hostid if it was not passed as a module parameter.
-                * Try reading the /etc/hostid file directly, and then fall
-                * back to calling the /usr/bin/hostid utility.
+                * Try reading the /etc/hostid file directly.
                 */
-               if ((spl_hostid == HW_INVALID_HOSTID) &&
-                   (rc = hostid_read()) && (rc = hostid_exec()))
-                       return HW_INVALID_HOSTID;
+               if (hostid_read())
+                       spl_hostid = 0;
 
                printk(KERN_NOTICE "SPL: using hostid 0x%08x\n",
                        (unsigned int) spl_hostid);
        }
 
-       if (ddi_strtoul(hw_serial, NULL, HW_HOSTID_LEN-1, &hostid) != 0)
-               return HW_INVALID_HOSTID;
-
-       return (uint32_t)hostid;
+       return spl_hostid;
 }
 EXPORT_SYMBOL(zone_get_hostid);
 
index 23e47808aaa70931d596e435312b1090e96a3f54..6389dc5aaba76086cc368d844df216b95c942533 100644 (file)
 
 #define SS_DEBUG_SUBSYS SS_KMEM
 
+/*
+ * Within the scope of spl-kmem.c file the kmem_cache_* definitions
+ * are removed to allow access to the real Linux slab allocator.
+ */
+#undef kmem_cache_destroy
+#undef kmem_cache_create
+#undef kmem_cache_alloc
+#undef kmem_cache_free
+
+
 /*
  * Cache expiration was implemented because it was part of the default Solaris
  * kmem_cache behavior.  The idea is that per-cpu objects which haven't been
  * accessed in several seconds should be returned to the cache.  On the other
  * hand Linux slabs never move objects back to the slabs unless there is
- * memory pressure on the system.  By default both methods are disabled, but
- * may be enabled by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM.
+ * memory pressure on the system.  By default the Linux method is enabled
+ * because it has been shown to improve responsiveness on low memory systems.
+ * This policy may be changed by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM.
  */
-unsigned int spl_kmem_cache_expire = 0;
+unsigned int spl_kmem_cache_expire = KMC_EXPIRE_MEM;
 EXPORT_SYMBOL(spl_kmem_cache_expire);
 module_param(spl_kmem_cache_expire, uint, 0644);
 MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
 
+/*
+ * KMC_RECLAIM_ONCE is set as the default until zfsonlinux/spl#268 is
+ * definitively resolved.  Depending on the system configuration and
+ * workload this may increase the likelihood of out of memory events.
+ * For those cases it is advised that this option be set to zero.
+ */
+unsigned int spl_kmem_cache_reclaim = KMC_RECLAIM_ONCE;
+module_param(spl_kmem_cache_reclaim, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
+
+unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
+module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
+
+unsigned int spl_kmem_cache_obj_per_slab_min = SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN;
+module_param(spl_kmem_cache_obj_per_slab_min, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab_min,
+    "Minimal number of objects per slab");
+
+unsigned int spl_kmem_cache_max_size = 32;
+module_param(spl_kmem_cache_max_size, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
+
+unsigned int spl_kmem_cache_slab_limit = 0;
+module_param(spl_kmem_cache_slab_limit, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
+    "Objects less than N bytes use the Linux slab");
+
+unsigned int spl_kmem_cache_kmem_limit = (PAGE_SIZE / 4);
+module_param(spl_kmem_cache_kmem_limit, uint, 0644);
+MODULE_PARM_DESC(spl_kmem_cache_kmem_limit,
+    "Objects less than N bytes use the kmalloc");
+
 /*
  * The minimum amount of memory measured in pages to be free at all
  * times on the system.  This is similar to Linux's zone->pages_min
@@ -681,7 +725,7 @@ kmem_alloc_debug(size_t size, int flags, const char *func, int line,
                    "large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
                    (unsigned long long) size, flags, func, line,
                    kmem_alloc_used_read(), kmem_alloc_max);
-               dump_stack();
+               spl_debug_dumpstack(NULL);
        }
 
        /* Use the correct allocator */
@@ -850,7 +894,8 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
        ASSERT(ISP2(size));
 
        if (skc->skc_flags & KMC_KMEM)
-               ptr = (void *)__get_free_pages(flags, get_order(size));
+               ptr = (void *)__get_free_pages(flags | __GFP_COMP,
+                   get_order(size));
        else
                ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
 
@@ -1333,7 +1378,10 @@ spl_cache_age(void *data)
                return;
 
        atomic_inc(&skc->skc_ref);
-       spl_on_each_cpu(spl_magazine_age, skc, 1);
+
+       if (!(skc->skc_flags & KMC_NOMAGAZINE))
+               spl_on_each_cpu(spl_magazine_age, skc, 1);
+
        spl_slab_reclaim(skc, skc->skc_reap, 0);
 
        while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) {
@@ -1355,10 +1403,10 @@ spl_cache_age(void *data)
 
 /*
  * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
- * When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB.  However,
+ * When on-slab we want to target spl_kmem_cache_obj_per_slab.  However,
  * for very small objects we may end up with more than this so as not
  * to waste space in the minimal allocation of a single page.  Also for
- * very large objects we may use as few as SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN,
+ * very large objects we may use as few as spl_kmem_cache_obj_per_slab_min,
  * lower than this and we will fail.
  */
 static int
@@ -1367,7 +1415,7 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
        uint32_t sks_size, obj_size, max_size;
 
        if (skc->skc_flags & KMC_OFFSLAB) {
-               *objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;
+               *objs = spl_kmem_cache_obj_per_slab;
                *size = P2ROUNDUP(sizeof(spl_kmem_slab_t), PAGE_SIZE);
                SRETURN(0);
        } else {
@@ -1377,12 +1425,12 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
                if (skc->skc_flags & KMC_KMEM)
                        max_size = ((uint32_t)1 << (MAX_ORDER-3)) * PAGE_SIZE;
                else
-                       max_size = (32 * 1024 * 1024);
+                       max_size = (spl_kmem_cache_max_size * 1024 * 1024);
 
                /* Power of two sized slab */
                for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
                        *objs = (*size - sks_size) / obj_size;
-                       if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)
+                       if (*objs >= spl_kmem_cache_obj_per_slab)
                                SRETURN(0);
                }
 
@@ -1393,7 +1441,7 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
                 */
                *size = max_size;
                *objs = (*size - sks_size) / obj_size;
-               if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN)
+               if (*objs >= (spl_kmem_cache_obj_per_slab_min))
                        SRETURN(0);
        }
 
@@ -1478,6 +1526,9 @@ spl_magazine_create(spl_kmem_cache_t *skc)
        int i;
        SENTRY;
 
+       if (skc->skc_flags & KMC_NOMAGAZINE)
+               SRETURN(0);
+
        skc->skc_mag_size = spl_magazine_size(skc);
        skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
 
@@ -1504,6 +1555,11 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
        int i;
        SENTRY;
 
+       if (skc->skc_flags & KMC_NOMAGAZINE) {
+               SEXIT;
+               return;
+       }
+
         for_each_online_cpu(i) {
                skm = skc->skc_mag[i];
                spl_cache_flush(skc, skm, skm->skm_avail);
@@ -1526,11 +1582,12 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
  * flags
  *     KMC_NOTOUCH     Disable cache object aging (unsupported)
  *     KMC_NODEBUG     Disable debugging (unsupported)
- *     KMC_NOMAGAZINE  Disable magazine (unsupported)
  *     KMC_NOHASH      Disable hashing (unsupported)
  *     KMC_QCACHE      Disable qcache (unsupported)
+ *     KMC_NOMAGAZINE  Enabled for kmem/vmem, Disabled for Linux slab
  *     KMC_KMEM        Force kmem backed cache
  *     KMC_VMEM        Force vmem backed cache
+ *     KMC_SLAB        Force Linux slab backed cache
  *     KMC_OFFSLAB     Locate objects off the slab
  */
 spl_kmem_cache_t *
@@ -1576,6 +1633,7 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
        skc->skc_reclaim = reclaim;
        skc->skc_private = priv;
        skc->skc_vmp = vmp;
+       skc->skc_linux_cache = NULL;
        skc->skc_flags = flags;
        skc->skc_obj_size = size;
        skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
@@ -1602,28 +1660,69 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
        skc->skc_obj_emergency = 0;
        skc->skc_obj_emergency_max = 0;
 
+       /*
+        * Verify the requested alignment restriction is sane.
+        */
        if (align) {
                VERIFY(ISP2(align));
-               VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN); /* Min alignment */
-               VERIFY3U(align, <=, PAGE_SIZE);            /* Max alignment */
+               VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
+               VERIFY3U(align, <=, PAGE_SIZE);
                skc->skc_obj_align = align;
        }
 
-       /* If none passed select a cache type based on object size */
-       if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) {
-               if (spl_obj_size(skc) < (PAGE_SIZE / 8))
+       /*
+        * When no specific type of slab is requested (kmem, vmem, or
+        * linuxslab) then select a cache type based on the object size
+        * and default tunables.
+        */
+       if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB))) {
+
+               /*
+                * Objects smaller than spl_kmem_cache_slab_limit can
+                * use the Linux slab for better space-efficiency.  By
+                * default this functionality is disabled until its
+                * performance characteristics are fully understood.
+                */
+               if (spl_kmem_cache_slab_limit &&
+                   size <= (size_t)spl_kmem_cache_slab_limit)
+                       skc->skc_flags |= KMC_SLAB;
+
+               /*
+                * Small objects, less than spl_kmem_cache_kmem_limit per
+                * object should use kmem because their slabs are small.
+                */
+               else if (spl_obj_size(skc) <= spl_kmem_cache_kmem_limit)
                        skc->skc_flags |= KMC_KMEM;
+
+               /*
+                * All other objects are considered large and are placed
+                * on vmem backed slabs.
+                */
                else
                        skc->skc_flags |= KMC_VMEM;
        }
 
-       rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size);
-       if (rc)
-               SGOTO(out, rc);
+       /*
+        * Given the type of slab allocate the required resources.
+        */
+       if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
+               rc = spl_slab_size(skc,
+                   &skc->skc_slab_objs, &skc->skc_slab_size);
+               if (rc)
+                       SGOTO(out, rc);
+
+               rc = spl_magazine_create(skc);
+               if (rc)
+                       SGOTO(out, rc);
+       } else {
+               skc->skc_linux_cache = kmem_cache_create(
+                   skc->skc_name, size, align, 0, NULL);
+               if (skc->skc_linux_cache == NULL)
+                       SGOTO(out, rc = ENOMEM);
 
-       rc = spl_magazine_create(skc);
-       if (rc)
-               SGOTO(out, rc);
+               kmem_cache_set_allocflags(skc, __GFP_COMP);
+               skc->skc_flags |= KMC_NOMAGAZINE;
+       }
 
        if (spl_kmem_cache_expire & KMC_EXPIRE_AGE)
                skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq,
@@ -1665,6 +1764,7 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
        SENTRY;
 
        ASSERT(skc->skc_magic == SKC_MAGIC);
+       ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB));
 
        down_write(&spl_kmem_cache_sem);
        list_del_init(&skc->skc_list);
@@ -1684,8 +1784,14 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
         * cache reaping action which races with this destroy. */
        wait_event(wq, atomic_read(&skc->skc_ref) == 0);
 
-       spl_magazine_destroy(skc);
-       spl_slab_reclaim(skc, 0, 1);
+       if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
+               spl_magazine_destroy(skc);
+               spl_slab_reclaim(skc, 0, 1);
+       } else {
+               ASSERT(skc->skc_flags & KMC_SLAB);
+               kmem_cache_destroy(skc->skc_linux_cache);
+       }
+
        spin_lock(&skc->skc_lock);
 
        /* Validate there are no objects in use and free all the
@@ -1791,7 +1897,9 @@ spl_cache_reclaim_wait(void *word)
 }
 
 /*
- * No available objects on any slabs, create a new slab.
+ * No available objects on any slabs, create a new slab.  Note that this
+ * functionality is disabled for KMC_SLAB caches which are backed by the
+ * Linux slab.
  */
 static int
 spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
@@ -1800,6 +1908,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
        SENTRY;
 
        ASSERT(skc->skc_magic == SKC_MAGIC);
+       ASSERT((skc->skc_flags & KMC_SLAB) == 0);
        might_sleep();
        *obj = NULL;
 
@@ -2001,7 +2110,28 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
        ASSERT(flags & KM_SLEEP);
+
        atomic_inc(&skc->skc_ref);
+
+       /*
+        * Allocate directly from a Linux slab.  All optimizations are left
+        * to the underlying cache we only need to guarantee that KM_SLEEP
+        * callers will never fail.
+        */
+       if (skc->skc_flags & KMC_SLAB) {
+               struct kmem_cache *slc = skc->skc_linux_cache;
+
+               do {
+                       obj = kmem_cache_alloc(slc, flags | __GFP_COMP);
+                       if (obj && skc->skc_ctor)
+                               skc->skc_ctor(obj, skc->skc_private, flags);
+
+               } while ((obj == NULL) && !(flags & KM_NOSLEEP));
+
+               atomic_dec(&skc->skc_ref);
+               SRETURN(obj);
+       }
+
        local_irq_disable();
 
 restart:
@@ -2053,6 +2183,17 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
        atomic_inc(&skc->skc_ref);
 
+       /*
+        * Free the object from the underlying Linux slab.
+        */
+       if (skc->skc_flags & KMC_SLAB) {
+               if (skc->skc_dtor)
+                       skc->skc_dtor(obj, skc->skc_private);
+
+               kmem_cache_free(skc->skc_linux_cache, obj);
+               goto out;
+       }
+
        /*
         * Only virtual slabs may have emergency objects and these objects
         * are guaranteed to have physical addresses.  They must be removed
@@ -2104,7 +2245,7 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
     struct shrink_control *sc)
 {
        spl_kmem_cache_t *skc;
-       int unused = 0;
+       int alloc = 0;
 
        down_read(&spl_kmem_cache_sem);
        list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
@@ -2113,24 +2254,25 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
                           MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
 
                /*
-                * Presume everything alloc'ed in reclaimable, this ensures
+                * Presume everything alloc'ed is reclaimable, this ensures
                 * we are called again with nr_to_scan > 0 so can try and
                 * reclaim.  The exact number is not important either so
                 * we forgo taking this already highly contented lock.
                 */
-               unused += skc->skc_obj_alloc;
+               alloc += skc->skc_obj_alloc;
        }
        up_read(&spl_kmem_cache_sem);
 
        /*
-        * After performing reclaim always return -1 to indicate we cannot
-        * perform additional reclaim.  This prevents shrink_slabs() from
-        * repeatedly invoking this generic shrinker and potentially spinning.
+        * When KMC_RECLAIM_ONCE is set allow only a single reclaim pass.
+        * This functionality only exists to work around a rare issue where
+        * shrink_slabs() is repeatedly invoked by many cores causing the
+        * system to thrash.
         */
-       if (sc->nr_to_scan)
-               return -1;
+       if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
+               return (-1);
 
-       return unused;
+       return MAX((alloc * sysctl_vfs_cache_pressure) / 100, 0);
 }
 
 SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
@@ -2151,13 +2293,27 @@ spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
 
-       /* Prevent concurrent cache reaping when contended */
-       if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
-               SEXIT;
-               return;
+       atomic_inc(&skc->skc_ref);
+
+       /*
+        * Execute the registered reclaim callback if it exists.  The
+        * per-cpu caches will be drained when KMC_EXPIRE_MEM is set.
+        */
+       if (skc->skc_flags & KMC_SLAB) {
+               if (skc->skc_reclaim)
+                       skc->skc_reclaim(skc->skc_private);
+
+               if (spl_kmem_cache_expire & KMC_EXPIRE_MEM)
+                       kmem_cache_shrink(skc->skc_linux_cache);
+
+               SGOTO(out, 0);
        }
 
-       atomic_inc(&skc->skc_ref);
+       /*
+        * Prevent concurrent cache reaping when contended.
+        */
+       if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
+               SGOTO(out, 0);
 
        /*
         * When a reclaim function is available it may be invoked repeatedly
@@ -2207,7 +2363,7 @@ spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
        clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
-
+out:
        atomic_dec(&skc->skc_ref);
 
        SEXIT;
index f25239aca9c452f3c7535bfa901ca6cce5a0d466..b4be84fef9bbef890c2f55d3a4ca6bd1e0a8e8c1 100644 (file)
@@ -72,7 +72,6 @@ struct proc_dir_entry *proc_spl_kstat = NULL;
 
 #define CTL_VERSION            CTL_UNNUMBERED /* Version */
 #define CTL_HOSTID             CTL_UNNUMBERED /* Host id by /usr/bin/hostid */
-#define CTL_HW_SERIAL          CTL_UNNUMBERED /* HW serial number by hostid */
 #define CTL_KALLSYMS           CTL_UNNUMBERED /* kallsyms_lookup_name addr */
 
 #define CTL_DEBUG_SUBSYS       CTL_UNNUMBERED /* Debug subsystem */
@@ -129,7 +128,6 @@ enum {
 enum {
        CTL_VERSION = 1,                /* Version */
        CTL_HOSTID,                     /* Host id reported by /usr/bin/hostid */
-       CTL_HW_SERIAL,                  /* Hardware serial number from hostid */
        CTL_KALLSYMS,                   /* Address of kallsyms_lookup_name */
 
 #ifdef DEBUG_LOG
@@ -513,9 +511,6 @@ SPL_PROC_HANDLER(proc_dohostid)
                 if (str == end)
                         SRETURN(-EINVAL);
 
-                (void) snprintf(hw_serial, HW_HOSTID_LEN, "%lu", spl_hostid);
-                hw_serial[HW_HOSTID_LEN - 1] = '\0';
-                *ppos += *lenp;
         } else {
                 len = snprintf(str, sizeof(str), "%lx", spl_hostid);
                 if (*ppos >= len)
@@ -651,6 +646,12 @@ slab_seq_show(struct seq_file *f, void *p)
 
         ASSERT(skc->skc_magic == SKC_MAGIC);
 
+       /*
+        * Backed by Linux slab see /proc/slabinfo.
+        */
+       if (skc->skc_flags & KMC_SLAB)
+               return (0);
+
         spin_lock(&skc->skc_lock);
         seq_printf(f, "%-36s  ", skc->skc_name);
         seq_printf(f, "0x%05lx %9lu %9lu %8u %8u  "
@@ -1058,14 +1059,6 @@ static struct ctl_table spl_table[] = {
                 .mode     = 0644,
                 .proc_handler = &proc_dohostid,
         },
-        {
-                CTL_NAME    (CTL_HW_SERIAL)
-                .procname = "hw_serial",
-                .data     = hw_serial,
-                .maxlen   = sizeof(hw_serial),
-                .mode     = 0444,
-                .proc_handler = &proc_dostring,
-        },
 #ifndef HAVE_KALLSYMS_LOOKUP_NAME
         {
                 CTL_NAME    (CTL_KALLSYMS)
index 3605a0f3b8ddd14647906836328100adc4b385ea..0cb2ceeaf15f0818b36ca520e4a75c90318d5300 100644 (file)
 
 #define SS_DEBUG_SUBSYS SS_TASKQ
 
+int spl_taskq_thread_bind = 0;
+module_param(spl_taskq_thread_bind, int, 0644);
+MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
+
 /* Global system-wide dynamic task queue available for all consumers */
 taskq_t *system_taskq;
 EXPORT_SYMBOL(system_taskq);
@@ -781,6 +785,7 @@ taskq_t *
 taskq_create(const char *name, int nthreads, pri_t pri,
     int minalloc, int maxalloc, uint_t flags)
 {
+       static int last_used_cpu = 0;
        taskq_t *tq;
        taskq_thread_t *tqt;
        int rc = 0, i, j = 0;
@@ -839,10 +844,14 @@ taskq_create(const char *name, int nthreads, pri_t pri,
                tqt->tqt_tq = tq;
                tqt->tqt_id = 0;
 
-               tqt->tqt_thread = kthread_create(taskq_thread, tqt,
+               tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
                    "%s/%d", name, i);
                if (tqt->tqt_thread) {
                        list_add(&tqt->tqt_thread_list, &tq->tq_thread_list);
+                       if (spl_taskq_thread_bind) {
+                               last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
+                               kthread_bind(tqt->tqt_thread, last_used_cpu);
+                       }
                        set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(pri));
                        wake_up_process(tqt->tqt_thread);
                        j++;
index 6b3bec509372f086ca1482304e9d60eb04fcf4a9..5c851405177eea7779efdcb41b794501334a7510 100644 (file)
@@ -126,7 +126,7 @@ __thread_create(caddr_t stk, size_t  stksize, thread_func_t func,
        tp->tp_state = state;
        tp->tp_pri   = pri;
 
-       tsk = kthread_create(thread_generic_wrapper, (void *)tp,
+       tsk = spl_kthread_create(thread_generic_wrapper, (void *)tp,
                             "%s", tp->tp_name);
        if (IS_ERR(tsk)) {
                SERROR("Failed to create thread: %ld\n", PTR_ERR(tsk));
@@ -137,3 +137,34 @@ __thread_create(caddr_t stk, size_t  stksize, thread_func_t func,
        SRETURN((kthread_t *)tsk);
 }
 EXPORT_SYMBOL(__thread_create);
+
+/*
+ * spl_kthread_create - Wrapper providing pre-3.13 semantics for
+ * kthread_create() in which it is not killable and less likely
+ * to return -ENOMEM.
+ */
+struct task_struct *
+spl_kthread_create(int (*func)(void *), void *data, const char namefmt[], ...)
+{
+       struct task_struct *tsk;
+       va_list args;
+       char name[TASK_COMM_LEN];
+
+       va_start(args, namefmt);
+       vsnprintf(name, sizeof(name), namefmt, args);
+       va_end(args);
+       do {
+               tsk = kthread_create(func, data, "%s", name);
+               if (IS_ERR(tsk)) {
+                       if (signal_pending(current)) {
+                               clear_thread_flag(TIF_SIGPENDING);
+                               continue;
+                       }
+                       if (PTR_ERR(tsk) == -ENOMEM)
+                               continue;
+                       return (NULL);
+               } else
+                       return (tsk);
+       } while (1);
+}
+EXPORT_SYMBOL(spl_kthread_create);
index 549606770fa140b0bd534ed365e1a1e7cb096a43..fa3e49054fd301b312438ffee743398841cb2ed5 100644 (file)
@@ -414,13 +414,16 @@ vn_rename(const char *oldname, const char *newname, int x1)
                        SGOTO(exit4, rc);
        }
 
-#ifdef HAVE_4ARGS_VFS_RENAME
+#if defined(HAVE_4ARGS_VFS_RENAME)
        rc = vfs_rename(old_dir->d_inode, old_dentry,
            new_dir->d_inode, new_dentry);
-#else
+#elif defined(HAVE_5ARGS_VFS_RENAME)
        rc = vfs_rename(old_dir->d_inode, old_dentry,
            new_dir->d_inode, new_dentry, NULL);
-#endif /* HAVE_4ARGS_VFS_RENAME */
+#else
+       rc = vfs_rename(old_dir->d_inode, old_dentry,
+           new_dir->d_inode, new_dentry, NULL, 0);
+#endif
 exit4:
        unlock_rename(new_dir, old_dir);
 exit3:
@@ -574,13 +577,16 @@ vn_rename(const char *oldname, const char *newname, int x1)
         if (new_dentry == trap)
                 SGOTO(exit5, rc);
 
-#ifdef HAVE_4ARGS_VFS_RENAME
+#if defined(HAVE_4ARGS_VFS_RENAME)
        rc = vfs_rename(old_dir->d_inode, old_dentry,
            new_dir->d_inode, new_dentry);
-#else
+#elif defined(HAVE_5ARGS_VFS_RENAME)
        rc = vfs_rename(old_dir->d_inode, old_dentry,
            new_dir->d_inode, new_dentry, NULL);
-#endif /* HAVE_4ARGS_VFS_RENAME */
+#else
+       rc = vfs_rename(old_dir->d_inode, old_dentry,
+           new_dir->d_inode, new_dentry, NULL, 0);
+#endif
 exit5:
         dput(new_dentry);
 exit4:
index 1ddde39bbf3f70153af93d61acdb78f2c203905d..3ee2ffc9e7a7b590ecc7288519f0e8b6b1cca7f9 100644 (file)
@@ -108,7 +108,7 @@ splat_condvar_test1(struct file *file, void *arg)
                ct[i].ct_cvp = &cv;
                ct[i].ct_name = SPLAT_CONDVAR_TEST1_NAME;
                ct[i].ct_rc = 0;
-               ct[i].ct_thread = kthread_create(splat_condvar_test12_thread,
+               ct[i].ct_thread = spl_kthread_create(splat_condvar_test12_thread,
                    &ct[i], "%s/%d", SPLAT_CONDVAR_TEST_NAME, i);
 
                if (!IS_ERR(ct[i].ct_thread)) {
@@ -173,7 +173,7 @@ splat_condvar_test2(struct file *file, void *arg)
                ct[i].ct_cvp = &cv;
                ct[i].ct_name = SPLAT_CONDVAR_TEST2_NAME;
                ct[i].ct_rc = 0;
-               ct[i].ct_thread = kthread_create(splat_condvar_test12_thread,
+               ct[i].ct_thread = spl_kthread_create(splat_condvar_test12_thread,
                    &ct[i], "%s/%d", SPLAT_CONDVAR_TEST_NAME, i);
 
                if (!IS_ERR(ct[i].ct_thread)) {
@@ -254,7 +254,7 @@ splat_condvar_test3(struct file *file, void *arg)
                ct[i].ct_cvp = &cv;
                ct[i].ct_name = SPLAT_CONDVAR_TEST3_NAME;
                ct[i].ct_rc = 0;
-               ct[i].ct_thread = kthread_create(splat_condvar_test34_thread,
+               ct[i].ct_thread = spl_kthread_create(splat_condvar_test34_thread,
                    &ct[i], "%s/%d", SPLAT_CONDVAR_TEST_NAME, i);
 
                if (!IS_ERR(ct[i].ct_thread)) {
@@ -324,7 +324,7 @@ splat_condvar_test4(struct file *file, void *arg)
                ct[i].ct_cvp = &cv;
                ct[i].ct_name = SPLAT_CONDVAR_TEST3_NAME;
                ct[i].ct_rc = 0;
-               ct[i].ct_thread = kthread_create(splat_condvar_test34_thread,
+               ct[i].ct_thread = spl_kthread_create(splat_condvar_test34_thread,
                    &ct[i], "%s/%d", SPLAT_CONDVAR_TEST_NAME, i);
 
                if (!IS_ERR(ct[i].ct_thread)) {
index 47dfa02f6b8cd51620b0e444745bf5249bccf5dd..fadf9bca0c7c724b176576d38572b31d757adaa9 100644 (file)
@@ -25,6 +25,7 @@
 \*****************************************************************************/
 
 #include <sys/cred.h>
+#include <sys/random.h>
 #include "splat-internal.h"
 
 #define SPLAT_CRED_NAME                        "cred"
@@ -166,42 +167,89 @@ splat_cred_test2(struct file *file, void *arg)
 } /* splat_cred_test2() */
 
 /*
- * On most/all systems it can be expected that a task with root
- * permissions also is a member of the root group,  Since the
- * test suite is always run as root we check first that CRED() is
- * a member of the root group, and secondly that it is not a member
- * of our fake group.  This test will break is someone happens to
- * create group number NGROUPS_MAX-1 and then added root to it.
+ * Verify the groupmember() works correctly by constructing an interesting
+ * CRED() and checking that the expected gids are part of it.
  */
 static int
 splat_cred_test3(struct file *file, void *arg)
 {
-       gid_t root_gid, fake_gid;
-       int rc;
+       gid_t known_gid, missing_gid, tmp_gid;
+       unsigned char rnd;
+       struct group_info *gi;
+       int i, rc;
+
+       get_random_bytes((void *)&rnd, 1);
+       known_gid = (rnd > 0) ? rnd : 1;
+       missing_gid = 0;
+
+       /*
+        * Create an interesting known set of gids for test purposes. The
+        * gids are pseudo randomly selected and will be in the range of
+        * 1:(NGROUPS_MAX-1).  Gid 0 is explicitly avoided so we can reliably
+        * test for its absence in the test cases.
+        */
+       gi = groups_alloc(NGROUPS_SMALL);
+       if (gi == NULL) {
+               splat_vprint(file, SPLAT_CRED_TEST3_NAME, "Failed create "
+                   "group_info for known gids: %d\n", -ENOMEM);
+               rc = -ENOMEM;
+               goto show_groups;
+       }
+
+       for (i = 0, tmp_gid = known_gid; i < NGROUPS_SMALL; i++) {
+               splat_vprint(file, SPLAT_CRED_TEST3_NAME, "Adding gid %d "
+                   "to current CRED() (%d/%d)\n", tmp_gid, i, gi->ngroups);
+#ifdef HAVE_KUIDGID_T
+               GROUP_AT(gi, i) = make_kgid(current_user_ns(), tmp_gid);
+#else
+               GROUP_AT(gi, i) = tmp_gid;
+#endif /* HAVE_KUIDGID_T */
+               tmp_gid = ((tmp_gid * 17) % (NGROUPS_MAX - 1)) + 1;
+       }
 
-       root_gid = 0;
-       fake_gid = NGROUPS_MAX-1;
+       /* Set the new groups in the CRED() and release our reference. */
+       rc = set_current_groups(gi);
+       put_group_info(gi);
 
-       rc = groupmember(root_gid, CRED());
+       if (rc) {
+               splat_vprint(file, SPLAT_CRED_TEST3_NAME, "Failed to add "
+                   "gid %d to current group: %d\n", known_gid, rc);
+               goto show_groups;
+       }
+
+       /* Verify groupmember() finds the known_gid in the CRED() */
+       rc = groupmember(known_gid, CRED());
        if (!rc) {
-               splat_vprint(file, SPLAT_CRED_TEST3_NAME,
-                            "Failed root git %d expected to be member "
-                            "of CRED() groups: %d\n", root_gid, rc);
-               return -EIDRM;
+               splat_vprint(file, SPLAT_CRED_TEST3_NAME, "Failed to find "
+                   "known gid %d in CRED()'s groups.\n", known_gid);
+               rc = -EIDRM;
+               goto show_groups;
        }
 
-       rc = groupmember(fake_gid, CRED());
+       /* Verify groupmember() does NOT find the missing gid in the CRED() */
+       rc = groupmember(missing_gid, CRED());
        if (rc) {
-               splat_vprint(file, SPLAT_CRED_TEST3_NAME,
-                            "Failed fake git %d expected not to be member "
-                            "of CRED() groups: %d\n", fake_gid, rc);
-               return -EIDRM;
+               splat_vprint(file, SPLAT_CRED_TEST3_NAME, "Failed missing "
+                   "gid %d was found in CRED()'s groups.\n", missing_gid);
+               rc = -EIDRM;
+               goto show_groups;
+       }
+
+       splat_vprint(file, SPLAT_CRED_TEST3_NAME, "Success groupmember() "
+           "correctly detects expected gids in CRED(): %d\n", rc);
+
+show_groups:
+       if (rc) {
+               int i, grps = crgetngroups(CRED());
+
+               splat_vprint(file, SPLAT_CRED_TEST3_NAME, "%d groups: ", grps);
+               for (i = 0; i < grps; i++)
+                       splat_print(file, "%d ", crgetgroups(CRED())[i]);
+               splat_print(file, "%s", "\n");
        }
 
-       splat_vprint(file, SPLAT_CRED_TEST3_NAME, "Success root gid "
-                    "is a member of the expected groups: %d\n", rc);
 
-       return rc;
+       return (rc);
 } /* splat_cred_test3() */
 
 splat_subsystem_t *
index 28ff837215c9ad320b7297e4d6749cc4268fbb88..4d060c138bb0b276b2445770360d75ed07cf87c0 100644 (file)
@@ -244,7 +244,7 @@ splat_kmem_test4(struct file *file, void *arg)
 #define SPLAT_KMEM_TEST_MAGIC          0x004488CCUL
 #define SPLAT_KMEM_CACHE_NAME          "kmem_test"
 #define SPLAT_KMEM_OBJ_COUNT           1024
-#define SPLAT_KMEM_OBJ_RECLAIM         1000 /* objects */
+#define SPLAT_KMEM_OBJ_RECLAIM         32 /* objects */
 #define SPLAT_KMEM_THREADS             32
 
 #define KCP_FLAG_READY                 0x01
@@ -394,18 +394,25 @@ splat_kmem_cache_test_debug(struct file *file, char *name,
 {
        int j;
 
-       splat_vprint(file, name,
-                    "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
-                    kcp->kcp_cache->skc_name, kcp->kcp_count,
+       splat_vprint(file, name, "%s cache objects %d",
+            kcp->kcp_cache->skc_name, kcp->kcp_count);
+
+       if (kcp->kcp_cache->skc_flags & (KMC_KMEM | KMC_VMEM)) {
+               splat_vprint(file, name, ", slabs %u/%u objs %u/%u",
                     (unsigned)kcp->kcp_cache->skc_slab_alloc,
                     (unsigned)kcp->kcp_cache->skc_slab_total,
                     (unsigned)kcp->kcp_cache->skc_obj_alloc,
                     (unsigned)kcp->kcp_cache->skc_obj_total);
 
-       for_each_online_cpu(j)
-               splat_print(file, "%u/%u ",
-                            kcp->kcp_cache->skc_mag[j]->skm_avail,
-                            kcp->kcp_cache->skc_mag[j]->skm_size);
+               if (!(kcp->kcp_cache->skc_flags & KMC_NOMAGAZINE)) {
+                       splat_vprint(file, name, "%s", "mags");
+
+                       for_each_online_cpu(j)
+                               splat_print(file, "%u/%u ",
+                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
+                                    kcp->kcp_cache->skc_mag[j]->skm_size);
+               }
+       }
 
        splat_print(file, "%s\n", "");
 }
@@ -895,18 +902,19 @@ splat_kmem_test8(struct file *file, void *arg)
                goto out_kct;
        }
 
-       for (i = 0; i < 60; i++) {
+       /* Force reclaim every 1/10 a second for 60 seconds. */
+       for (i = 0; i < 600; i++) {
                kmem_cache_reap_now(kcp->kcp_cache);
                splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);
 
-               if (kcp->kcp_cache->skc_obj_total == 0)
+               if (kcp->kcp_count == 0)
                        break;
 
                set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(HZ);
+               schedule_timeout(HZ / 10);
        }
 
-       if (kcp->kcp_cache->skc_obj_total == 0) {
+       if (kcp->kcp_count == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                        "Successfully created %d objects "
                        "in cache %s and reclaimed them\n",
@@ -914,7 +922,7 @@ splat_kmem_test8(struct file *file, void *arg)
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                        "Failed to reclaim %u/%d objects from cache %s\n",
-                       (unsigned)kcp->kcp_cache->skc_obj_total,
+                       (unsigned)kcp->kcp_count,
                        SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }
@@ -994,14 +1002,14 @@ splat_kmem_test9(struct file *file, void *arg)
        for (i = 0; i < 60; i++) {
                splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST9_NAME, kcp);
 
-               if (kcp->kcp_cache->skc_obj_total == 0)
+               if (kcp->kcp_count == 0)
                        break;
 
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }
 
-       if (kcp->kcp_cache->skc_obj_total == 0) {
+       if (kcp->kcp_count == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                        "Successfully created %d objects "
                        "in cache %s and reclaimed them\n",
@@ -1009,7 +1017,7 @@ splat_kmem_test9(struct file *file, void *arg)
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                        "Failed to reclaim %u/%d objects from cache %s\n",
-                       (unsigned)kcp->kcp_cache->skc_obj_total, count,
+                       (unsigned)kcp->kcp_count, count,
                        SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }
@@ -1271,7 +1279,7 @@ splat_kmem_test13(struct file *file, void *arg)
                        break;
                }
 
-               dp = (dummy_page_t *)__get_free_page(GFP_KERNEL | __GFP_NORETRY);
+               dp = (dummy_page_t *)__get_free_page(GFP_KERNEL);
                if (!dp) {
                        fails++;
                        splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
index a865fb3108c8e47cbfb3369719abea499cc9bee1..6faf7d24e227f4aa1532d56c19d924e779f98f35 100644 (file)
@@ -215,10 +215,10 @@ splat_rwlock_test1(struct file *file, void *arg)
 
                /* The first thread will be the writer */
                if (i == 0)
-                       rwt[i].rwt_thread = kthread_create(splat_rwlock_wr_thr,
+                       rwt[i].rwt_thread = spl_kthread_create(splat_rwlock_wr_thr,
                            &rwt[i], "%s/%d", SPLAT_RWLOCK_TEST_NAME, i);
                else
-                       rwt[i].rwt_thread = kthread_create(splat_rwlock_rd_thr,
+                       rwt[i].rwt_thread = spl_kthread_create(splat_rwlock_rd_thr,
                            &rwt[i], "%s/%d", SPLAT_RWLOCK_TEST_NAME, i);
 
                if (!IS_ERR(rwt[i].rwt_thread)) {
index e4793d4578dcc074ee32798f3fa218ac816c9486..074af895b2f031b706fe14ea9b44220046683bf1 100644 (file)
@@ -82,7 +82,7 @@ typedef struct splat_taskq_arg {
        atomic_t *count;
        int order[SPLAT_TASKQ_ORDER_MAX];
        unsigned int depth;
-       unsigned long expire;
+       clock_t expire;
        taskq_t *tq;
        taskq_ent_t *tqe;
        spinlock_t lock;
@@ -1140,7 +1140,7 @@ splat_taskq_test9_func(void *arg)
        splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
        ASSERT(tq_arg);
 
-       if (ddi_get_lbolt() >= tq_arg->expire)
+       if (ddi_time_after_eq(ddi_get_lbolt(), tq_arg->expire))
                atomic_inc(tq_arg->count);
 
        kmem_free(tq_arg, sizeof(splat_taskq_arg_t));
@@ -1228,7 +1228,7 @@ splat_taskq_test10_func(void *arg)
        splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
        uint8_t rnd;
 
-       if (ddi_get_lbolt() >= tq_arg->expire)
+       if (ddi_time_after_eq(ddi_get_lbolt(), tq_arg->expire))
                atomic_inc(tq_arg->count);
 
        /* Randomly sleep to further perturb the system */
@@ -1249,7 +1249,7 @@ splat_taskq_test10(struct file *file, void *arg)
        int canceled = 0;
        int completed = 0;
        int blocked = 0;
-       unsigned long start, cancel;
+       clock_t start, cancel;
 
        tqas = vmalloc(sizeof(*tqas) * nr_tasks);
        if (tqas == NULL)
@@ -1327,7 +1327,7 @@ splat_taskq_test10(struct file *file, void *arg)
        start = ddi_get_lbolt();
        i = 0;
 
-       while (ddi_get_lbolt() < start + 5 * HZ) {
+       while (ddi_time_before(ddi_get_lbolt(), start + 5 * HZ)) {
                taskqid_t id;
                uint32_t rnd;
 
index d0a649036d21ecc663d854209eb8b77e45557a25..6d8e058cb01acb3244c0700baf05bed2df683bfd 100644 (file)
@@ -1,3 +1,5 @@
+%{?!packager: %define packager Brian Behlendorf <behlendorf1@llnl.gov>}
+
 %define module  @PACKAGE@
 %define mkconf  scripts/dkms.mkconf
 
@@ -60,11 +62,16 @@ echo -e "support or upgrade DKMS to a more current version."
 exit 1
 
 %preun
-dkms remove -m %{module} -v %{version} --all --rpm_safe_upgrade
+# Only remove the modules if they are for this %{version}-%{release}.  A
+# package upgrade can replace them if only the %{release} is changed.
+RELEASE="/var/lib/dkms/%{module}/%{version}/build/%{module}.release"
+if [ -f $RELEASE ] && [ `cat $RELEASE`%{?dist} = "%{version}-%{release}" ]; then
+    echo -e
+    echo -e "Uninstall of %{module} module (version %{version}) beginning:"
+    dkms remove -m %{module} -v %{version} --all --rpm_safe_upgrade
+fi
 exit 0
 
 %changelog
-* Wed Aug 21 2013 Brian Behlendorf <behlendorf1@llnl.gov> - 0.6.2-1
-- Released 0.6.2-1
-* Fri Mar 22 2013 Brian Behlendorf <behlendorf1@llnl.gov> - 0.6.1-1
-- First official stable release.
+* %(date "+%a %b %d %Y") %packager %{version}-%{release}
+- Automatic build by DKMS
index 50947c0355be1878d864636afde5558edf9fc994..490734667e2d232311f02d0aa13999f0902a1075 100644 (file)
@@ -160,6 +160,8 @@ chmod u+x ${RPM_BUILD_ROOT}%{kmodinstdir_prefix}/*/extra/*/*/*
 rm -rf $RPM_BUILD_ROOT
 
 %changelog
+* Thu Jun 12 2014 Brian Behlendorf <behlendorf1@llnl.gov> - 0.6.3-1
+- Released 0.6.3-1
 * Wed Aug 21 2013 Brian Behlendorf <behlendorf1@llnl.gov> - 0.6.2-1
 - Released 0.6.2-1
 * Fri Mar 22 2013 Brian Behlendorf <behlendorf1@llnl.gov> - 0.6.1-1
index a0fe298173ce97d777ffefc36307204d98d5d95f..7934787c5443d27dfc3a6150c9af72ee350d0f3f 100644 (file)
@@ -38,6 +38,8 @@ make install DESTDIR=%{?buildroot}
 %{_mandir}/man5/*
 
 %changelog
+* Thu Jun 12 2014 Brian Behlendorf <behlendorf1@llnl.gov> - 0.6.3-1
+- Released 0.6.3-1
 * Wed Aug 21 2013 Brian Behlendorf <behlendorf1@llnl.gov> - 0.6.2-1
 - Released 0.6.2-1
 * Fri Mar 22 2013 Brian Behlendorf <behlendorf1@llnl.gov> - 0.6.1-1
index 852ade021d3c8a509deb929e6ca8872875e128f9..ce3f042947a5f14acc4ced6c3cfbda7881cb0155 100644 (file)
@@ -64,7 +64,7 @@ print_akmodtemplate ()
        cat <<EOF
 
 %global akmod_install mkdir -p \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/; \\\
-rpmbuild --define "_sourcedir %{_sourcedir}" \\\
+LANG=C rpmbuild --define "_sourcedir %{_sourcedir}" \\\
 --define "_srcrpmdir \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/" \\\
 -bs --nodeps %{_specdir}/%{name}.spec ; \\\
 ln -s \$(ls \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/) \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/${kmodname}-kmod.latest