git.proxmox.com Git - mirror_zfs-debian.git/commitdiff
Merge branch 'upstream'
author Darik Horn <dajhorn@vanadac.com>
Sat, 5 Nov 2011 15:27:43 +0000 (10:27 -0500)
committer Darik Horn <dajhorn@vanadac.com>
Sat, 5 Nov 2011 15:27:43 +0000 (10:27 -0500)
include/sys/zfs_context.h
module/zfs/dnode.c
module/zfs/zfs_vnops.c
module/zfs/zio.c
module/zfs/zpl_file.c

index 1027e7215febd1a33a340d0b1165a5315b02a423..a32848941557b40dd0b1b46cdd6bf1ac6286bb57 100644 (file)
@@ -329,6 +329,8 @@ extern void kstat_delete(kstat_t *);
 #define        KM_NOSLEEP              UMEM_DEFAULT
 #define        KM_NODEBUG              0x0
 #define        KMC_NODEBUG             UMC_NODEBUG
+#define        KMC_KMEM                0x0
+#define        KMC_VMEM                0x0
 #define        kmem_alloc(_s, _f)      umem_alloc(_s, _f)
 #define        kmem_zalloc(_s, _f)     umem_zalloc(_s, _f)
 #define        kmem_free(_b, _s)       umem_free(_b, _s)
index 9889c3c36524713b0c8c06b78893a843a838ae05..5438f60d0003587b61bac4b5331f46ec7ae3f891 100644 (file)
@@ -171,9 +171,8 @@ void
 dnode_init(void)
 {
        ASSERT(dnode_cache == NULL);
-       dnode_cache = kmem_cache_create("dnode_t",
-           sizeof (dnode_t),
-           0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
+       dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t),
+           0, dnode_cons, dnode_dest, NULL, NULL, NULL, KMC_KMEM);
        kmem_cache_set_move(dnode_cache, dnode_move);
 }
 
index 3331a17067885c30e5715b489efe5d095aa692a8..b7f5daaaf00d59326d53da7c05496a6f2f74b465 100644 (file)
@@ -2301,6 +2301,9 @@ zfs_getattr_fast(struct inode *ip, struct kstat *sp)
        znode_t *zp = ITOZ(ip);
        zfs_sb_t *zsb = ITOZSB(ip);
 
+       ZFS_ENTER(zsb);
+       ZFS_VERIFY_ZP(zp);
+
        mutex_enter(&zp->z_lock);
 
        generic_fillattr(ip, sp);
@@ -2316,6 +2319,8 @@ zfs_getattr_fast(struct inode *ip, struct kstat *sp)
 
        mutex_exit(&zp->z_lock);
 
+       ZFS_EXIT(zsb);
+
        return (0);
 }
 EXPORT_SYMBOL(zfs_getattr_fast);
index 0022c64cc5c05e490fa987f70c1d41a1bcc9a2c4..6b03be6f3bbf62e8d7c442e145e8bd5fd8ca90cb 100644 (file)
@@ -108,9 +108,9 @@ zio_init(void)
        data_alloc_arena = zio_alloc_arena;
 #endif
        zio_cache = kmem_cache_create("zio_cache",
-           sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
+           sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, KMC_KMEM);
        zio_link_cache = kmem_cache_create("zio_link_cache",
-           sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
+           sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, KMC_KMEM);
 
        /*
         * For small buffers, we want a cache for each multiple of
@@ -136,17 +136,27 @@ zio_init(void)
 
                if (align != 0) {
                        char name[36];
+                       int flags = zio_bulk_flags;
+
+                       /*
+                        * The smallest buffers (512b) are heavily used and
+                        * experience a lot of churn.  The slabs allocated
+                        * for them are also relatively small (32K).  Thus
+                        * in order to avoid expensive calls to vmalloc() we
+                        * make an exception to the usual slab allocation
+                        * policy and force these buffers to be kmem backed.
+                        */
+                       if (size == (1 << SPA_MINBLOCKSHIFT))
+                               flags |= KMC_KMEM;
+
                        (void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
                        zio_buf_cache[c] = kmem_cache_create(name, size,
-                           align, NULL, NULL, NULL, NULL, NULL,
-                           (size > zio_buf_debug_limit ? KMC_NODEBUG : 0) |
-                           zio_bulk_flags);
+                           align, NULL, NULL, NULL, NULL, NULL, flags);
 
                        (void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
                        zio_data_buf_cache[c] = kmem_cache_create(name, size,
-                           align, NULL, NULL, NULL, NULL, data_alloc_arena,
-                           (size > zio_buf_debug_limit ? KMC_NODEBUG : 0) |
-                           zio_bulk_flags);
+                           align, NULL, NULL, NULL, NULL,
+                           data_alloc_arena, flags);
                }
        }
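
The new comment in this hunk can be made concrete with a little arithmetic. The helper below is only an illustrative sketch, not part of the commit; it reuses the zio_bulk_flags, KMC_KMEM, and SPA_MINBLOCKSHIFT identifiers that appear in the hunk and assumes SPA_MINBLOCKSHIFT is 9 (a 512-byte minimum block).

static int
zio_cache_flags(size_t size)
{
	int flags = zio_bulk_flags;

	/*
	 * 1 << SPA_MINBLOCKSHIFT is 512 bytes, so a 32K slab holds roughly
	 * 32768 / 512 == 64 of these buffers.  Backing that slab with
	 * kmalloc() (KMC_KMEM) is a single cheap physically contiguous
	 * allocation, whereas a vmalloc() backed slab would pay mapping
	 * overhead on every refill.
	 */
	if (size == (1 << SPA_MINBLOCKSHIFT))
		flags |= KMC_KMEM;

	return (flags);
}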
 
index af46afddfbbcf0ffcc255d4b40c2ad4fa2b89424..298c0b62df751dcc7ad13c380ef43b2b28a9d70d 100644 (file)
@@ -324,9 +324,13 @@ zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
         * the VM might try to write out additional pages by calling
         * zpl_putpage() again resulting in a deadlock.
         */
-       current->flags |= PF_MEMALLOC;
-       (void) zfs_putpage(mapping->host, pp, wbc);
-       current->flags &= ~PF_MEMALLOC;
+       if (current->flags & PF_MEMALLOC) {
+               (void) zfs_putpage(mapping->host, pp, wbc);
+       } else {
+               current->flags |= PF_MEMALLOC;
+               (void) zfs_putpage(mapping->host, pp, wbc);
+               current->flags &= ~PF_MEMALLOC;
+       }
 
        return (0);
 }
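
The restructured zpl_putpage() above preserves a PF_MEMALLOC bit that was already set by the caller instead of unconditionally clearing it on the way out. An equivalent save/restore form is sketched below; it is illustrative only and not part of the commit.

	/*
	 * Sketch only: remember whether the caller entered with PF_MEMALLOC
	 * set, run the writeback with the flag set, then restore the original
	 * bit rather than clearing it unconditionally.
	 */
	unsigned int memalloc = current->flags & PF_MEMALLOC;

	current->flags |= PF_MEMALLOC;
	(void) zfs_putpage(mapping->host, pp, wbc);
	current->flags = (current->flags & ~PF_MEMALLOC) | memalloc;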