* ==========================================================================
*/
+#ifdef ZFS_DEBUG
+/* A recognizable poison pattern (truncated to 32 bits on ILP32 kernels). */
+static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
+#endif
+
+/*
+ * Use empty space after the buffer to detect overflows.
+ *
+ * Since zio_init() creates kmem caches only for a certain set of buffer
+ * sizes, allocations of other sizes may have some unused space after the
+ * data.  Filling part of that space with a known pattern on allocation and
+ * checking it on free should allow us to detect some buffer overflows.
+ *
+ * For example, a 3000-byte request maps to size class c == 5 (3 KiB), so
+ * offsets 3000..3071 are patterned; if the 3 KiB and 3.5 KiB classes share
+ * one kmem cache, the pattern extends through offset 3583.
+ */
+static void
+zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
+{
+#ifdef ZFS_DEBUG
+ size_t off = P2ROUNDUP(size, sizeof (ulong_t));
+ ulong_t *canary = p + off / sizeof (ulong_t);
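+	/*
+	 * The canary may cover more than one size class: if the next class
+	 * shares this kmem cache, the underlying allocation is at least one
+	 * class larger, so extend the patterned region accordingly.
+	 */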
+ size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
+ if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
+ cache[c] == cache[c + 1])
+ asize = (c + 2) << SPA_MINBLOCKSHIFT;
+ for (; off < asize; canary++, off += sizeof (ulong_t))
+ *canary = zio_buf_canary;
+#endif
+}
+
+static void
+zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
+{
+#ifdef ZFS_DEBUG
+ size_t off = P2ROUNDUP(size, sizeof (ulong_t));
+ ulong_t *canary = p + off / sizeof (ulong_t);
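+	/* Recompute the same patterned span that zio_buf_put_canary() wrote. */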
+ size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
+ if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
+ cache[c] == cache[c + 1])
+ asize = (c + 2) << SPA_MINBLOCKSHIFT;
+ for (; off < asize; canary++, off += sizeof (ulong_t)) {
+ if (unlikely(*canary != zio_buf_canary)) {
+ PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
+ p, size, (canary - p) * sizeof (ulong_t),
+ *canary, zio_buf_canary);
+ }
+ }
+#endif
+}
+
/*
* Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
* crashdump if the kernel panics, so use it judiciously. Obviously, it's
atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif
- return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
+ void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
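+	/* Poison the slack past the requested size; checked again on free. */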
+ zio_buf_put_canary(p, size, zio_buf_cache, c);
+ return (p);
}
/*
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
- return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
+ void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
+ zio_buf_put_canary(p, size, zio_data_buf_cache, c);
+ return (p);
}
void
atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif
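+	/* Verify the canary is intact before returning the buffer to the cache. */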
+ zio_buf_check_canary(buf, size, zio_buf_cache, c);
kmem_cache_free(zio_buf_cache[c], buf);
}
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+ zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
kmem_cache_free(zio_data_buf_cache[c], buf);
}