+/*
+ * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
+ * Copyright (C) 2007 The Regents of the University of California.
+ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
+ * UCRL-CODE-235197
+ *
+ * This file is part of the SPL, Solaris Porting Layer.
+ * For details, see <http://zfsonlinux.org/>.
+ *
+ * The SPL is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * The SPL is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with the SPL. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H
-#ifdef __cplusplus
-extern "C" {
-#endif
+#include <sys/debug.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
-#define DEBUG_KMEM
-#undef DEBUG_KMEM_UNIMPLEMENTED
+extern int kmem_debugging(void);
+extern char *kmem_vasprintf(const char *fmt, va_list ap);
+extern char *kmem_asprintf(const char *fmt, ...);
+extern char *strdup(const char *str);
+extern void strfree(char *str);
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
/*
* Memory allocation interfaces
*/
-#define KM_SLEEP GFP_KERNEL
-#define KM_NOSLEEP GFP_ATOMIC
-#undef KM_PANIC /* No linux analog */
-#define KM_PUSHPAGE (GFP_KERNEL | GFP_HIGH)
-#define KM_VMFLAGS GFP_LEVEL_MASK
-#define KM_FLAGS __GFP_BITS_MASK
-
-#ifdef DEBUG_KMEM
-extern atomic64_t kmem_alloc_used;
-extern unsigned long kmem_alloc_max;
-extern atomic64_t vmem_alloc_used;
-extern unsigned long vmem_alloc_max;
-extern int kmem_warning_flag;
-
-#define __kmem_alloc(size, flags, allocator) \
-({ void *_ptr_; \
- \
- /* Marked unlikely because we should never be doing this */ \
- if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag) \
- printk("Warning: kmem_alloc(%d, 0x%x) large alloc at %s:%d " \
- "(%ld/%ld)\n", (int)(size), (int)(flags), \
- __FILE__, __LINE__, \
- atomic64_read(&kmem_alloc_used), kmem_alloc_max); \
- \
- _ptr_ = (void *)allocator((size), (flags)); \
- if (_ptr_ == NULL) { \
- printk("Warning: kmem_alloc(%d, 0x%x) failed at %s:%d " \
- "(%ld/%ld)\n", (int)(size), (int)(flags), \
- __FILE__, __LINE__, \
- atomic64_read(&kmem_alloc_used), kmem_alloc_max); \
- } else { \
- atomic64_add((size), &kmem_alloc_used); \
- if (unlikely(atomic64_read(&kmem_alloc_used)>kmem_alloc_max)) \
- kmem_alloc_max = atomic64_read(&kmem_alloc_used); \
- } \
- \
- _ptr_; \
-})
-
-#define kmem_alloc(size, flags) __kmem_alloc(size, flags, kmalloc)
-#define kmem_zalloc(size, flags) __kmem_alloc(size, flags, kzalloc)
-
-#define kmem_free(ptr, size) \
-({ \
- BUG_ON(!(ptr) || (size) < 0); \
- atomic64_sub((size), &kmem_alloc_used); \
- memset(ptr, 0x5a, (size)); /* Poison */ \
- kfree(ptr); \
-})
-
-#define __vmem_alloc(size, flags) \
-({ void *_ptr_; \
- \
- BUG_ON(flags != KM_SLEEP); \
- \
- _ptr_ = (void *)vmalloc((size)); \
- if (_ptr_ == NULL) { \
- printk("Warning: vmem_alloc(%d, 0x%x) failed at %s:%d " \
- "(%ld/%ld)\n", (int)(size), (int)(flags), \
- __FILE__, __LINE__, \
- atomic64_read(&vmem_alloc_used), vmem_alloc_max); \
- } else { \
- atomic64_add((size), &vmem_alloc_used); \
- if (unlikely(atomic64_read(&vmem_alloc_used)>vmem_alloc_max)) \
- vmem_alloc_max = atomic64_read(&vmem_alloc_used); \
- } \
- \
- _ptr_; \
-})
-
-#define vmem_alloc(size, flags) __vmem_alloc(size, flags)
-
-#define vmem_free(ptr, size) \
-({ \
- BUG_ON(!(ptr) || (size) < 0); \
- atomic64_sub((size), &vmem_alloc_used); \
- memset(ptr, 0x5a, (size)); /* Poison */ \
- vfree(ptr); \
-})
-
-#else
-
-#define kmem_alloc(size, flags) kmalloc(size, flags)
-#define kmem_zalloc(size, flags) kzalloc(size, flags)
-#define kmem_free(ptr, size) \
-({ \
- BUG_ON(!(ptr) || (size) < 0); \
- kfree(ptr); \
-})
-
-#define vmem_alloc(size, flags) vmalloc(size)
-#define vmem_free(ptr, size) \
-({ \
- BUG_ON(!(ptr) || (size) < 0); \
- vfree(ptr); \
-})
-
-#endif /* DEBUG_KMEM */
-
-
-#ifdef DEBUG_KMEM_UNIMPLEMENTED
-static __inline__ void *
-kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
-{
-#error "kmem_alloc_tryhard() not implemented"
-}
-#endif /* DEBUG_KMEM_UNIMPLEMENTED */
+/*
+ * Public Solaris-style KM_* allocation flags; translated to Linux GFP_*
+ * flags by kmem_flags_convert() below.
+ */
+#define KM_SLEEP	0x0000	/* can block for memory; success guaranteed */
+#define KM_NOSLEEP	0x0001	/* cannot block for memory; may fail */
+#define KM_PUSHPAGE	0x0004	/* can block for memory; may use reserve */
+#define KM_ZERO		0x1000	/* zero the allocation */
+#define KM_VMEM		0x2000	/* caller is vmem_* wrapper */
+
+/* Flag bits which external callers may legitimately pass in. */
+#define KM_PUBLIC_MASK	(KM_SLEEP | KM_NOSLEEP | KM_PUSHPAGE)
/*
- * Slab allocation interfaces
+ * Convert a KM_* flags mask to its Linux GFP_* counterpart.  The
+ * conversion function is context aware, which means that KM_SLEEP
+ * allocations can be safely used in syncing contexts which have set
+ * PF_FSTRANS.
-#undef KMC_NOTOUCH /* No linux analog */
-#define KMC_NODEBUG 0x00000000 /* Default behavior */
-#define KMC_NOMAGAZINE /* No linux analog */
-#define KMC_NOHASH /* No linux analog */
-#define KMC_QCACHE /* No linux analog */
+static inline gfp_t
+kmem_flags_convert(int flags)
+{
+	/*
+	 * __GFP_NOWARN: allocation failures are reported by the callers.
+	 * __GFP_COMP: keep multi-page allocations as compound pages.
+	 */
+	gfp_t lflags = __GFP_NOWARN | __GFP_COMP;
-#define KMC_REAP_CHUNK 256
-#define KMC_DEFAULT_SEEKS DEFAULT_SEEKS
+	if (flags & KM_NOSLEEP) {
+		/* Caller may not block: atomic allocation, no retry loop. */
+		lflags |= GFP_ATOMIC | __GFP_NORETRY;
+	} else {
+		lflags |= GFP_KERNEL;
+		/*
+		 * In a syncing context (PF_FSTRANS set) strip __GFP_IO and
+		 * __GFP_FS so direct reclaim cannot recurse back into the
+		 * filesystem and deadlock.
+		 */
+		if ((current->flags & PF_FSTRANS))
+			lflags &= ~(__GFP_IO|__GFP_FS);
+	}
-/* Defined by linux slab.h
- * typedef struct kmem_cache_s kmem_cache_t;
- */
+	/* KM_PUSHPAGE may dip into emergency reserves to make progress. */
+	if (flags & KM_PUSHPAGE)
+		lflags |= __GFP_HIGH;
-/* No linux analog
- * extern int kmem_ready;
- * extern pgcnt_t kmem_reapahead;
- */
+	/* KM_ZERO asks the allocator for zeroed memory. */
+	if (flags & KM_ZERO)
+		lflags |= __GFP_ZERO;
-#ifdef DEBUG_KMEM_UNIMPLEMENTED
-static __inline__ void kmem_init(void) {
-#error "kmem_init() not implemented"
+	return (lflags);
}
-static __inline__ void kmem_thread_init(void) {
-#error "kmem_thread_init() not implemented"
-}
+/*
+ * Cookie returned by spl_fstrans_mark() and consumed by
+ * spl_fstrans_unmark().  It records the marking thread and the prior
+ * PF_FSTRANS state so nested mark/unmark pairs restore it correctly.
+ */
+typedef struct {
+	struct task_struct *fstrans_thread;	/* thread that took the mark */
+	unsigned int saved_flags;		/* prior PF_FSTRANS bit, if set */
+} fstrans_cookie_t;
-static __inline__ void kmem_mp_init(void) {
-#error "kmem_mp_init() not implemented"
-}
+/*
+ * Mark the current thread as being inside a filesystem transaction by
+ * setting PF_FSTRANS.  While marked, kmem_flags_convert() strips
+ * __GFP_IO/__GFP_FS from KM_SLEEP allocations to avoid reclaim
+ * deadlocks.  The returned cookie preserves the previous flag state.
+ */
+static inline fstrans_cookie_t
+spl_fstrans_mark(void)
+{
+	fstrans_cookie_t cookie;
-static __inline__ void kmem_reap_idspace(void) {
-#error "kmem_reap_idspace() not implemented"
-}
+	cookie.fstrans_thread = current;
+	/* Save only the PF_FSTRANS bit so nested marks restore it. */
+	cookie.saved_flags = current->flags & PF_FSTRANS;
+	current->flags |= PF_FSTRANS;
-static __inline__ size_t kmem_avail(void) {
-#error "kmem_avail() not implemented"
+	return (cookie);
}
-static __inline__ size_t kmem_maxavail(void) {
-#error "kmem_maxavail() not implemented"
-}
+/*
+ * Clear the PF_FSTRANS mark taken by spl_fstrans_mark(), restoring any
+ * PF_FSTRANS bit which was already set when the cookie was created.
+ * Must be called by the same thread which created the cookie.
+ */
+static inline void
+spl_fstrans_unmark(fstrans_cookie_t cookie)
+{
+	ASSERT3P(cookie.fstrans_thread, ==, current);
+	ASSERT(current->flags & PF_FSTRANS);
-static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
-#error "kmem_cache_stat() not implemented"
+	/* Drop the bit, then re-apply whatever state the cookie saved. */
+	current->flags &= ~(PF_FSTRANS);
+	current->flags |= cookie.saved_flags;
}
-#endif /* DEBUG_KMEM_UNIMPLEMENTED */
-/* XXX - Used by arc.c to adjust its memory footprint. We may want
- * to use this hook in the future to adjust behavior based on
- * debug levels. For now it's safe to always return 0.
- */
-static __inline__ int
-kmem_debugging(void)
+/*
+ * Return non-zero when the current thread is marked PF_FSTRANS, i.e.
+ * it is inside a filesystem transaction and allocations must not
+ * recurse into filesystem reclaim.
+ */
+static inline int
+spl_fstrans_check(void)
{
-	return 0;
+	return (current->flags & PF_FSTRANS);
}
-typedef int (*kmem_constructor_t)(void *, void *, int);
-typedef void (*kmem_destructor_t)(void *, void *);
-typedef void (*kmem_reclaim_t)(void *);
-
-extern int kmem_set_warning(int flag);
-
-extern kmem_cache_t *
-__kmem_cache_create(char *name, size_t size, size_t align,
- kmem_constructor_t constructor,
- kmem_destructor_t destructor,
- kmem_reclaim_t reclaim,
- void *priv, void *vmp, int flags);
-
-int
-extern __kmem_cache_destroy(kmem_cache_t *cache);
-
-void
-extern __kmem_reap(void);
-
-int kmem_init(void);
-void kmem_fini(void);
-
-#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
- __kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
-#define kmem_cache_destroy(cache) __kmem_cache_destroy(cache)
-#define kmem_cache_alloc(cache, flags) kmem_cache_alloc(cache, flags)
-#define kmem_cache_free(cache, ptr) kmem_cache_free(cache, ptr)
-#define kmem_cache_reap_now(cache) kmem_cache_shrink(cache)
-#define kmem_reap() __kmem_reap()
+/*
+ * Running total of outstanding kmem allocation bytes, maintained for
+ * the kmem accounting code.  A native atomic64_t is used when the
+ * platform provides one; otherwise fall back to 32-bit atomic_t ops.
+ */
+#ifdef HAVE_ATOMIC64_T
+#define kmem_alloc_used_add(size)	atomic64_add(size, &kmem_alloc_used)
+#define kmem_alloc_used_sub(size)	atomic64_sub(size, &kmem_alloc_used)
+#define kmem_alloc_used_read()		atomic64_read(&kmem_alloc_used)
+#define kmem_alloc_used_set(size)	atomic64_set(&kmem_alloc_used, size)
+extern atomic64_t kmem_alloc_used;
+extern unsigned long long kmem_alloc_max;
+#else /* HAVE_ATOMIC64_T */
+#define kmem_alloc_used_add(size)	atomic_add(size, &kmem_alloc_used)
+#define kmem_alloc_used_sub(size)	atomic_sub(size, &kmem_alloc_used)
+#define kmem_alloc_used_read()		atomic_read(&kmem_alloc_used)
+#define kmem_alloc_used_set(size)	atomic_set(&kmem_alloc_used, size)
+extern atomic_t kmem_alloc_used;
+extern unsigned long long kmem_alloc_max;
+#endif /* HAVE_ATOMIC64_T */
+
+/* Tunables; presumably warn threshold and size cap for single allocations
+ * -- see the spl_kmem_alloc() implementation to confirm. */
+extern unsigned int spl_kmem_alloc_warn;
+extern unsigned int spl_kmem_alloc_max;
+
+/*
+ * Public allocation API.  The wrappers capture the calling function and
+ * line number and forward them to the spl_kmem_* implementations.
+ */
+#define kmem_alloc(sz, fl)	spl_kmem_alloc((sz), (fl), __func__, __LINE__)
+#define kmem_zalloc(sz, fl)	spl_kmem_zalloc((sz), (fl), __func__, __LINE__)
+#define kmem_free(ptr, sz)	spl_kmem_free((ptr), (sz))
+
+extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line);
+extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line);
+extern void spl_kmem_free(const void *ptr, size_t sz);
-#ifdef __cplusplus
-}
-#endif
+/*
+ * The following functions are only available for internal use.
+ */
+extern void *spl_kmem_alloc_impl(size_t size, int flags, int node);
+extern void *spl_kmem_alloc_debug(size_t size, int flags, int node);
+extern void *spl_kmem_alloc_track(size_t size, int flags,
+ const char *func, int line, int node);
+extern void spl_kmem_free_impl(const void *buf, size_t size);
+extern void spl_kmem_free_debug(const void *buf, size_t size);
+extern void spl_kmem_free_track(const void *buf, size_t size);
+
+extern int spl_kmem_init(void);
+extern void spl_kmem_fini(void);
#endif /* _SPL_KMEM_H */