/*
* Memory allocation interfaces
*/
-#define KM_SLEEP (GFP_KERNEL | __GFP_NOFAIL)
+#define KM_SLEEP GFP_KERNEL
#define KM_NOSLEEP GFP_ATOMIC
#undef KM_PANIC /* No linux analog */
#define KM_PUSHPAGE (KM_SLEEP | __GFP_HIGH)
# define __GFP_ZERO 0x8000
#endif
+/*
+ * __GFP_NOFAIL looks like it will be removed from the kernel perhaps as
+ * early as 2.6.32.  To avoid problems when that happens in upstream
+ * kernels we retry the allocation here ourselves, but only while the
+ * caller permits sleeping (__GFP_WAIT set, i.e. KM_SLEEP; a KM_NOSLEEP/
+ * GFP_ATOMIC allocation fails immediately as it always has).  I would
+ * prefer the caller handle the failure case cleanly but we are trying
+ * to emulate Solaris and those are not the Solaris semantics: a
+ * KM_SLEEP allocation may block indefinitely but never returns NULL.
+ */
+static inline void *
+kmalloc_nofail(size_t size, gfp_t flags)
+{
+ void *ptr;
+
+ /* Loop until success; for sleeping allocations kmalloc() itself
+  * blocks/reclaims between attempts, so this is not a busy-spin. */
+ do {
+ ptr = kmalloc(size, flags);
+ } while (ptr == NULL && (flags & __GFP_WAIT));
+
+ return ptr;
+}
+
+/*
+ * Zeroing variant of kmalloc_nofail(): retry kzalloc() until it
+ * succeeds, but only while the flags allow sleeping (__GFP_WAIT set);
+ * atomic allocations may still return NULL on the first failure.
+ */
+static inline void *
+kzalloc_nofail(size_t size, gfp_t flags)
+{
+ void *ptr;
+
+ do {
+ ptr = kzalloc(size, flags);
+ } while (ptr == NULL && (flags & __GFP_WAIT));
+
+ return ptr;
+}
+
+#ifdef HAVE_KMALLOC_NODE
+/*
+ * NUMA-aware variant of kmalloc_nofail(): retry kmalloc_node() on the
+ * requested node until it succeeds, but only while the flags allow
+ * sleeping (__GFP_WAIT set); atomic allocations fail immediately.
+ * Only compiled when the running kernel provides kmalloc_node().
+ */
+static inline void *
+kmalloc_node_nofail(size_t size, gfp_t flags, int node)
+{
+ void *ptr;
+
+ do {
+ ptr = kmalloc_node(size, flags, node);
+ } while (ptr == NULL && (flags & __GFP_WAIT));
+
+ return ptr;
+}
+#endif /* HAVE_KMALLOC_NODE */
+
#ifdef DEBUG_KMEM
extern atomic64_t kmem_alloc_used;
#else /* DEBUG_KMEM */
-# define kmem_alloc(size, flags) kmalloc((size), (flags))
-# define kmem_zalloc(size, flags) kzalloc((size), (flags))
+# define kmem_alloc(size, flags) kmalloc_nofail((size), (flags))
+# define kmem_zalloc(size, flags) kzalloc_nofail((size), (flags))
# define kmem_free(ptr, size) ((void)(size), kfree(ptr))
# ifdef HAVE_KMALLOC_NODE
# define kmem_alloc_node(size, flags, node) \
- kmalloc_node((size), (flags), (node))
+ kmalloc_node_nofail((size), (flags), (node))
# else
# define kmem_alloc_node(size, flags, node) \
- kmalloc((size), (flags))
+ kmalloc_nofail((size), (flags))
# endif
# define vmem_alloc(size, flags) __vmalloc((size), ((flags) | \
unsigned long irq_flags;
ENTRY;
- dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t),
+ dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
flags & ~__GFP_ZERO);
if (dptr == NULL) {
/* Use the correct allocator */
if (node_alloc) {
ASSERT(!(flags & __GFP_ZERO));
- ptr = kmalloc_node(size, flags, node);
+ ptr = kmalloc_node_nofail(size, flags, node);
} else if (flags & __GFP_ZERO) {
- ptr = kzalloc(size, flags & ~__GFP_ZERO);
+ ptr = kzalloc_nofail(size, flags & ~__GFP_ZERO);
} else {
- ptr = kmalloc(size, flags);
+ ptr = kmalloc_nofail(size, flags);
}
if (unlikely(ptr == NULL)) {
ASSERT(flags & KM_SLEEP);
- dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), flags);
+ dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t), flags);
if (dptr == NULL) {
CWARN("vmem_alloc(%ld, 0x%x) debug failed\n",
sizeof(kmem_debug_t), flags);
/* Use the correct allocator */
if (node_alloc) {
ASSERT(!(flags & __GFP_ZERO));
- ptr = kmalloc_node(size, flags, node);
+ ptr = kmalloc_node_nofail(size, flags, node);
} else if (flags & __GFP_ZERO) {
- ptr = kzalloc(size, flags & (~__GFP_ZERO));
+ ptr = kzalloc_nofail(size, flags & (~__GFP_ZERO));
} else {
- ptr = kmalloc(size, flags);
+ ptr = kmalloc_nofail(size, flags);
}
if (ptr == NULL) {
sizeof(void *) * skc->skc_mag_size;
ENTRY;
- skm = kmem_alloc_node(size, GFP_KERNEL | __GFP_NOFAIL, node);
+ skm = kmem_alloc_node(size, KM_SLEEP, node);
if (skm) {
skm->skm_magic = SKM_MAGIC;
skm->skm_avail = 0;