print_trailer(s, page, object);
}
-static void slab_err(struct kmem_cache *s, struct page *page,
+static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
const char *fmt, ...)
{
va_list args;
kasan_kfree_large(x);
}
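
The added __printf(3, 4) annotation tells the compiler that argument 3 of slab_err() is a printf-style format string and that the variadic arguments begin at 4, so callers get -Wformat checking. A minimal sketch, paraphrasing the kernel's definition of the macro:

	/* Roughly the kernel's definition (include/linux/compiler_types.h): */
	#define __printf(a, b) __attribute__((format(printf, a, b)))

	/* A mismatched caller is now caught at build time: */
	slab_err(s, page, "objects %u left", (void *)addr);	/* -Wformat warning */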
-static inline void *slab_free_hook(struct kmem_cache *s, void *x)
+static inline bool slab_free_hook(struct kmem_cache *s, void *x)
{
- void *freeptr;
-
kmemleak_free_recursive(x, s->flags);
/*
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(x, s->object_size);
- freeptr = get_freepointer(s, x);
- /*
- * kasan_slab_free() may put x into memory quarantine, delaying its
- * reuse. In this case the object's freelist pointer is changed.
- */
- kasan_slab_free(s, x);
- return freeptr;
+ /* KASAN might put x into memory quarantine, delaying its reuse */
+ return kasan_slab_free(s, x);
}
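
slab_free_hook() now reports whether the object's reuse must be delayed instead of returning the next free pointer; the caller walks the list itself via get_freepointer(). Under KASAN the verdict comes from kasan_slab_free(), which returns true once it has moved the object into the quarantine. A sketch of the contract (illustrative comment, not a new API):

	/*
	 * true  -> x is now owned by the KASAN quarantine; it must not go
	 *          back on a freelist and will be freed for real later.
	 * false -> x may be freed or reused immediately.
	 */
	if (!slab_free_hook(s, x))
		/* safe to link x back into the freelist */;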
-static inline void slab_free_freelist_hook(struct kmem_cache *s,
- void *head, void *tail)
+static inline bool slab_free_freelist_hook(struct kmem_cache *s,
+ void **head, void **tail)
{
/*
* Compiler cannot detect this function can be removed if slab_free_hook()
defined(CONFIG_DEBUG_OBJECTS_FREE) || \
defined(CONFIG_KASAN)
- void *object = head;
- void *tail_obj = tail ? : head;
- void *freeptr;
+ void *object;
+ void *next = *head;
+ void *old_tail = *tail ? *tail : *head;
+
+ /* Head and tail of the reconstructed freelist */
+ *head = NULL;
+ *tail = NULL;
do {
- freeptr = slab_free_hook(s, object);
- } while ((object != tail_obj) && (object = freeptr));
+ object = next;
+ next = get_freepointer(s, object);
+ /* If object's reuse doesn't have to be delayed */
+ if (!slab_free_hook(s, object)) {
+ /* Move object to the new freelist */
+ set_freepointer(s, object, *head);
+ *head = object;
+ if (!*tail)
+ *tail = object;
+ }
+ } while (object != old_tail);
+
+ if (*head == *tail)
+ *tail = NULL;
+
+ return *head != NULL;
+#else
+ return true;
#endif
}
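
The loop above rebuilds the freelist in place: it walks the old chain, asks slab_free_hook() about each object, and pushes the survivors onto a new list (which therefore comes out in reverse order); *tail ends up NULL when zero or one object survives. A standalone userspace model of the same filter-and-rebuild pattern, with node and quarantine() as illustrative stand-ins for the slab object and kasan_slab_free():

	#include <stdbool.h>
	#include <stdio.h>

	struct node { int id; struct node *next; };

	/* Stand-in for kasan_slab_free(): quarantine even ids. */
	static bool quarantine(struct node *n) { return n->id % 2 == 0; }

	static bool rebuild(struct node **head, struct node **tail)
	{
		struct node *object, *next = *head;
		struct node *old_tail = *tail ? *tail : *head;

		*head = NULL;
		*tail = NULL;
		do {
			object = next;
			next = object->next;
			if (!quarantine(object)) {
				object->next = *head;	/* push: reverses order */
				*head = object;
				if (!*tail)
					*tail = object;
			}
		} while (object != old_tail);

		if (*head == *tail)
			*tail = NULL;	/* zero or one survivor: no tail */
		return *head != NULL;
	}

	int main(void)
	{
		struct node n[4] = { { .id = 1 }, { .id = 2 },
				     { .id = 3 }, { .id = 4 } };
		struct node *head = &n[0], *tail = &n[3], *p;
		int i;

		for (i = 0; i < 3; i++)
			n[i].next = &n[i + 1];

		if (rebuild(&head, &tail))	/* keeps 1 and 3 */
			for (p = head; p; p = p->next)
				printf("%d\n", p->id);	/* prints 3, then 1 */
		return 0;
	}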
{
struct page *page, *page2;
void *object = NULL;
- int available = 0;
+ unsigned int available = 0;
int objects;
/*
void *head, void *tail, int cnt,
unsigned long addr)
{
- slab_free_freelist_hook(s, head, tail);
/*
- * slab_free_freelist_hook() could have put the items into quarantine.
- * If so, no need to free them.
+	 * With KASAN enabled, slab_free_freelist_hook() modifies the freelist
+	 * to remove objects whose reuse must be delayed.
*/
- if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
- return;
- do_slab_free(s, page, head, tail, cnt, addr);
+ if (slab_free_freelist_hook(s, &head, &tail))
+ do_slab_free(s, page, head, tail, cnt, addr);
}
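
When every object in the batch is quarantined the hook returns false with *head left NULL, and do_slab_free() is skipped; those objects are freed later, when KASAN drains its quarantine through ___cache_free(). A sketch of the two outcomes:

	/*
	 * slab_free_freelist_hook(s, &head, &tail) == false:
	 *	every object was quarantined, head is NULL, free nothing.
	 * slab_free_freelist_hook(s, &head, &tail) == true:
	 *	head/tail now describe the surviving sublist, which is
	 *	handed to do_slab_free() exactly as before.
	 */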
#ifdef CONFIG_KASAN
if (s->flags & SLAB_CACHE_DMA)
s->allocflags |= GFP_DMA;
+ if (s->flags & SLAB_CACHE_DMA32)
+ s->allocflags |= GFP_DMA32;
+
if (s->flags & SLAB_RECLAIM_ACCOUNT)
s->allocflags |= __GFP_RECLAIMABLE;
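
s->allocflags is OR-ed into the gfp mask whenever the cache allocates backing pages, so a SLAB_CACHE_DMA32 cache gets its slabs from ZONE_DMA32. A hypothetical user, assuming the rest of the series lets kmem_cache_create() accept SLAB_CACHE_DMA32 (cache name and sizes are illustrative):

	struct kmem_cache *cache;

	/* 1 KiB objects whose physical addresses must stay below 4 GiB,
	 * e.g. hardware page-table pages on a 64-bit system. */
	cache = kmem_cache_create("pgtable_dma32", 1024, 1024,
				  SLAB_CACHE_DMA32, NULL);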
static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
size_t length)
{
- unsigned long objects;
+ unsigned int objects;
int err;
- err = kstrtoul(buf, 10, &objects);
+ err = kstrtouint(buf, 10, &objects);
if (err)
return err;
if (objects && !kmem_cache_has_cpu_partial(s))
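
The parse now matches the new unsigned int type: kstrtouint() returns 0 on success, -EINVAL for malformed input, and -ERANGE when the value overflows unsigned int, so oversized sysfs writes are rejected cleanly. Usage sketch:

	unsigned int objects;
	int err;

	err = kstrtouint("42", 10, &objects);		/* 0; objects == 42 */
	err = kstrtouint("5000000000", 10, &objects);	/* -ERANGE */
	err = kstrtouint("4x", 10, &objects);		/* -EINVAL */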
*/
if (s->flags & SLAB_CACHE_DMA)
*p++ = 'd';
+ if (s->flags & SLAB_CACHE_DMA32)
+ *p++ = 'D';
if (s->flags & SLAB_RECLAIM_ACCOUNT)
*p++ = 'a';
if (s->flags & SLAB_CONSISTENCY_CHECKS)
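
These characters feed create_unique_id(), which names the sysfs alias for mergeable caches; a distinct 'D' means a DMA32 cache never shares an id with an otherwise-identical DMA or ordinary cache. A sketch of the resulting ids (format paraphrased from create_unique_id()):

	/*
	 *	:d-0000064	SLAB_CACHE_DMA cache, size 64
	 *	:D-0000064	SLAB_CACHE_DMA32 cache, size 64
	 *	:a-0000064	SLAB_RECLAIM_ACCOUNT cache, size 64
	 */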
kset_unregister(s->memcg_kset);
#endif
kobject_uevent(&s->kobj, KOBJ_REMOVE);
- kobject_del(&s->kobj);
out:
kobject_put(&s->kobj);
}
schedule_work(&s->kobj_remove_work);
}
+void sysfs_slab_unlink(struct kmem_cache *s)
+{
+ if (slab_state >= FULL)
+ kobject_del(&s->kobj);
+}
+
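
Splitting kobject_del() into its own sysfs_slab_unlink() lets the shutdown path remove the sysfs name synchronously while the final put stays deferrable, so a cache with the same name can be created again right away. The intended ordering, roughly paraphrasing shutdown_cache() in mm/slab_common.c:

	sysfs_slab_unlink(s);	/* name leaves /sys/kernel/slab immediately */
	sysfs_slab_release(s);	/* final kobject_put(); memory may go later */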
void sysfs_slab_release(struct kmem_cache *s)
{
if (slab_state >= FULL)