#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
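
/*
 * For reference, a minimal sketch of how mm/slub.c bumps these counters
 * (paraphrased, not part of this header): the update is a plain per-cpu
 * increment that compiles away entirely when CONFIG_SLUB_STATS is off.
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 */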

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
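
/*
 * A heavily condensed, paraphrased sketch of how the allocation fastpath
 * in mm/slub.c uses freelist and tid together (node matching, the slow
 * path and cmpxchg-failure handling are elided; not part of this header).
 * The tid is re-read and compared inside this_cpu_cmpxchg_double() so a
 * migration or interleaved alloc/free between the reads is detected and
 * the fastpath simply retries:
 *
 *	do {
 *		tid = this_cpu_read(s->cpu_slab->tid);
 *		c = raw_cpu_ptr(s->cpu_slab);
 *	} while (IS_ENABLED(CONFIG_PREEMPT) &&
 *		 unlikely(tid != READ_ONCE(c->tid)));
 *
 *	object = c->freelist;
 *	if (unlikely(!object))
 *		object = __slab_alloc(s, gfpflags, node, addr, c);
 *	else if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist,
 *					  s->cpu_slab->tid,
 *					  object, tid,
 *					  get_freepointer_safe(s, object),
 *					  next_tid(tid)))
 *		goto redo;
 */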

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
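
/*
 * For illustration, mm/slub.c packs both values into the single word
 * roughly as follows (paraphrased sketch; OO_SHIFT, OO_MASK and the
 * oo_order()/oo_objects() accessors live in mm/slub.c, not here):
 *
 *	#define OO_SHIFT	16
 *	#define OO_MASK		((1 << OO_SHIFT) - 1)
 *
 *	order   = x.x >> OO_SHIFT;	(oo_order)
 *	objects = x.x & OO_MASK;	(oo_objects)
 */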

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including metadata */
	int object_size;	/* The size of an object without metadata */
	int offset;		/* Free pointer offset. */
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	int max_attr_size;	/* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	struct kmem_cache_node *node[MAX_NUMNODES];
};

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_remove(struct kmem_cache *);
#else
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
}
#endif

/**
 * virt_to_obj - returns the address of the beginning of an object.
 * @s: object's kmem_cache
 * @slab_page: address of the slab page
 * @x: address within the object's memory range
 *
 * Returns the address of the beginning of the object containing @x.
 */
static inline void *virt_to_obj(struct kmem_cache *s,
				const void *slab_page,
				const void *x)
{
	return (void *)x - ((x - slab_page) % s->size);
}
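
/*
 * Worked example (illustrative): with s->size == 64 and slab_page == P,
 * a pointer x == P + 100 lies inside the slab's second object, since
 * (100 % 64) == 36, so virt_to_obj(s, P, x) returns P + 64.
 */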

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

#endif /* _LINUX_SLUB_DEF_H */