/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB: A slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

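/*
 * Per-cpu slab statistics. Counters are only collected when the kernel
 * is built with CONFIG_SLUB_STATS; each item is then exposed as its own
 * attribute under /sys/kernel/slab/<cache>/ (see mm/slub.c).
 */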
enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
        ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
        FREE_FASTPATH,          /* Free to cpu slab */
        FREE_SLOWPATH,          /* Freeing not to cpu slab */
        FREE_FROZEN,            /* Freeing to frozen slab */
        FREE_ADD_PARTIAL,       /* Freeing moves slab to partial list */
        FREE_REMOVE_PARTIAL,    /* Freeing removes last object */
        ALLOC_FROM_PARTIAL,     /* Cpu slab acquired from node partial list */
        ALLOC_SLAB,             /* Cpu slab acquired from page allocator */
        ALLOC_REFILL,           /* Refill cpu slab from slab freelist */
        ALLOC_NODE_MISMATCH,    /* Switching cpu slab */
        FREE_SLAB,              /* Slab freed to the page allocator */
        CPUSLAB_FLUSH,          /* Abandoning of the cpu slab */
        DEACTIVATE_FULL,        /* Cpu slab was full when deactivated */
        DEACTIVATE_EMPTY,       /* Cpu slab was empty when deactivated */
        DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
        DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
        DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
        DEACTIVATE_BYPASS,      /* Implicit deactivation */
        ORDER_FALLBACK,         /* Number of times fallback was necessary */
        CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
        CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
        CPU_PARTIAL_ALLOC,      /* Used cpu partial on alloc */
        CPU_PARTIAL_FREE,       /* Refill cpu partial on free */
        CPU_PARTIAL_NODE,       /* Refill cpu partial from node partial */
        CPU_PARTIAL_DRAIN,      /* Drain cpu partial to node partial */
        NR_SLUB_STAT_ITEMS
};

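/*
 * Per-cpu state for the lockless fastpaths. The freelist and the
 * transaction id (tid) are read and updated together with a single
 * this_cpu_cmpxchg_double(); a tid mismatch detects that the task was
 * preempted or migrated to another cpu in the meantime.
 */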
struct kmem_cache_cpu {
        void **freelist;        /* Pointer to next available object */
        unsigned long tid;      /* Globally unique transaction id */
        struct page *page;      /* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        struct page *partial;   /* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
        unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

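/*
 * Accessors for the per-cpu partial list. They compile away when
 * CONFIG_SLUB_CPU_PARTIAL is disabled, so common code does not need
 * #ifdefs at every use; slub_set_percpu_partial(c, p) detaches page p
 * from the head of the list by pointing the list at p->next.
 */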
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)          ((c)->partial)

#define slub_set_percpu_partial(c, p)           \
({                                              \
        slub_percpu_partial(c) = (p)->next;     \
})

#define slub_percpu_partial_read_once(c)     READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)                  NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)        NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */

/*
 * Word-sized structure that can be atomically read and updated, and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
        unsigned long x;
};
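/*
 * mm/slub.c packs the page allocation order into the high bits of x and
 * the object count into the low bits; see oo_make(), oo_order() and
 * oo_objects() there for the exact encoding (OO_SHIFT is 16 at the time
 * of writing).
 */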

/*
 * Slab cache management.
 */
struct kmem_cache {
        struct kmem_cache_cpu __percpu *cpu_slab;
        /* Used for retrieving partial slabs, etc. */
        unsigned long flags;
        unsigned long min_partial;
        int size;               /* The size of an object including metadata */
        int object_size;        /* The size of an object without metadata */
        int offset;             /* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        int cpu_partial;        /* Number of per cpu partial objects to keep around */
#endif
        struct kmem_cache_order_objects oo;

        /* Allocation and freeing of slabs */
        struct kmem_cache_order_objects max;
        struct kmem_cache_order_objects min;
        gfp_t allocflags;       /* gfp flags to use on each alloc */
        int refcount;           /* Refcount for slab cache destroy */
        void (*ctor)(void *);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
        int reserved;           /* Reserved bytes at the end of slabs */
        int red_left_pad;       /* Left redzone padding size */
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
#ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
        struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
        struct memcg_cache_params memcg_params;
        int max_attr_size;      /* For propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
        struct kset *memcg_kset;
#endif
#endif

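        /*
         * Per-cache secret used to obfuscate the free pointer stored in
         * free objects when CONFIG_SLAB_FREELIST_HARDENED is enabled
         * (see freelist_ptr() in mm/slub.c).
         */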
#ifdef CONFIG_SLAB_FREELIST_HARDENED
        unsigned long random;
#endif

#ifdef CONFIG_NUMA
        /*
         * Defragmentation by allocating from a remote node.
         */
        int remote_node_defrag_ratio;
#endif

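        /*
         * Pre-computed random sequence used to randomize the order of the
         * initial freelist of each new slab when CONFIG_SLAB_FREELIST_RANDOM
         * is enabled (see init_cache_random_seq() in mm/slub.c).
         */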
#ifdef CONFIG_SLAB_FREELIST_RANDOM
        unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
        struct kasan_cache kasan_info;
#endif

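        /*
         * Per-node slab lists and bookkeeping, indexed by NUMA node id.
         * Entries for nodes that are not populated may be NULL.
         */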
        struct kmem_cache_node *node[MAX_NUMNODES];
};

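/*
 * Wrappers around s->cpu_partial so that common code can read and set the
 * per-cpu partial limit without #ifdefs; they turn into a constant and a
 * no-op when CONFIG_SLUB_CPU_PARTIAL is disabled.
 */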
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)             ((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)              \
({                                              \
        slub_cpu_partial(s) = (n);              \
})
#else
#define slub_cpu_partial(s)             (0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
                u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

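/*
 * Map an arbitrary address within a slab page back to the start of the
 * containing object, clamping to the last object on the page and
 * adjusting for the left red zone. Used, for example, by KASAN error
 * reporting to identify the object an access fell into.
 */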
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
                                void *x)
{
        void *object = x - (x - page_address(page)) % cache->size;
        void *last_object = page_address(page) +
                (page->objects - 1) * cache->size;
        void *result = (unlikely(object > last_object)) ? last_object : object;

        result = fixup_red_left(cache, result);
        return result;
}

#endif /* _LINUX_SLUB_DEF_H */