1 /* SPDX-License-Identifier: GPL-2.0 */
5 * Internal slab definitions
8 /* Reuses the bits in struct page */
10 unsigned long __page_flags;
12 #if defined(CONFIG_SLAB)
15 struct list_head slab_list;
16 struct rcu_head rcu_head;
18 struct kmem_cache *slab_cache;
19 void *freelist; /* array of free object indexes */
20 void *s_mem; /* first object */
23 #elif defined(CONFIG_SLUB)
26 struct list_head slab_list;
27 struct rcu_head rcu_head;
28 #ifdef CONFIG_SLUB_CPU_PARTIAL
31 int slabs; /* Nr of slabs left */
35 struct kmem_cache *slab_cache;
36 /* Double-word boundary */
37 void *freelist; /* first free object */
39 unsigned long counters;
46 unsigned int __unused;
48 #elif defined(CONFIG_SLOB)
50 struct list_head slab_list;
52 void *freelist; /* first free block */
54 unsigned int __unused_2;
57 #error "Unexpected slab allocator configured"
60 atomic_t __page_refcount;
62 unsigned long memcg_data;
66 #define SLAB_MATCH(pg, sl) \
67 static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
68 SLAB_MATCH(flags, __page_flags);
69 SLAB_MATCH(compound_head, slab_list); /* Ensure bit 0 is clear */
70 SLAB_MATCH(slab_list, slab_list);
72 SLAB_MATCH(rcu_head, rcu_head);
73 SLAB_MATCH(slab_cache, slab_cache);
76 SLAB_MATCH(s_mem, s_mem);
77 SLAB_MATCH(active, active);
79 SLAB_MATCH(_refcount, __page_refcount);
81 SLAB_MATCH(memcg_data, memcg_data);
84 static_assert(sizeof(struct slab) <= sizeof(struct page));
87 * folio_slab - Converts from folio to slab.
90 * Currently struct slab is a different representation of a folio where
91 * folio_test_slab() is true.
93 * Return: The slab which contains this folio.
95 #define folio_slab(folio) (_Generic((folio), \
96 const struct folio *: (const struct slab *)(folio), \
97 struct folio *: (struct slab *)(folio)))
100 * slab_folio - The folio allocated for a slab
103 * Slabs are allocated as folios that contain the individual objects and are
104 * using some fields in the first struct page of the folio - those fields are
105 * now accessed by struct slab. It is occasionally necessary to convert back to
106 * a folio in order to communicate with the rest of the mm. Please use this
107 * helper function instead of casting yourself, as the implementation may change
110 #define slab_folio(s) (_Generic((s), \
111 const struct slab *: (const struct folio *)s, \
112 struct slab *: (struct folio *)s))
115 * page_slab - Converts from first struct page to slab.
116 * @p: The first (either head of compound or single) page of slab.
118 * A temporary wrapper to convert struct page to struct slab in situations where
119 * we know the page is the compound head, or single order-0 page.
121 * Long-term ideally everything would work with struct slab directly or go
122 * through folio to struct slab.
124 * Return: The slab which contains this page
126 #define page_slab(p) (_Generic((p), \
127 const struct page *: (const struct slab *)(p), \
128 struct page *: (struct slab *)(p)))
131 * slab_page - The first struct page allocated for a slab
134 * A convenience wrapper for converting slab to the first struct page of the
135 * underlying folio, to communicate with code not yet converted to folio or
138 #define slab_page(s) folio_page(slab_folio(s), 0)
141 * If network-based swap is enabled, sl*b must keep track of whether pages
142 * were allocated from pfmemalloc reserves.
144 static inline bool slab_test_pfmemalloc(const struct slab *slab)
146 return folio_test_active((struct folio *)slab_folio(slab));
149 static inline void slab_set_pfmemalloc(struct slab *slab)
151 folio_set_active(slab_folio(slab));
154 static inline void slab_clear_pfmemalloc(struct slab *slab)
156 folio_clear_active(slab_folio(slab));
159 static inline void __slab_clear_pfmemalloc(struct slab *slab)
161 __folio_clear_active(slab_folio(slab));
164 static inline void *slab_address(const struct slab *slab)
166 return folio_address(slab_folio(slab));
169 static inline int slab_nid(const struct slab *slab)
171 return folio_nid(slab_folio(slab));
174 static inline pg_data_t *slab_pgdat(const struct slab *slab)
176 return folio_pgdat(slab_folio(slab));
179 static inline struct slab *virt_to_slab(const void *addr)
181 struct folio *folio = virt_to_folio(addr);
183 if (!folio_test_slab(folio))
186 return folio_slab(folio);
189 static inline int slab_order(const struct slab *slab)
191 return folio_order((struct folio *)slab_folio(slab));
194 static inline size_t slab_size(const struct slab *slab)
196 return PAGE_SIZE << slab_order(slab);
201 * Common fields provided in kmem_cache by all slab allocators
202 * This struct is either used directly by the allocator (SLOB)
203 * or the allocator must include definitions for all fields
204 * provided in kmem_cache_common in their definition of kmem_cache.
206 * Once we can do anonymous structs (C11 standard) we could put a
207 * anonymous struct definition in these allocators so that the
208 * separate allocations in the kmem_cache structure of SLAB and
209 * SLUB is no longer needed.
212 unsigned int object_size;/* The original size of the object */
213 unsigned int size; /* The aligned/padded/added on size */
214 unsigned int align; /* Alignment as calculated */
215 slab_flags_t flags; /* Active flags on the slab */
216 unsigned int useroffset;/* Usercopy region offset */
217 unsigned int usersize; /* Usercopy region size */
218 const char *name; /* Slab name for sysfs */
219 int refcount; /* Use counter */
220 void (*ctor)(void *); /* Called on object slot creation */
221 struct list_head list; /* List of all slab caches on the system */
224 #endif /* CONFIG_SLOB */
227 #include <linux/slab_def.h>
231 #include <linux/slub_def.h>
234 #include <linux/memcontrol.h>
235 #include <linux/fault-inject.h>
236 #include <linux/kasan.h>
237 #include <linux/kmemleak.h>
238 #include <linux/random.h>
239 #include <linux/sched/mm.h>
242 * State of the slab allocator.
244 * This is used to describe the states of the allocator during bootup.
245 * Allocators use this to gradually bootstrap themselves. Most allocators
246 * have the problem that the structures used for managing slab caches are
247 * allocated from slab caches themselves.
250 DOWN, /* No slab functionality yet */
251 PARTIAL, /* SLUB: kmem_cache_node available */
252 PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */
253 UP, /* Slab caches usable but not all extras yet */
254 FULL /* Everything is working */
257 extern enum slab_state slab_state;
259 /* The slab cache mutex protects the management structures during changes */
260 extern struct mutex slab_mutex;
262 /* The list of all slab caches on the system */
263 extern struct list_head slab_caches;
265 /* The slab cache that manages slab cache information */
266 extern struct kmem_cache *kmem_cache;
268 /* A table of kmalloc cache names and sizes */
269 extern const struct kmalloc_info_struct {
270 const char *name[NR_KMALLOC_TYPES];
275 /* Kmalloc array related functions */
276 void setup_kmalloc_cache_index_table(void);
277 void create_kmalloc_caches(slab_flags_t);
279 /* Find the kmalloc slab corresponding for a certain size */
280 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
283 gfp_t kmalloc_fix_flags(gfp_t flags);
285 /* Functions provided by the slab allocators */
286 int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
288 struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
289 slab_flags_t flags, unsigned int useroffset,
290 unsigned int usersize);
291 extern void create_boot_cache(struct kmem_cache *, const char *name,
292 unsigned int size, slab_flags_t flags,
293 unsigned int useroffset, unsigned int usersize);
295 int slab_unmergeable(struct kmem_cache *s);
296 struct kmem_cache *find_mergeable(unsigned size, unsigned align,
297 slab_flags_t flags, const char *name, void (*ctor)(void *));
300 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
301 slab_flags_t flags, void (*ctor)(void *));
303 slab_flags_t kmem_cache_flags(unsigned int object_size,
304 slab_flags_t flags, const char *name);
306 static inline struct kmem_cache *
307 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
308 slab_flags_t flags, void (*ctor)(void *))
311 static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
312 slab_flags_t flags, const char *name)
319 /* Legal flag mask for kmem_cache_create(), for various configurations */
320 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
321 SLAB_CACHE_DMA32 | SLAB_PANIC | \
322 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
324 #if defined(CONFIG_DEBUG_SLAB)
325 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
326 #elif defined(CONFIG_SLUB_DEBUG)
327 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
328 SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
330 #define SLAB_DEBUG_FLAGS (0)
333 #if defined(CONFIG_SLAB)
334 #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
335 SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
337 #elif defined(CONFIG_SLUB)
338 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
339 SLAB_TEMPORARY | SLAB_ACCOUNT)
341 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
344 /* Common flags available with current configuration */
345 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
347 /* Common flags permitted for kmem_cache_create */
348 #define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
353 SLAB_CONSISTENCY_CHECKS | \
356 SLAB_RECLAIM_ACCOUNT | \
360 bool __kmem_cache_empty(struct kmem_cache *);
361 int __kmem_cache_shutdown(struct kmem_cache *);
362 void __kmem_cache_release(struct kmem_cache *);
363 int __kmem_cache_shrink(struct kmem_cache *);
364 void slab_kmem_cache_release(struct kmem_cache *);
370 unsigned long active_objs;
371 unsigned long num_objs;
372 unsigned long active_slabs;
373 unsigned long num_slabs;
374 unsigned long shared_avail;
376 unsigned int batchcount;
378 unsigned int objects_per_slab;
379 unsigned int cache_order;
382 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
383 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
384 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
385 size_t count, loff_t *ppos);
388 * Generic implementation of bulk operations
389 * These are useful for situations in which the allocator cannot
390 * perform optimizations. In that case segments of the object listed
391 * may be allocated or freed using these operations.
393 void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
394 int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
396 static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
398 return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
399 NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
402 #ifdef CONFIG_SLUB_DEBUG
403 #ifdef CONFIG_SLUB_DEBUG_ON
404 DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
406 DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
408 extern void print_tracking(struct kmem_cache *s, void *object);
409 long validate_slab_cache(struct kmem_cache *s);
410 static inline bool __slub_debug_enabled(void)
412 return static_branch_unlikely(&slub_debug_enabled);
415 static inline void print_tracking(struct kmem_cache *s, void *object)
418 static inline bool __slub_debug_enabled(void)
425 * Returns true if any of the specified slub_debug flags is enabled for the
426 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
429 static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
431 if (IS_ENABLED(CONFIG_SLUB_DEBUG))
432 VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
433 if (__slub_debug_enabled())
434 return s->flags & flags;
438 #ifdef CONFIG_MEMCG_KMEM
440 * slab_objcgs - get the object cgroups vector associated with a slab
441 * @slab: a pointer to the slab struct
443 * Returns a pointer to the object cgroups vector associated with the slab,
444 * or NULL if no such vector has been associated yet.
446 static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
448 unsigned long memcg_data = READ_ONCE(slab->memcg_data);
450 VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
452 VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));
454 return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
457 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
458 gfp_t gfp, bool new_slab);
459 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
460 enum node_stat_item idx, int nr);
462 static inline void memcg_free_slab_cgroups(struct slab *slab)
464 kfree(slab_objcgs(slab));
465 slab->memcg_data = 0;
468 static inline size_t obj_full_size(struct kmem_cache *s)
471 * For each accounted object there is an extra space which is used
472 * to store obj_cgroup membership. Charge it too.
474 return s->size + sizeof(struct obj_cgroup *);
478 * Returns false if the allocation should fail.
480 static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
481 struct obj_cgroup **objcgp,
482 size_t objects, gfp_t flags)
484 struct obj_cgroup *objcg;
486 if (!memcg_kmem_enabled())
489 if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
492 objcg = get_obj_cgroup_from_current();
496 if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
497 obj_cgroup_put(objcg);
505 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
506 struct obj_cgroup *objcg,
507 gfp_t flags, size_t size,
514 if (!memcg_kmem_enabled() || !objcg)
517 for (i = 0; i < size; i++) {
519 slab = virt_to_slab(p[i]);
521 if (!slab_objcgs(slab) &&
522 memcg_alloc_slab_cgroups(slab, s, flags,
524 obj_cgroup_uncharge(objcg, obj_full_size(s));
528 off = obj_to_index(s, slab, p[i]);
529 obj_cgroup_get(objcg);
530 slab_objcgs(slab)[off] = objcg;
531 mod_objcg_state(objcg, slab_pgdat(slab),
532 cache_vmstat_idx(s), obj_full_size(s));
534 obj_cgroup_uncharge(objcg, obj_full_size(s));
537 obj_cgroup_put(objcg);
540 static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
541 void **p, int objects)
543 struct kmem_cache *s;
544 struct obj_cgroup **objcgs;
545 struct obj_cgroup *objcg;
550 if (!memcg_kmem_enabled())
553 for (i = 0; i < objects; i++) {
557 slab = virt_to_slab(p[i]);
558 /* we could be given a kmalloc_large() object, skip those */
562 objcgs = slab_objcgs(slab);
567 s = slab->slab_cache;
571 off = obj_to_index(s, slab, p[i]);
577 obj_cgroup_uncharge(objcg, obj_full_size(s));
578 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
580 obj_cgroup_put(objcg);
584 #else /* CONFIG_MEMCG_KMEM */
585 static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
590 static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
595 static inline int memcg_alloc_slab_cgroups(struct slab *slab,
596 struct kmem_cache *s, gfp_t gfp,
602 static inline void memcg_free_slab_cgroups(struct slab *slab)
606 static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
607 struct obj_cgroup **objcgp,
608 size_t objects, gfp_t flags)
613 static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
614 struct obj_cgroup *objcg,
615 gfp_t flags, size_t size,
620 static inline void memcg_slab_free_hook(struct kmem_cache *s,
621 void **p, int objects)
624 #endif /* CONFIG_MEMCG_KMEM */
627 static inline struct kmem_cache *virt_to_cache(const void *obj)
631 slab = virt_to_slab(obj);
632 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
635 return slab->slab_cache;
638 static __always_inline void account_slab(struct slab *slab, int order,
639 struct kmem_cache *s, gfp_t gfp)
641 if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
642 memcg_alloc_slab_cgroups(slab, s, gfp, true);
644 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
648 static __always_inline void unaccount_slab(struct slab *slab, int order,
649 struct kmem_cache *s)
651 if (memcg_kmem_enabled())
652 memcg_free_slab_cgroups(slab);
654 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
655 -(PAGE_SIZE << order));
658 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
660 struct kmem_cache *cachep;
662 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
663 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
666 cachep = virt_to_cache(x);
667 if (WARN(cachep && cachep != s,
668 "%s: Wrong slab cache. %s but object is from %s\n",
669 __func__, s->name, cachep->name))
670 print_tracking(cachep, x);
673 #endif /* CONFIG_SLOB */
675 static inline size_t slab_ksize(const struct kmem_cache *s)
678 return s->object_size;
680 #else /* CONFIG_SLUB */
681 # ifdef CONFIG_SLUB_DEBUG
683 * Debugging requires use of the padding between object
684 * and whatever may come after it.
686 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
687 return s->object_size;
689 if (s->flags & SLAB_KASAN)
690 return s->object_size;
692 * If we have the need to store the freelist pointer
693 * back there or track user information then we can
694 * only use the space before that information.
696 if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
699 * Else we can use all the padding etc for the allocation
705 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
706 struct obj_cgroup **objcgp,
707 size_t size, gfp_t flags)
709 flags &= gfp_allowed_mask;
713 if (should_failslab(s, flags))
716 if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
722 static inline void slab_post_alloc_hook(struct kmem_cache *s,
723 struct obj_cgroup *objcg, gfp_t flags,
724 size_t size, void **p, bool init)
728 flags &= gfp_allowed_mask;
731 * As memory initialization might be integrated into KASAN,
732 * kasan_slab_alloc and initialization memset must be
733 * kept together to avoid discrepancies in behavior.
735 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
737 for (i = 0; i < size; i++) {
738 p[i] = kasan_slab_alloc(s, p[i], flags, init);
739 if (p[i] && init && !kasan_has_integrated_init())
740 memset(p[i], 0, s->object_size);
741 kmemleak_alloc_recursive(p[i], s->object_size, 1,
745 memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
750 * The slab lists for all objects.
752 struct kmem_cache_node {
753 spinlock_t list_lock;
756 struct list_head slabs_partial; /* partial list first, better asm code */
757 struct list_head slabs_full;
758 struct list_head slabs_free;
759 unsigned long total_slabs; /* length of all slab lists */
760 unsigned long free_slabs; /* length of free slab list only */
761 unsigned long free_objects;
762 unsigned int free_limit;
763 unsigned int colour_next; /* Per-node cache coloring */
764 struct array_cache *shared; /* shared per node */
765 struct alien_cache **alien; /* on other nodes */
766 unsigned long next_reap; /* updated without locking */
767 int free_touched; /* updated without locking */
771 unsigned long nr_partial;
772 struct list_head partial;
773 #ifdef CONFIG_SLUB_DEBUG
774 atomic_long_t nr_slabs;
775 atomic_long_t total_objects;
776 struct list_head full;
782 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
784 return s->node[node];
788 * Iterator over all nodes. The body will be executed for each node that has
789 * a kmem_cache_node structure allocated (which is true for all online nodes)
791 #define for_each_kmem_cache_node(__s, __node, __n) \
792 for (__node = 0; __node < nr_node_ids; __node++) \
793 if ((__n = get_node(__s, __node)))
797 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
798 void dump_unreclaimable_slab(void);
800 static inline void dump_unreclaimable_slab(void)
805 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
807 #ifdef CONFIG_SLAB_FREELIST_RANDOM
808 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
810 void cache_random_seq_destroy(struct kmem_cache *cachep);
812 static inline int cache_random_seq_create(struct kmem_cache *cachep,
813 unsigned int count, gfp_t gfp)
817 static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
818 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
820 static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
822 if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
826 if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
827 return flags & __GFP_ZERO;
830 return flags & __GFP_ZERO;
833 static inline bool slab_want_init_on_free(struct kmem_cache *c)
835 if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
838 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
842 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
843 void debugfs_slab_release(struct kmem_cache *);
845 static inline void debugfs_slab_release(struct kmem_cache *s) { }
849 #define KS_ADDRS_COUNT 16
850 struct kmem_obj_info {
852 struct slab *kp_slab;
854 unsigned long kp_data_offset;
855 struct kmem_cache *kp_slab_cache;
857 void *kp_stack[KS_ADDRS_COUNT];
858 void *kp_free_stack[KS_ADDRS_COUNT];
860 void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
863 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
864 void __check_heap_object(const void *ptr, unsigned long n,
865 const struct slab *slab, bool to_user);
868 void __check_heap_object(const void *ptr, unsigned long n,
869 const struct slab *slab, bool to_user)
874 #endif /* MM_SLAB_H */