#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can use anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate definitions in the kmem_cache structures of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
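
/*
 * Example (illustrative sketch, not part of this header): allocators key
 * phased initialization off slab_state, e.g. deferring sysfs registration
 * until bootstrapping has finished. The helper names below are
 * hypothetical:
 *
 *	if (slab_state < FULL)
 *		defer_sysfs_registration(s);	// hypothetical early path
 *	else
 *		register_cache_in_sysfs(s);	// hypothetical late path
 */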

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
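
/*
 * Example (sketch): any walk of slab_caches must hold slab_mutex so the
 * list cannot change underneath us:
 *
 *	struct kmem_cache *s;
 *
 *	mutex_lock(&slab_mutex);
 *	list_for_each_entry(s, &slab_caches, list)
 *		pr_info("%s\n", s->name);
 *	mutex_unlock(&slab_mutex);
 */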

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
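
/*
 * Example (sketch): a kmalloc() slow path maps the request size to its
 * backing cache (simplified from the real allocators):
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;	// zero-sized request, or size too large
 *	return kmem_cache_alloc(s, flags);
 */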

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);
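
/*
 * Example (sketch): early in boot the cache of caches is set up in place,
 * since kmem_cache itself cannot come from a slab yet (simplified from a
 * kmem_cache_init() style bootstrap; kmem_cache_size stands in for the
 * allocator's computed structure size):
 *
 *	create_boot_cache(kmem_cache, "kmem_cache", kmem_cache_size,
 *			  SLAB_HWCACHE_ALIGN);
 */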

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
	unsigned long flags, const char *name, void (*ctor)(void *));
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif
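
/*
 * Example (sketch): kmem_cache_create() tries to merge with an existing,
 * compatible cache before building a new one (simplified from
 * mm/slab_common.c):
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		return s;	// reused a mergeable cache
 */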

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
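
/*
 * Example (sketch): cache creation filters caller-supplied flag bits
 * against the legal mask for the current configuration (simplified from
 * kmem_cache_create()):
 *
 *	flags &= CACHE_CREATE_MASK;
 */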

int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);
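
/*
 * Example (sketch): kmem_cache_destroy() builds on these hooks; under
 * slab_mutex it roughly does (simplified):
 *
 *	if (__kmem_cache_shutdown(s) == 0) {
 *		list_del(&s->list);
 *		slab_kmem_cache_release(s);	// frees the name and struct
 *	}
 */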

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
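
/*
 * Example (sketch): producing a /proc/slabinfo style row from a snapshot
 * (simplified from the real show path; the format string is illustrative):
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *	seq_printf(m, "%-17s %6lu %6lu %6u %4u\n",
 *		   cache_name(s), sinfo.active_objs, sinfo.num_objs,
 *		   s->size, sinfo.objects_per_slab);
 */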

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}
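
/*
 * For example, a per-memcg clone of "dentry" may be registered under a
 * suffixed name such as "dentry(2:mygroup)" (the exact format is chosen by
 * the memcg code), while cache_name() still returns "dentry".
 */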

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away. Since once
 * created a memcg's cache is destroyed only along with the root cache, this is
 * true if we are going to allocate from the cache or hold a reference to the
 * root cache by other means. Otherwise, we should hold either the slab_mutex
 * or the memcg's slab_caches_mutex while calling this function and accessing
 * the returned value.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!s->memcg_params)
		return NULL;

	rcu_read_lock();
	params = rcu_dereference(s->memcg_params);
	cachep = params->memcg_caches[idx];
	rcu_read_unlock();

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_register_cache()).
	 */
	smp_read_barrier_depends();
	return cachep;
}
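
/*
 * Example (sketch): iterating the per-memcg clones of a root cache under
 * slab_mutex, as the comment above requires (for_each_memcg_cache_index
 * is provided by memcontrol.h):
 *
 *	struct kmem_cache *c;
 *	int i;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		c = cache_from_memcg_idx(s, i);
 *		if (c)
 *			pr_info("%s\n", c->name);
 *	}
 *	mutex_unlock(&slab_mutex);
 */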

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return __memcg_charge_slab(s, gfp, order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	__memcg_uncharge_slab(s, order);
}
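
/*
 * Example (sketch): the slab page allocation path brackets the page
 * allocation with a memcg charge, uncharging on failure (simplified from
 * the allocators):
 *
 *	if (memcg_charge_slab(s, flags, order))
 *		return NULL;
 *	page = alloc_pages(flags, order);
 *	if (!page)
 *		memcg_uncharge_slab(s, order);
 */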

#else
static inline bool is_root_cache(struct kmem_cache *s)
{ return true; }

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{ return true; }

static inline const char *cache_name(struct kmem_cache *s)
{ return s->name; }

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{ return NULL; }

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{ return s; }

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{ return 0; }

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{ }
#endif

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * not to do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && likely(!(s->flags & SLAB_DEBUG_FREE)))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
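
/*
 * Example (sketch): the free paths use this to catch objects freed to the
 * wrong cache (simplified from the allocators' kmem_cache_free()):
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		...actual freeing...
 *	}
 */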

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		if ((__n = get_node(__s, __node)))

#endif /* !CONFIG_SLOB */
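
/*
 * Example (sketch): summing per-node partial counts (SLUB fields shown;
 * simplified from the statistics code):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += n->nr_partial;
 */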

void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
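
/*
 * Example (sketch): these iterators back the /proc/slabinfo seq_file
 * interface; the ->start and ->show handlers live with the display logic
 * in mm/slab_common.c (the handler names below are illustrative):
 *
 *	static const struct seq_operations slabinfo_op = {
 *		.start = slab_start,	// assumed defined elsewhere
 *		.next  = slab_next,
 *		.stop  = slab_stop,
 *		.show  = slab_show,	// assumed defined elsewhere
 *	};
 */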

#endif /* MM_SLAB_H */