/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures that the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * Take rcu_read_lock() before reading the address, and rcu_read_unlock()
 * after taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
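/*
 * A minimal usage sketch (illustrative only; "struct conn" and its fields are
 * hypothetical): a cache created with this flag must pair the lookup pattern
 * above with a per-object validity check such as a reference count.
 *
 *	conn_cachep = kmem_cache_create("conn", sizeof(struct conn),
 *					0, SLAB_TYPESAFE_BY_RCU, NULL);
 *
 *	// Readers then follow the revalidation loop shown above, e.g.
 *	// taking a reference and re-checking obj->key before use.
 */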
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
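/*
 * Illustrative sketch: a zero-sized request yields ZERO_SIZE_PTR, which must
 * not be dereferenced but is safe to pass to kfree().
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// p == ZERO_SIZE_PTR
 *
 *	if (ZERO_OR_NULL_PTR(p))
 *		;			// nothing usable was allocated
 *	kfree(p);			// no-op for both NULL and ZERO_SIZE_PTR
 */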

#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
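/*
 * A minimal usage sketch, assuming a hypothetical "struct request_entry"
 * defined elsewhere; KMEM_CACHE() derives the cache name, object size and
 * alignment from the struct itself.
 *
 *	static struct kmem_cache *request_cachep;
 *
 *	request_cachep = KMEM_CACHE(request_entry, SLAB_HWCACHE_ALIGN);
 *	if (!request_cachep)
 *		return -ENOMEM;
 */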

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
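/*
 * Illustrative sketch (hypothetical struct and field names): whitelist only
 * the "payload" member of "struct msg_entry" for hardened usercopy, so
 * copies to/from userspace may touch that field but nothing else in the
 * object.
 *
 *	static struct kmem_cache *msg_cachep;
 *
 *	msg_cachep = KMEM_CACHE_USERCOPY(msg_entry, SLAB_ACCOUNT, payload);
 */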

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)
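/*
 * Worked example (illustrative; actual values depend on the architecture):
 * with SLUB, 4 KiB pages (PAGE_SHIFT = 12) and MAX_ORDER = 11:
 *
 *	KMALLOC_SHIFT_HIGH = 12 + 1      = 13 -> KMALLOC_MAX_CACHE_SIZE = 8 KiB
 *	KMALLOC_SHIFT_MAX  = 11 + 12 - 1 = 22 -> KMALLOC_MAX_SIZE       = 4 MiB
 *	KMALLOC_MAX_ORDER  = 22 - 12     = 10
 *
 * i.e. requests up to 8 KiB come from kmalloc slab caches; larger requests
 * go straight to the page allocator, up to an order-10 allocation.
 */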

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte-sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte-sized index which can represent 2^8 entries, the size of the
 * object should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on using a byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <=  16 * 1024 * 1024) return 24;
	if (size <=  32 * 1024 * 1024) return 25;
	if (size <=  64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
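/*
 * A minimal sketch of the bulk API (illustrative; "obj_cachep" is assumed to
 * be an existing cache): kmem_cache_alloc_bulk() fills the pointer array and
 * returns the number of objects it allocated, so a short return should be
 * treated as failure of the whole batch.
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(obj_cachep, GFP_KERNEL,
 *				  ARRAY_SIZE(objs), objs) != ARRAY_SIZE(objs))
 *		return -ENOMEM;
 *	// ... use the objects ...
 *	kmem_cache_free_bulk(obj_cachep, ARRAY_SIZE(objs), objs);
 */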

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail
 *   eventually.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			unsigned int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
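/*
 * A minimal usage sketch (illustrative; "struct foo" is hypothetical): check
 * for NULL on failure and release with kfree() when done.
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	// ... initialize and use f ...
 *	kfree(f);
 */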

/*
 * Determine the size used for the nth kmalloc cache.
 * Return the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;

			void (*deact_fn)(struct kmem_cache *);
			union {
				struct rcu_head deact_rcu_head;
				struct work_struct deact_work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
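/*
 * A minimal sketch (illustrative; "struct item" and "count" are
 * hypothetical): both helpers reject multiplications that would overflow and
 * return NULL instead; kcalloc() additionally zeroes the memory.
 *
 *	struct item *items = kcalloc(count, sizeof(*items), GFP_KERNEL);
 *
 *	if (!items)
 *		return -ENOMEM;
 *	// ... use items[0 .. count-1] ...
 *	kfree(items);
 */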

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * caller of the routine that calls it, rather than the routine itself,
 * for slab leak tracking.
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
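/*
 * Illustrative sketch (hypothetical wrapper): a library-style helper can use
 * kmalloc_track_caller() so that leak and debug reports attribute the
 * allocation to the helper's caller instead of to the helper itself.
 *
 *	void *mylib_alloc(size_t len, gfp_t gfp)
 *	{
 *		return kmalloc_track_caller(len, gfp);
 *	}
 */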

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}
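/*
 * A minimal sketch (illustrative; "struct stats" and "nid" are hypothetical):
 * allocate a zeroed, node-local object and check for failure as usual.
 *
 *	struct stats *s = kzalloc_node(sizeof(*s), GFP_KERNEL, nid);
 *
 *	if (!s)
 *		return -ENOMEM;
 */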

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */