/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
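
/*
 * Illustrative sketch, not part of this header: mm/slub.c typically bumps
 * these counters through a tiny per-cpu helper along these lines, which
 * compiles away when CONFIG_SLUB_STATS is not set:
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 */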

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
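
/*
 * Illustrative sketch, not part of this header: the lockless allocation fast
 * path in mm/slub.c pairs @freelist with @tid so that preemption, interrupts
 * or cpu migration between reading the freelist and updating it are detected
 * (the cmpxchg fails) and the fast path is simply retried. Roughly:
 *
 *	tid = this_cpu_read(s->cpu_slab->tid);
 *	c = raw_cpu_ptr(s->cpu_slab);
 *	object = c->freelist;
 *	if (unlikely(!object))
 *		object = __slab_alloc(s, gfpflags, node, addr, c);
 *	else if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist,
 *					  s->cpu_slab->tid,
 *					  object, tid,
 *					  get_freepointer_safe(s, object),
 *					  next_tid(tid)))
 *		goto redo;
 */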

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL
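
/*
 * Illustrative use, roughly following the slow path in mm/slub.c: when the
 * cpu slab is exhausted, the next frozen slab is popped off the per-cpu
 * partial list (slub_set_percpu_partial() advances ->partial to page->next)
 * before falling back to the node partial lists:
 *
 *	if (slub_percpu_partial(c)) {
 *		page = c->page = slub_percpu_partial(c);
 *		slub_set_percpu_partial(c, page);
 *		stat(s, CPU_PARTIAL_ALLOC);
 *		goto redo;
 *	}
 */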

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
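
/*
 * Illustrative sketch, assuming the OO_SHIFT packing used by mm/slub.c: the
 * page order lives in the upper bits of @x and the object count in the lower
 * bits, so both values travel together in one word-sized load or store:
 *
 *	#define OO_SHIFT	16
 *	#define OO_MASK		((1 << OO_SHIFT) - 1)
 *
 *	static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 *	{
 *		return x.x >> OO_SHIFT;
 *	}
 *
 *	static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 *	{
 *		return x.x & OO_MASK;
 *	}
 */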

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs etc */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including meta data */
	unsigned int object_size;/* The size of an object without meta data */
	unsigned int offset;	/* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
	struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	/* for propagation, maximum size of a stored attr */
	unsigned int max_attr_size;
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};
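
/*
 * Illustrative sketch, not part of this header: @offset is where the free
 * pointer is stored inside a free object, letting the freelist be threaded
 * through the objects themselves. Roughly, following mm/slub.c:
 *
 *	static inline void *get_freepointer(struct kmem_cache *s, void *object)
 *	{
 *		return *(void **)(object + s->offset);
 *	}
 *
 * With CONFIG_SLAB_FREELIST_HARDENED the stored pointer is additionally
 * obfuscated using the per-cache @random value.
 */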

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)		\
({						\
	slub_cpu_partial(s) = (n);		\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif // CONFIG_SLUB_CPU_PARTIAL
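
/*
 * Illustrative sketch, roughly following put_cpu_partial() in mm/slub.c:
 * slub_cpu_partial() caps how many objects may accumulate on the per-cpu
 * partial list before the frozen slabs are drained back to the node lists:
 *
 *	if (drain && pobjects > slub_cpu_partial(s)) {
 *		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
 *		stat(s, CPU_PARTIAL_DRAIN);
 *	}
 */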

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);
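
/*
 * Illustrative sketch of what the mm/slub.c definition does (an assumption;
 * see the implementation there): with SLAB_RED_ZONE debugging, each object is
 * preceded by a left red zone, so the usable object starts @red_left_pad
 * bytes past the raw slot address:
 *
 *	void *fixup_red_left(struct kmem_cache *s, void *p)
 *	{
 *		if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
 *			p += s->red_left_pad;
 *		return p;
 *	}
 */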

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x) {
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
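
/*
 * Illustrative usage with hypothetical variable names: given an arbitrary
 * address inside a slab page, for example the faulting address in a KASAN
 * report, nearest_obj() rounds it down to the start of the containing object:
 *
 *	void *object = nearest_obj(cache, page, (void *)bad_addr);
 */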

#endif /* _LINUX_SLUB_DEF_H */