/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0.  After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * removes the kioctx from the process's table of kioctxs and kills the
 * percpu_ref.  After that, there can't be any new users of the kioctx (from
 * lookup_ioctx()) and it's then safe to drop the initial ref with
 * percpu_ref_put().
 *
 * Note that the free path, free_ioctx(), needs to go through explicit call_rcu()
 * to synchronize with RCU protected lookup_ioctx().  percpu_ref operations don't
 * imply RCU grace periods of any kind and if a user wants to combine percpu_ref
 * with RCU protection, it must be done explicitly.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure that the initial refcount is dropped
 * exactly once.  percpu_ref_kill() does not provide this for you and must
 * itself be called exactly once, so the caller needs some other mechanism
 * to synchronize teardown.
 */
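
/*
 * A minimal lifecycle sketch.  struct foo, foo_alloc(), foo_release() and
 * foo_destroy() are hypothetical names, not taken from fs/aio.c:
 *
 *	struct foo {
 *		struct percpu_ref	ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);
 *		kfree(foo);
 *	}
 *
 *	struct foo *foo_alloc(void)
 *	{
 *		struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *		if (!foo)
 *			return NULL;
 *		if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL)) {
 *			kfree(foo);
 *			return NULL;
 *		}
 *		return foo;			// ref == 1, percpu mode
 *	}
 *
 *	void foo_destroy(struct foo *foo)
 *	{
 *		percpu_ref_kill(&foo->ref);	// switch to atomic mode, mark dead
 *		percpu_ref_put(&foo->ref);	// drop the initial ref
 *	}
 *
 * Ordinary users bracket their accesses with percpu_ref_get()/
 * percpu_ref_put(); once foo_destroy() has run and the last such put
 * drops the count to zero, foo_release() frees the object.
 */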

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

/* @flags for percpu_ref_init() */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
	 * operation using percpu_ref_switch_to_percpu().  If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
	 * percpu_ref_reinit() before use.  Implies INIT_ATOMIC.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,
};
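
/*
 * A sketch of PERCPU_REF_INIT_DEAD usage with hypothetical names - the
 * object must not hand out references until its setup has finished:
 *
 *	err = percpu_ref_init(&foo->ref, foo_release,
 *			      PERCPU_REF_INIT_DEAD, GFP_KERNEL);
 *	...
 *	percpu_ref_reinit(&foo->ref);	// setup done: live again with ref == 1
 */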

struct percpu_ref {
	atomic_long_t		count;
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, then get/put will manipulate the atomic_t.
	 */
	unsigned long		percpu_count_ptr;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, unsigned int flags,
				 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_reinit(struct percpu_ref *ref);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}
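
/*
 * Since no grace period is implied, a release path that must synchronize
 * with RCU protected lookups has to defer the actual free itself, as
 * fs/aio.c does via call_rcu().  A sketch with hypothetical names,
 * assuming struct foo embeds both the percpu_ref and a struct rcu_head:
 *
 *	static void foo_free_rcu(struct rcu_head *head)
 *	{
 *		struct foo *foo = container_of(head, struct foo, rcu);
 *
 *		percpu_ref_exit(&foo->ref);
 *		kfree(foo);
 *	}
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		call_rcu(&foo->rcu, foo_free_rcu);
 *	}
 */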

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer and let the caller test it for NULL
 * because doing so forces the compiler to generate two conditional
 * branches as it can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
					  unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * The value of @ref->percpu_count_ptr is tested for
	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
	 * used as a pointer.  If the compiler generates a separate fetch
	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
	 * between contaminating the pointer value, meaning that
	 * READ_ONCE() is required when fetching it.
	 *
	 * The smp_read_barrier_depends() implied by READ_ONCE() pairs
	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/*
	 * Theoretically, the following could test just ATOMIC; however,
	 * then we'd have to mask off DEAD separately as DEAD may be
	 * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
	 * implies ATOMIC anyway.  Test them together.
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->count);

	rcu_read_unlock_sched();
}
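
/*
 * A sketch of batched use with hypothetical names - take one reference
 * per request up front rather than making @nr separate percpu_ref_get()
 * calls:
 *
 *	percpu_ref_get_many(&foo->ref, nr);
 *	for (i = 0; i < nr; i++)
 *		submit_one(foo, reqs[i]);	// each completion ends in percpu_ref_put()
 */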

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
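
/*
 * Note that unlike percpu_ref_tryget_live() below, this succeeds even
 * after percpu_ref_kill() as long as the count hasn't reached zero yet.
 * A sketch with hypothetical names:
 *
 *	if (!percpu_ref_tryget(&foo->ref))
 *		return -EAGAIN;		// all refs gone, foo is being released
 */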

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
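
/*
 * A sketch of the lookup side paired with a confirmed kill.  Hypothetical
 * names; assumes foo objects live in an RCU managed foo_idr and that
 * struct foo embeds a struct completion confirm_done:
 *
 *	struct foo *foo_lookup(int id)
 *	{
 *		struct foo *foo;
 *
 *		rcu_read_lock();
 *		foo = idr_find(&foo_idr, id);
 *		if (foo && !percpu_ref_tryget_live(&foo->ref))
 *			foo = NULL;
 *		rcu_read_unlock();
 *		return foo;
 *	}
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		// foo_lookup() can no longer hand out new refs
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 *	percpu_ref_put(&foo->ref);	// now drop the initial ref
 */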

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
		ref->release(ref);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
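
/*
 * A sketch of typical use with hypothetical names - rejecting new work
 * while teardown is in flight, under a lock that is also held around
 * percpu_ref_kill():
 *
 *	spin_lock(&foo->lock);
 *	if (percpu_ref_is_dying(&foo->ref)) {
 *		spin_unlock(&foo->lock);
 *		return -ENODEV;
 *	}
 *	...
 *	spin_unlock(&foo->lock);
 */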

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;
	return !atomic_long_read(&ref->count);
}
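
/*
 * Mainly useful for sanity checks in teardown or revival paths, e.g.
 * (a sketch - the ref must already be killed and drained here):
 *
 *	WARN_ON_ONCE(!percpu_ref_is_zero(&foo->ref));
 *	percpu_ref_reinit(&foo->ref);
 *
 * Without external synchronization the result may be stale by the time
 * the caller acts on it.
 */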

#endif