/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
	/*
	 * The last color is reserved as "no color", used for work items
	 * which don't participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_CANCELING	(1ul << __WORK_OFFQ_CANCELING)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)

#define WORK_STRUCT_FLAG_MASK    ((1ul << WORK_STRUCT_FLAG_BITS) - 1)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
	 * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus
	 * doesn't participate in pool hash calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
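
/*
 * Example (illustrative sketch, not part of this header; my_handler and
 * my_work are hypothetical names): a statically declared work item needs
 * no runtime initialization and can be queued directly:
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	then, from process or interrupt context:
 *
 *	schedule_work(&my_work);
 */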

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
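
/*
 * Example (illustrative sketch; my_dev, my_work_fn and my_timeout_fn are
 * hypothetical): work items embedded in a driver structure are initialized
 * at runtime, typically from a probe or setup routine:
 *
 *	struct my_dev {
 *		struct work_struct io_work;
 *		struct delayed_work timeout_work;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		INIT_WORK(&dev->io_work, my_work_fn);
 *		INIT_DELAYED_WORK(&dev->timeout_work, my_timeout_fn);
 *		return 0;
 *	}
 */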

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
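
/*
 * Example (illustrative sketch, reusing the hypothetical my_dev above):
 * work_pending() only samples the PENDING bit without synchronization, so
 * it is mainly useful for assertions and statistics, e.g. as a sanity
 * check after cancellation:
 *
 *	cancel_work_sync(&dev->io_work);
 *	WARN_ON(work_pending(&dev->io_work));
 */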

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU, breaking the idleness, which
	 * in turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are found to contribute
	 * significantly to power consumption are marked with this flag, and
	 * enabling the power_efficient mode leads to noticeable power
	 * savings at the cost of a small performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args...: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	__lock_name = "(wq_completion)"#fmt#args;			\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif
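
/*
 * Example (illustrative sketch; "my_wq" is a hypothetical name): a driver
 * that may participate in memory reclaim allocates its own workqueue,
 * which guarantees a rescuer thread, with the default max_active:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 *	if (!wq)
 *		return -ENOMEM;
 */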

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
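
/*
 * Example (illustrative sketch; "my_ordered_wq" is a hypothetical name):
 * state-machine steps that must not run concurrently or out of order can
 * share one ordered workqueue:
 *
 *	struct workqueue_struct *ordered_wq;
 *
 *	ordered_wq = alloc_ordered_workqueue("my_ordered_wq", WQ_FREEZABLE);
 *	if (!ordered_wq)
 *		return -ENOMEM;
 */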

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
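
/*
 * Example (illustrative sketch, error handling elided; my_unbound_wq is a
 * hypothetical unbound workqueue): lowering the priority of an unbound
 * workqueue's workers and restricting them to CPU 0 via attrs:
 *
 *	struct workqueue_attrs *attrs;
 *
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	attrs->nice = 10;
 *	cpumask_copy(attrs->cpumask, cpumask_of(0));
 *	apply_workqueue_attrs(my_unbound_wq, attrs);
 *	free_workqueue_attrs(attrs);
 */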

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
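
/*
 * Example (illustrative sketch; struct my_obj and my_free_fn are
 * hypothetical): queue_rcu_work() runs the work item after an RCU grace
 * period, which is handy for deferring the freeing of RCU-protected
 * objects into process context:
 *
 *	static void my_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, rwork);
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->rwork, my_free_fn);
 *	queue_rcu_work(system_wq, &obj->rwork);
 */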

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
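
/*
 * Example (illustrative sketch, reusing the hypothetical my_dev above):
 * a typical teardown path synchronously cancels anything that may still
 * be queued or running before freeing the containing object:
 *
 *	cancel_delayed_work_sync(&dev->timeout_work);
 *	cancel_work_sync(&dev->io_work);
 *	kfree(dev);
 */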

extern bool flush_rcu_work(struct rcu_work *rwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
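
/*
 * Example (illustrative sketch, reusing the hypothetical my_dev above,
 * with a hypothetical dev->wq member): queueing onto a dedicated
 * workqueue instead of system_wq; the return value tells whether the
 * work item was newly queued:
 *
 *	if (!queue_work(dev->wq, &dev->io_work))
 *		pr_debug("io_work already pending\n");
 */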

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
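
/*
 * Example (illustrative sketch; dev->idle_work is hypothetical): because
 * mod_delayed_work() (re)sets the timer whether or not @dwork is already
 * pending, it is a natural fit for debouncing - each event pushes the
 * deadline back:
 *
 *	mod_delayed_work(system_wq, &dev->idle_work, msecs_to_jiffies(500));
 */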

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
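
/*
 * Example (illustrative sketch; my_poll_fn and my_poll_hardware are
 * hypothetical): a common polling pattern re-arms the delayed work from
 * its own handler:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		my_poll_hardware();
 *		schedule_delayed_work(dwork, HZ);
 *	}
 */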

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
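
/*
 * Example (illustrative sketch; my_read_fn and my_read_local_counter are
 * hypothetical): work_on_cpu() runs @fn synchronously on @cpu and returns
 * its result, which is useful for reading per-CPU state from a specific
 * CPU in process context:
 *
 *	static long my_read_fn(void *arg)
 *	{
 *		return my_read_local_counter();
 *	}
 *
 *	long val = work_on_cpu(2, my_read_fn, NULL);
 */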

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

int __init workqueue_init_early(void);
int __init workqueue_init(void);

#endif