1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_WAIT_H
3 #define _LINUX_WAIT_H
4 /*
5  * Linux wait queue related types and methods
6  */
7 #include <linux/list.h>
8 #include <linux/stddef.h>
9 #include <linux/spinlock.h>
10 
11 #include <asm/current.h>
12 #include <uapi/linux/wait.h>
13 
typedef struct wait_queue_entry wait_queue_entry_t;

/* Per-entry wake-up callback; default_wake_function() is the common choice. */
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* wake-one semantics (see __add_wait_queue_exclusive()) */
#define WQ_FLAG_WOKEN		0x02	/* entry has been woken */
#define WQ_FLAG_BOOKMARK	0x04	/* bookmark entry, see __wake_up_locked_key_bookmark() */
23 
24 /*
25  * A single wait-queue entry structure:
26  */
27 struct wait_queue_entry {
28 	unsigned int		flags;
29 	void			*private;
30 	wait_queue_func_t	func;
31 	struct list_head	entry;
32 };
33 
34 struct wait_queue_head {
35 	spinlock_t		lock;
36 	struct list_head	head;
37 };
38 typedef struct wait_queue_head wait_queue_head_t;
39 
40 struct task_struct;
41 
/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Runtime initialization; the function-local static __key gives each
 * call site its own lockdep class.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
78 
init_waitqueue_entry(struct wait_queue_entry * wq_entry,struct task_struct * p)79 static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
80 {
81 	wq_entry->flags		= 0;
82 	wq_entry->private	= p;
83 	wq_entry->func		= default_wake_function;
84 }
85 
86 static inline void
init_waitqueue_func_entry(struct wait_queue_entry * wq_entry,wait_queue_func_t func)87 init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
88 {
89 	wq_entry->flags		= 0;
90 	wq_entry->private	= NULL;
91 	wq_entry->func		= func;
92 }
93 
94 /**
95  * waitqueue_active -- locklessly test for waiters on the queue
96  * @wq_head: the waitqueue to test for waiters
97  *
98  * returns true if the wait list is not empty
99  *
100  * NOTE: this function is lockless and requires care, incorrect usage _will_
101  * lead to sporadic and non-obvious failure.
102  *
103  * Use either while holding wait_queue_head::lock or when used for wakeups
104  * with an extra smp_mb() like:
105  *
106  *      CPU0 - waker                    CPU1 - waiter
107  *
108  *                                      for (;;) {
109  *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
110  *      smp_mb();                         // smp_mb() from set_current_state()
111  *      if (waitqueue_active(wq_head))         if (@cond)
112  *        wake_up(wq_head);                      break;
113  *                                        schedule();
114  *                                      }
115  *                                      finish_wait(&wq_head, &wait);
116  *
117  * Because without the explicit smp_mb() it's possible for the
118  * waitqueue_active() load to get hoisted over the @cond store such that we'll
119  * observe an empty wait list while the waiter might not observe @cond.
120  *
121  * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
122  * which (when the lock is uncontended) are of roughly equal cost.
123  */
waitqueue_active(struct wait_queue_head * wq_head)124 static inline int waitqueue_active(struct wait_queue_head *wq_head)
125 {
126 	return !list_empty(&wq_head->head);
127 }
128 
129 /**
130  * wq_has_sleeper - check if there are any waiting processes
131  * @wq_head: wait queue head
132  *
133  * Returns true if wq_head has waiting processes
134  *
135  * Please refer to the comment for waitqueue_active.
136  */
wq_has_sleeper(struct wait_queue_head * wq_head)137 static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
138 {
139 	/*
140 	 * We need to be sure we are in sync with the
141 	 * add_wait_queue modifications to the wait queue.
142 	 *
143 	 * This memory barrier should be paired with one on the
144 	 * waiting side.
145 	 */
146 	smp_mb();
147 	return waitqueue_active(wq_head);
148 }
149 
150 extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
151 extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
152 extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
153 
__add_wait_queue(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)154 static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
155 {
156 	list_add(&wq_entry->entry, &wq_head->head);
157 }
158 
159 /*
160  * Used for wake-one threads:
161  */
162 static inline void
__add_wait_queue_exclusive(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)163 __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
164 {
165 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
166 	__add_wait_queue(wq_head, wq_entry);
167 }
168 
__add_wait_queue_entry_tail(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)169 static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
170 {
171 	list_add_tail(&wq_entry->entry, &wq_head->head);
172 }
173 
174 static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)175 __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
176 {
177 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
178 	__add_wait_queue_entry_tail(wq_head, wq_entry);
179 }
180 
181 static inline void
__remove_wait_queue(struct wait_queue_head * wq_head,struct wait_queue_entry * wq_entry)182 __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
183 {
184 	list_del(&wq_entry->entry);
185 }
186 
187 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
188 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
189 void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
190 		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
191 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
192 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
193 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
194 void __wake_up_pollfree(struct wait_queue_head *wq_head);
195 
196 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
197 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
198 #define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
199 #define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
200 #define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
201 
202 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
203 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
204 #define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
205 #define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
206 
207 /*
208  * Wakeup macros to be used to report events to the targets.
209  */
210 #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
211 #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
212 #define wake_up_poll(x, m)							\
213 	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
214 #define wake_up_locked_poll(x, m)						\
215 	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
216 #define wake_up_interruptible_poll(x, m)					\
217 	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
218 #define wake_up_interruptible_sync_poll(x, m)					\
219 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
220 
/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 * lifetime is tied to a task rather than to the 'struct file' being polled,
 * this function must be called before the waitqueue is freed so that
 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 *
 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * For performance reasons, we don't always take the queue lock here.
	 * Therefore, we might race with someone removing the last entry from
	 * the queue, and proceed while they still hold the queue lock.
	 * However, rcu_read_lock() is required to be held in such cases, so we
	 * can safely proceed with an RCU-delayed free.
	 */
	if (waitqueue_active(wq_head))
		__wake_up_pollfree(wq_head);
}
245 
/*
 * Evaluate @condition for the *_timeout waiters: if the condition just became
 * true while the remaining time (__ret) already hit 0, latch __ret to 1 so the
 * caller reports "condition true". The result is "stop waiting": condition
 * true or time exhausted. Relies on a 'long __ret' in the enclosing scope.
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})

/* True unless @state is a compile-time constant non-interruptible state. */
#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
259 
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
295 
#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)

#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)
334 
#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})
359 
#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})
392 
#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})
410 
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)
448 
#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})
476 
#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})
511 
#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);	\
	hrtimer_init_sleeper(&__t, current);					\
	if ((timeout) != KTIME_MAX)						\
		hrtimer_start_range_ns(&__t.timer, timeout,			\
				       current->timer_slack_ns,			\
				       HRTIMER_MODE_REL);			\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})
561 
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})
587 
#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

/*
 * Like wait_event_interruptible(), but queues with WQ_FLAG_EXCLUSIVE
 * (wake-one semantics).
 */
#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})

#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

/* As above, but only fatal signals interrupt the wait (TASK_KILLABLE). */
#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})


#define __wait_event_freezable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
			schedule(); try_to_freeze())

/* Exclusive variant of wait_event_freezable(). */
#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})
627 
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
} while (0)

/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition)				\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
} while (0)
671 
#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 0, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)			\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
	__ret;									\
})
704 
705 #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
706 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
707 		      TASK_IDLE, 1, timeout,					\
708 		      __ret = schedule_timeout(__ret))
709 
710 /**
711  * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
712  * @wq_head: the waitqueue to wait on
713  * @condition: a C expression for the event to wait for
714  * @timeout: timeout, in jiffies
715  *
716  * The process is put to sleep (TASK_IDLE) until the
717  * @condition evaluates to true. The @condition is checked each time
718  * the waitqueue @wq_head is woken up.
719  *
720  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
721  * set thus if other processes wait on the same list, when this
722  * process is woken further processes are not considered.
723  *
724  * wake_up() has to be called after changing any variable that could
725  * change the result of the wait condition.
726  *
727  * Returns:
728  * 0 if the @condition evaluated to %false after the @timeout elapsed,
729  * 1 if the @condition evaluated to %true after the @timeout elapsed,
730  * or the remaining jiffies (at least 1) if the @condition evaluated
731  * to %true before the @timeout elapsed.
732  */
733 #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
734 ({										\
735 	long __ret = timeout;							\
736 	might_sleep();								\
737 	if (!___wait_cond_timeout(condition))					\
738 		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
739 	__ret;									\
740 })
741 
742 extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
743 extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
744 
/*
 * Common engine for the *_locked waiters: @fn (do_wait_intr or
 * do_wait_intr_irq) queues, drops the lock, sleeps and re-takes the lock;
 * a non-zero return from @fn (e.g. -ERESTARTSYS) aborts the wait.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)				\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
788 
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 * If @condition is already true when called, no sleep takes place and
 * 0 is returned immediately.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)			\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
815 
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when several processes are waiting on the list and this
 * process is woken up, further waiters are not considered for that
 * wake-up.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
846 
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when several processes are waiting on the list and this
 * process is woken up, further waiters are not considered for that
 * wake-up.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
877 
878 
/* Sleep in TASK_KILLABLE until @condition is true, calling schedule() between checks. */
#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
881 
/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * kill signal and 0 if @condition evaluated to true.
 *
 * Only kill (fatal) signals interrupt this process.
 */
#define wait_event_killable(wq_head, condition)					\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable(wq_head, condition);		\
	__ret;									\
})
905 
/*
 * TASK_KILLABLE sleep bounded by @timeout jiffies; __ret carries the
 * remaining time across schedule_timeout() calls.
 */
#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_KILLABLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))
910 
/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro may sleep and must only be used from process context
 * (it calls might_sleep()).
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_killable_timeout(wq_head,			\
						condition, timeout);		\
	__ret;									\
})
942 
943 
/*
 * TASK_UNINTERRUPTIBLE sleep with @lock (an irq-disabling spinlock)
 * dropped around @cmd and schedule(), and reacquired before @condition
 * is re-checked.  The result of ___wait_event() is discarded.
 */
#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);				\
			    cmd;						\
			    schedule();						\
			    spin_lock_irq(&lock))
950 
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * There is no return value; the macro returns only once @condition is
 * true.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
} while (0)
980 
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * (The deliberately empty trailing argument below fills the @cmd slot
 * of __wait_event_lock_irq() with nothing.)
 */
#define wait_event_lock_irq(wq_head, condition, lock)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, );			\
} while (0)
1007 
1008 
/*
 * TASK_INTERRUPTIBLE sleep with @lock (an irq-disabling spinlock)
 * dropped around @cmd and schedule(), and reacquired before @condition
 * is re-checked.
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);					\
		      cmd;							\
		      schedule();						\
		      spin_lock_irq(&lock))
1015 
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up. If @condition is
 * already true when called, no sleep takes place and 0 is returned.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock, cmd);		\
	__ret;									\
})
1049 
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 *
 * (The deliberately empty trailing argument below fills the @cmd slot
 * of __wait_event_interruptible_lock_irq() with nothing.)
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock,);		\
	__ret;									\
})
1080 
/*
 * Timed wait with @lock (an irq-disabling spinlock) dropped around the
 * schedule_timeout() call and reacquired before @condition is
 * re-checked.  @state selects TASK_INTERRUPTIBLE vs
 * TASK_UNINTERRUPTIBLE; __ret carries the remaining jiffies.
 *
 * Note: no trailing semicolon here — the semicolon belongs at the use
 * site.  A ';' inside the definition would expand to a stray empty
 * statement after every `__ret = __wait_event_lock_irq_timeout(...);`.
 */
#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      state, 0, timeout,					\
		      spin_unlock_irq(&lock);					\
		      __ret = schedule_timeout(__ret);				\
		      spin_lock_irq(&lock))
1087 
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies (at least 1)
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
						  timeout)			\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_INTERRUPTIBLE);			\
	__ret;									\
})
1122 
/**
 * wait_event_lock_irq_timeout - sleep until a condition gets true or a
 *		timeout elapses. The condition is checked under the lock.
 *		This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses. The @condition
 * is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed and the condition was
 * still false, and the remaining jiffies otherwise.
 */
#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_UNINTERRUPTIBLE);			\
	__ret;									\
})
1132 
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
/* Wake callbacks matching wait_queue_func_t, usable as wait_queue_entry::func. */
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1143 
/*
 * Define an on-stack wait_queue_entry for the current task using the
 * given wake function.  DEFINE_WAIT() uses autoremove_wake_function,
 * so the entry is taken off the waitqueue_head at wakeup time (see the
 * comment above the prototypes).
 */
#define DEFINE_WAIT_FUNC(name, function)					\
	struct wait_queue_entry name = {					\
		.private	= current,					\
		.func		= function,					\
		.entry		= LIST_HEAD_INIT((name).entry),			\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1152 
/*
 * Initialize an existing wait_queue_entry in place for the current
 * task: autoremove_wake_function as the wake callback, an empty list
 * link, and all flags cleared.
 */
#define init_wait(wait)								\
	do {									\
		(wait)->private = current;					\
		(wait)->func = autoremove_wake_function;			\
		INIT_LIST_HEAD(&(wait)->entry);					\
		(wait)->flags = 0;						\
	} while (0)
1160 
1161 #endif /* _LINUX_WAIT_H */
1162