/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

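/*
 * Which context the load balancer runs in: the CPU is already idle
 * (CPU_IDLE), busy (CPU_NOT_IDLE), or about to become idle and trying
 * to pull work before it does (CPU_NEWLY_IDLE).
 */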
enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

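/* Kick @cpu with a reschedule if it is currently running its idle task. */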
extern void wake_up_if_idle(int cpu);

/*
 * Idle-thread-specific helpers for managing the need_resched polling
 * state.
 *
 * When TIF_POLLING_NRFLAG is available, the idle task advertises that
 * it polls its thread flags for TIF_NEED_RESCHED, allowing a remote CPU
 * to skip the reschedule IPI and simply set the flag.
 */
#ifdef TIF_POLLING_NRFLAG

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED;
	 * this pairs with the barrier in resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}
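
/*
 * Example (illustrative sketch, not kernel code): a monitor/wait style
 * idle routine in the spirit of the x86 mwait idle loop. arch_monitor()
 * and arch_wait() are hypothetical stand-ins for the architecture's
 * monitor/wait primitives. The key property is that the hardware
 * monitor is armed and NEED_RESCHED re-checked only after the polling
 * bit is visibly set, so a concurrent resched_curr() either sees
 * polling (and just sets the flag we are watching) or we see its flag:
 *
 *	if (!current_set_polling_and_test()) {
 *		arch_monitor(&current_thread_info()->flags);
 *		if (!need_resched())
 *			arch_wait();
 *	}
 *	__current_clr_polling();
 */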

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED;
	 * this pairs with the barrier in resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}
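
/*
 * Example (illustrative sketch): before entering a non-polling
 * low-power state, clear the polling bit and re-check NEED_RESCHED in
 * one step; if a wakeup raced in, back out instead of sleeping. This
 * mirrors the pattern of default_idle_call() in kernel/sched/idle.c:
 *
 *	if (current_clr_polling_and_test())
 *		local_irq_enable();
 *	else
 *		arch_cpu_idle();
 */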

#else
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

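/*
 * Heavyweight polling-state exit, used when leaving an idle section
 * entirely (e.g. play_idle_precise() in kernel/sched/idle.c).
 */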
static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED, and the IPI handler, scheduler_ipi(), will also
	 * fold TIF_NEED_RESCHED into the preempt count.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

#endif /* _LINUX_SCHED_IDLE_H */