/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
        return off;
}
void disable_cpuidle(void)
{
        off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
                           struct cpuidle_device *dev)
{
        return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if no state supports off-lining (i.e.
 * no ->enter_dead callback is set); on success this function does not return.
 */
int cpuidle_play_dead(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int i;

        if (!drv)
                return -ENODEV;

        /* Find lowest-power state that supports long-term idle */
        for (i = drv->state_count - 1; i >= 0; i--)
                if (drv->states[i].enter_dead)
                        return drv->states[i].enter_dead(dev, i);

        return -ENODEV;
}
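
/*
 * Illustrative sketch (not part of this file): a platform driver opts a
 * state into CPU off-lining by filling in its ->enter_dead callback, which
 * cpuidle_play_dead() above then picks up, deepest state first.  The
 * callback name below is hypothetical:
 *
 *      static int my_enter_dead(struct cpuidle_device *dev, int index)
 *      {
 *              ... put the CPU into a state it need not wake from ...
 *      }
 *
 *      drv->states[1].enter_dead = my_enter_dead;
 */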

static int find_deepest_state(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev,
                              unsigned int max_latency,
                              unsigned int forbidden_flags,
                              bool s2idle)
{
        unsigned int latency_req = 0;
        int i, ret = 0;

        for (i = 1; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];

                if (s->disabled || su->disable || s->exit_latency <= latency_req
                    || s->exit_latency > max_latency
                    || (s->flags & forbidden_flags)
                    || (s2idle && !s->enter_s2idle))
                        continue;

                latency_req = s->exit_latency;
                ret = i;
        }
        return ret;
}
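
/*
 * Worked example with made-up numbers: suppose states 1..3 have exit
 * latencies of 2, 50 and 200 us and none is disabled.  Then
 * find_deepest_state(drv, dev, 100, 0, false) accepts state 1 (latency_req
 * becomes 2), accepts state 2 (50 > 2 and 50 <= 100), rejects state 3
 * (200 > 100), and returns 2.  State 0 is never considered; the loop
 * intentionally starts at index 1.
 */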

/**
 * cpuidle_use_deepest_state - Set/clear governor override flag.
 * @enable: New value of the flag.
 *
 * If set, the current CPU will use the deepest available idle state from now
 * on, overriding the governors.
 */
void cpuidle_use_deepest_state(bool enable)
{
        struct cpuidle_device *dev;

        preempt_disable();
        dev = cpuidle_get_device();
        if (dev)
                dev->use_deepest_state = enable;
        preempt_enable();
}
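
/*
 * Simplified usage sketch, modeled on the idle-injection path in
 * kernel/sched/idle.c (play_idle()); timer setup and PF_IDLE handling are
 * omitted:
 *
 *      cpuidle_use_deepest_state(true);
 *      do_idle();                      // governors are bypassed here
 *      cpuidle_use_deepest_state(false);
 */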

/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                               struct cpuidle_device *dev)
{
        return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}

#ifdef CONFIG_SUSPEND
static void enter_s2idle_proper(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev, int index)
{
        ktime_t time_start, time_end;

        time_start = ns_to_ktime(local_clock());

        /*
         * The trace_suspend_resume() call made by tick_freeze() on the last
         * CPU executing it uses RCU in a way that is invalid in the idle
         * context, so tell RCU about that.
         */
        RCU_NONIDLE(tick_freeze());
        /*
         * The state used here cannot be a "coupled" one, because the "coupled"
         * cpuidle mechanism enables interrupts and doing that with timekeeping
         * suspended is generally unsafe.
         */
        stop_critical_timings();
        drv->states[index].enter_s2idle(dev, drv, index);
        if (WARN_ON_ONCE(!irqs_disabled()))
                local_irq_disable();
        /*
         * timekeeping_resume(), called by tick_unfreeze() on the first CPU
         * executing it, calls functions containing RCU read-side critical
         * sections, so tell RCU about that.
         */
        RCU_NONIDLE(tick_unfreeze());
        start_critical_timings();

        time_end = ns_to_ktime(local_clock());

        dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
        dev->states_usage[index].s2idle_usage++;
}

/**
 * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_s2idle callback, find the deepest of
 * them and enter it with the tick frozen.
 */
int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        int index;

        /*
         * Find the deepest state with ->enter_s2idle present, which guarantees
         * that interrupts won't be enabled when it exits and allows the tick to
         * be frozen safely.
         */
        index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
        if (index > 0)
                enter_s2idle_proper(drv, dev, index);

        return index;
}
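
/*
 * Illustrative sketch: a driver advertises suspend-to-idle support for a
 * state by providing an ->enter_s2idle callback (the name below is
 * hypothetical).  The callback must not re-enable interrupts; that is what
 * makes entering it with the tick frozen safe:
 *
 *      static void my_enter_s2idle(struct cpuidle_device *dev,
 *                                  struct cpuidle_driver *drv, int index)
 *      {
 *              ... enter the low-power state with interrupts disabled ...
 *      }
 *
 *      drv->states[2].enter_s2idle = my_enter_s2idle;
 */
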
#endif /* CONFIG_SUSPEND */

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                        int index)
{
        int entered_state;

        struct cpuidle_state *target_state = &drv->states[index];
        bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
        ktime_t time_start, time_end;
        s64 diff;

        /*
         * Tell the time framework to switch to a broadcast timer because our
         * local timer will be shut down.  If a local timer is used from another
         * CPU as a broadcast timer, this call may fail if it is not available.
         */
        if (broadcast && tick_broadcast_enter()) {
                index = find_deepest_state(drv, dev, target_state->exit_latency,
                                           CPUIDLE_FLAG_TIMER_STOP, false);
                if (index < 0) {
                        default_idle_call();
                        return -EBUSY;
                }
                target_state = &drv->states[index];
                broadcast = false;
        }

        /* Take note of the planned idle state. */
        sched_idle_set_state(target_state);

        trace_cpu_idle_rcuidle(index, dev->cpu);
        time_start = ns_to_ktime(local_clock());

        stop_critical_timings();
        entered_state = target_state->enter(dev, drv, index);
        start_critical_timings();

        sched_clock_idle_wakeup_event();
        time_end = ns_to_ktime(local_clock());
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

        /* The cpu is no longer idle or about to enter idle. */
        sched_idle_set_state(NULL);

        if (broadcast) {
                if (WARN_ON_ONCE(!irqs_disabled()))
                        local_irq_disable();

                tick_broadcast_exit();
        }

        if (!cpuidle_state_is_coupled(drv, index))
                local_irq_enable();

        diff = ktime_us_delta(time_end, time_start);
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int)diff;

        if (entered_state >= 0) {
                /*
                 * Update the cpuidle counters.  This could be done in each
                 * driver's enter routine instead, but that would duplicate
                 * the same code across all of them.
                 */
                dev->states_usage[entered_state].time += dev->last_residency;
                dev->states_usage[entered_state].usage++;
        } else {
                dev->last_residency = 0;
        }

        return entered_state;
}
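
/*
 * Note on the broadcast fallback above: a driver tags a state whose entry
 * stops the CPU-local timer like this (hypothetical state definition):
 *
 *      .flags = CPUIDLE_FLAG_TIMER_STOP,
 *
 * For such a state, cpuidle_enter_state() first arms a broadcast timer via
 * tick_broadcast_enter(); if that fails, find_deepest_state() is used to
 * fall back to a state without the flag and with no higher exit latency.
 */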

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 * @stop_tick: indication of whether or not to stop the tick
 *
 * Returns the index of the idle state.  The return value must not be negative.
 *
 * The memory location pointed to by @stop_tick is set to 'false' if the
 * scheduler tick should not be stopped before entering the returned state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                   bool *stop_tick)
{
        return cpuidle_curr_governor->select(drv, dev, stop_tick);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv: the cpuidle driver tied with the cpu
 * @dev: the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index of the entered idle state, or a negative error code.
 * The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                  int index)
{
        if (cpuidle_state_is_coupled(drv, index))
                return cpuidle_enter_state_coupled(dev, drv, index);
        return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor what state we were in
 *
 * @dev: the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
        if (cpuidle_curr_governor->reflect && index >= 0)
                cpuidle_curr_governor->reflect(dev, index);
}
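
/*
 * The three helpers above are used together from the idle loop.  A minimal
 * sketch of the pattern in cpuidle_idle_call() (kernel/sched/idle.c),
 * simplified and with tick and s2idle handling omitted:
 *
 *      bool stop_tick = true;
 *      int next_state, entered_state;
 *
 *      next_state = cpuidle_select(drv, dev, &stop_tick);
 *      entered_state = cpuidle_enter(drv, dev, next_state);
 *      cpuidle_reflect(dev, entered_state);
 */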

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices) {
                /* Make sure all changes are visible before switching to the new idle handler. */
                smp_wmb();
                initialized = 1;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices) {
                initialized = 0;
                wake_up_all_idle_cpus();
        }

        /*
         * Make sure external observers (such as the scheduler)
         * are done looking at the currently pointed-to idle states.
         */
        synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
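
/*
 * Typical usage of the pair above when reconfiguring idle states from
 * outside this file (error handling omitted; dev is some registered
 * cpuidle device):
 *
 *      cpuidle_pause_and_lock();
 *      cpuidle_disable_device(dev);
 *      ... change the device or its states ...
 *      cpuidle_enable_device(dev);
 *      cpuidle_resume_and_unlock();
 */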

/* Currently used in the suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/* Currently used in the suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv;

        if (!dev)
                return -EINVAL;

        if (dev->enabled)
                return 0;

        if (!cpuidle_curr_governor)
                return -EIO;

        drv = cpuidle_get_cpu_driver(dev);

        if (!drv)
                return -EIO;

        if (!dev->registered)
                return -EINVAL;

        ret = cpuidle_add_device_sysfs(dev);
        if (ret)
                return ret;

        if (cpuidle_curr_governor->enable) {
                ret = cpuidle_curr_governor->enable(drv, dev);
                if (ret)
                        goto fail_sysfs;
        }

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_device_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!dev || !dev->enabled)
                return;

        if (!drv || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(drv, dev);

        cpuidle_remove_device_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
        module_put(drv->owner);

        dev->registered = 0;
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
        memset(dev->states_usage, 0, sizeof(dev->states_usage));
        dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 *                             and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!try_module_get(drv->owner))
                return -EINVAL;

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);

        ret = cpuidle_coupled_register_device(dev);
        if (ret)
                __cpuidle_unregister_device(dev);
        else
                dev->registered = 1;

        return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret = -EBUSY;

        if (!dev)
                return -EINVAL;

        mutex_lock(&cpuidle_lock);

        if (dev->registered)
                goto out_unlock;

        __cpuidle_device_init(dev);

        ret = __cpuidle_register_device(dev);
        if (ret)
                goto out_unlock;

        ret = cpuidle_add_sysfs(dev);
        if (ret)
                goto out_unregister;

        ret = cpuidle_enable_device(dev);
        if (ret)
                goto out_sysfs;

        cpuidle_install_idle_handler();

out_unlock:
        mutex_unlock(&cpuidle_lock);

        return ret;

out_sysfs:
        cpuidle_remove_sysfs(dev);
out_unregister:
        __cpuidle_unregister_device(dev);
        goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        if (!dev || dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(dev);

        __cpuidle_unregister_device(dev);

        cpuidle_coupled_unregister_device(dev);

        cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregisters a driver and the devices registered for it.
 * This function can be used only if the driver was previously registered
 * through the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
        int cpu;
        struct cpuidle_device *device;

        for_each_cpu(cpu, drv->cpumask) {
                device = &per_cpu(cpuidle_dev, cpu);
                cpuidle_unregister_device(device);
        }

        cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function covers the common
 * initialization pattern found in arch-specific drivers. The per-CPU
 * devices are globally defined in this file.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
                     const struct cpumask *const coupled_cpus)
{
        int ret, cpu;
        struct cpuidle_device *device;

        ret = cpuidle_register_driver(drv);
        if (ret) {
                pr_err("failed to register cpuidle driver\n");
                return ret;
        }

        for_each_cpu(cpu, drv->cpumask) {
                device = &per_cpu(cpuidle_dev, cpu);
                device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
                /*
                 * On multiplatform ARM kernels, the coupled idle states may be
                 * enabled in the kernel even if the cpuidle driver does not
                 * use them.  Note that coupled_cpus is copied by value.
                 */
                if (coupled_cpus)
                        device->coupled_cpus = *coupled_cpus;
#endif
                ret = cpuidle_register_device(device);
                if (!ret)
                        continue;

                pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

                cpuidle_unregister(drv);
                break;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
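
/*
 * Minimal registration sketch for a hypothetical single-state driver using
 * cpuidle_register() above; all names and numbers are illustrative only:
 *
 *      static struct cpuidle_driver my_idle_driver = {
 *              .name                   = "my_idle",
 *              .owner                  = THIS_MODULE,
 *              .states[0] = {
 *                      .enter            = my_enter_wfi,
 *                      .exit_latency     = 1,  // us
 *                      .target_residency = 1,  // us
 *                      .name             = "WFI",
 *                      .desc             = "wait for interrupt",
 *              },
 *              .state_count = 1,
 *      };
 *
 *      ret = cpuidle_register(&my_idle_driver, NULL);  // no coupled CPUs
 */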

#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state
 * and then recalculate a new suitable C-state.  Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                                  unsigned long l, void *v)
{
        wake_up_all_idle_cpus();
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}
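
/*
 * For reference, a sketch of how another subsystem would trigger this
 * notifier by adding a CPU latency requirement (see include/linux/pm_qos.h
 * for the actual API; the value is illustrative):
 *
 *      struct pm_qos_request req;
 *
 *      pm_qos_add_request(&req, PM_QOS_CPU_DMA_LATENCY, 20);   // cap: 20 us
 *      ... latency-sensitive work ...
 *      pm_qos_remove_request(&req);
 */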

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        if (cpuidle_disabled())
                return -ENODEV;

        ret = cpuidle_add_interface(cpu_subsys.dev_root);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);