/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL	(2 * TICK_NSEC / NSEC_PER_USEC)

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - Update the sampling rate, effective immediately if needed.
 * @attr_set: Governor attribute set containing the tunables to update.
 * @buf: Buffer holding the new sampling rate value (in microseconds).
 * @count: Size of @buf.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate may not be enough.  For example, if the original
 * sampling rate was 1 second and the user requests 10 ms because an immediate
 * reaction from the ondemand governor is needed, the governor may not pick up
 * the new value until the current (1 second) sample delay expires.  Thus, when
 * reducing the sampling rate, the new value has to be made effective
 * immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int sampling_interval;
	int ret;

	ret = sscanf(buf, "%u", &sampling_interval);
	if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
		return -EINVAL;

	dbs_data->sampling_rate = sampling_interval;

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->update_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

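/**
 * dbs_update - Compute the load since the last sample.
 * @policy: cpufreq policy to compute the load for.
 *
 * For each CPU of @policy, compute the load over the time elapsed since the
 * previous invocation from the idle time (and, if ignore_nice_load is set,
 * the nice time) accounted to it, and record in policy_dbs->idle_periods the
 * smallest number of whole sampling periods any of those CPUs stayed idle
 * (UINT_MAX if none of them was idle for long).
 *
 * Return: the maximum load found among the CPUs of @policy.
 */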
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0, idle_periods = UINT_MAX;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purposes of ondemand, waiting for disk IO is an indication
	 * that a task is performance-critical, not that the system is actually
	 * idle, so iowait time is not added to the CPU idle time when
	 * io_is_busy is set.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between the
			 * calls, so the previous load value can be used then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU intensive that task actually
			 * was.  This is undesirable for latency-sensitive bursty
			 * workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency.  However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: an unusually large
			 * 'idle_time' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
			if (time_elapsed >= idle_time) {
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * That can happen if idle_time is returned by
				 * get_cpu_idle_time_jiffy().  In that case
				 * idle_time is roughly equal to the difference
				 * between time_elapsed and "busy time" obtained
				 * from CPU statistics.  Then, the "busy time"
				 * can end up being greater than time_elapsed
				 * (for example, if jiffies_64 and the CPU
				 * statistics are updated by different CPUs),
				 * so idle_time may in fact be negative.  That
				 * means, though, that the CPU was busy all
				 * the time (on the rough average) during the
				 * last sampling interval and 100 can be
				 * returned as the load.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			j_cdbs->prev_load = load;
		}

		if (unlikely((int)idle_time > 2 * sampling_rate)) {
			unsigned int periods = idle_time / sampling_rate;

			if (periods < idle_periods)
				idle_periods = periods;
		}

		if (load > max_load)
			max_load = load;
	}

	policy_dbs->idle_periods = idle_periods;

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

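/*
 * dbs_work_handler - Process-context part of taking a sample.
 *
 * Runs from the workqueue.  Invokes the governor's ->gov_dbs_update()
 * callback under update_mutex and stores the sample delay it returns, then
 * clears work_count and work_in_progress so that the utilization update
 * handler may queue up further samples.
 */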
static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->update_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
	mutex_unlock(&policy_dbs->update_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

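/*
 * dbs_irq_work - Bounce a sample out of scheduler context.
 *
 * The utilization update handler runs from scheduler paths where work items
 * cannot be scheduled directly, so it queues this irq_work instead, which in
 * turn schedules the actual sampling work on the local CPU.
 */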
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

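/*
 * dbs_update_util_handler - Scheduler utilization update callback.
 *
 * Invoked by the scheduler on utilization changes.  If this CPU may update
 * the policy's frequency, no sample is already in flight, and at least
 * sample_delay_ns has passed since the last sample, claim the sampling slot
 * (for shared policies, via the work_count atomic) and queue the irq_work
 * that leads to a new sample being taken.
 */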
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
		return;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

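/*
 * gov_set_update_util - Start sampling for a policy.
 *
 * Set the initial sample delay to @delay_us and install the scheduler
 * utilization update hook on every online CPU of the policy.
 */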
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}

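/*
 * gov_clear_update_util - Stop sampling for a policy.
 *
 * Remove the utilization update hooks from the policy's CPUs and wait, via
 * synchronize_sched(), for any handler invocations still in flight to finish.
 */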
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_sched();
}

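/*
 * alloc_policy_dbs_info - Allocate and initialize per-policy governor data.
 *
 * Allocate the structure through the governor's ->alloc() callback, set up
 * the work items used for sampling, and point every related CPU (online or
 * offline) at the new object.
 */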
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->update_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->update_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

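/**
 * cpufreq_dbs_governor_init - Common initialization for dbs governors.
 * @policy: cpufreq policy the governor is being attached to.
 *
 * Allocate the per-policy data and either attach the policy to an existing
 * tunables set (when tunables are system-wide and another policy already
 * uses this governor) or create a fresh dbs_data, run the governor's
 * ->init() callback on it, and expose the tunables via a new sysfs kobject.
 */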
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_policy_dbs_info;

	/*
	 * The sampling interval should not be less than the transition latency
	 * of the CPU and it also cannot be too small for dbs_update() to work
	 * correctly.
	 */
	dbs_data->sampling_rate = max_t(unsigned int,
					CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
					cpufreq_policy_transition_delay_us(policy));

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	kobject_put(&dbs_data->attr_set.kobj);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);

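/**
 * cpufreq_dbs_governor_exit - Common cleanup for dbs governors.
 * @policy: cpufreq policy the governor is being detached from.
 *
 * Detach the policy from its tunables set; if it was the last policy using
 * that set, run the governor's ->exit() callback and free the dbs_data.
 * The per-policy data is freed unconditionally.
 */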
void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	if (!count) {
		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data);
		kfree(dbs_data);
	}

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

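/**
 * cpufreq_dbs_governor_start - Common startup for dbs governors.
 * @policy: cpufreq policy to start the governor for.
 *
 * Snapshot the initial idle (and, if ignore_nice_load is set, nice) time of
 * each CPU of the policy, run the governor's ->start() callback, and install
 * the utilization update hooks to begin sampling.
 */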
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/*
		 * Make the first invocation of dbs_update() compute the load.
		 */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

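/**
 * cpufreq_dbs_governor_stop - Common shutdown for dbs governors.
 * @policy: cpufreq policy to stop the governor for.
 *
 * Remove the utilization update hooks, wait for any pending irq_work and
 * work items to complete, and reset the sampling state.
 */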
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);

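/**
 * cpufreq_dbs_governor_limits - Common limits handling for dbs governors.
 * @policy: cpufreq policy whose limits have changed.
 *
 * Re-apply the policy's frequency limits and zero the sample delay so that
 * the next utilization update takes a sample right away.  The NULL check of
 * policy->governor_data under gov_dbs_data_mutex guards against a concurrent
 * cpufreq_dbs_governor_exit().
 */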
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs;

	/* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
	mutex_lock(&gov_dbs_data_mutex);
	policy_dbs = policy->governor_data;
	if (!policy_dbs)
		goto out;

	mutex_lock(&policy_dbs->update_mutex);
	cpufreq_policy_apply_limits(policy);
	gov_update_sample_delay(policy_dbs, 0);
	mutex_unlock(&policy_dbs->update_mutex);

out:
	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);