Lines Matching refs: policy (cross-reference of the Linux cpufreq core, drivers/cpufreq/cpufreq.c; each entry shows the kernel source line number, the matching code, and the enclosing function)

36 static inline bool policy_is_inactive(struct cpufreq_policy *policy)  in policy_is_inactive()  argument
38 return cpumask_empty(policy->cpus); in policy_is_inactive()
77 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
78 static int cpufreq_init_governor(struct cpufreq_policy *policy);
79 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
80 static int cpufreq_start_governor(struct cpufreq_policy *policy);
81 static void cpufreq_stop_governor(struct cpufreq_policy *policy);
82 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
111 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) in get_governor_parent_kobj() argument
114 return &policy->kobj; in get_governor_parent_kobj()
168 int cpufreq_generic_init(struct cpufreq_policy *policy, in cpufreq_generic_init() argument
172 policy->freq_table = table; in cpufreq_generic_init()
173 policy->cpuinfo.transition_latency = transition_latency; in cpufreq_generic_init()
179 cpumask_setall(policy->cpus); in cpufreq_generic_init()
187 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_cpu_get_raw() local
189 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; in cpufreq_cpu_get_raw()
195 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); in cpufreq_generic_get() local
197 if (!policy || IS_ERR(policy->clk)) { in cpufreq_generic_get()
199 __func__, policy ? "clk" : "policy", cpu); in cpufreq_generic_get()
203 return clk_get_rate(policy->clk) / 1000; in cpufreq_generic_get()
222 struct cpufreq_policy *policy = NULL; in cpufreq_cpu_get() local
233 policy = cpufreq_cpu_get_raw(cpu); in cpufreq_cpu_get()
234 if (policy) in cpufreq_cpu_get()
235 kobject_get(&policy->kobj); in cpufreq_cpu_get()
240 return policy; in cpufreq_cpu_get()
252 void cpufreq_cpu_put(struct cpufreq_policy *policy) in cpufreq_cpu_put() argument
254 kobject_put(&policy->kobj); in cpufreq_cpu_put()
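cpufreq_cpu_get() pins a CPU's policy by taking a reference on its kobject and cpufreq_cpu_put() releases it. A minimal usage sketch (hypothetical helper, assuming <linux/cpufreq.h>; it mirrors what cpufreq_quick_get() does further down):

static unsigned int example_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (policy) {
		freq = policy->cur;		/* valid while the reference is held */
		cpufreq_cpu_put(policy);	/* drop the kobject reference */
	}
	return freq;
}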
304 static void cpufreq_notify_transition(struct cpufreq_policy *policy, in cpufreq_notify_transition() argument
325 if (policy->cur && (policy->cur != freqs->old)) { in cpufreq_notify_transition()
327 freqs->old, policy->cur); in cpufreq_notify_transition()
328 freqs->old = policy->cur; in cpufreq_notify_transition()
332 for_each_cpu(freqs->cpu, policy->cpus) { in cpufreq_notify_transition()
343 cpumask_pr_args(policy->cpus)); in cpufreq_notify_transition()
345 for_each_cpu(freqs->cpu, policy->cpus) { in cpufreq_notify_transition()
351 cpufreq_stats_record_transition(policy, freqs->new); in cpufreq_notify_transition()
352 policy->cur = freqs->new; in cpufreq_notify_transition()
357 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, in cpufreq_notify_post_transition() argument
360 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); in cpufreq_notify_post_transition()
365 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); in cpufreq_notify_post_transition()
366 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); in cpufreq_notify_post_transition()
369 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, in cpufreq_freq_transition_begin() argument
382 && current == policy->transition_task); in cpufreq_freq_transition_begin()
385 wait_event(policy->transition_wait, !policy->transition_ongoing); in cpufreq_freq_transition_begin()
387 spin_lock(&policy->transition_lock); in cpufreq_freq_transition_begin()
389 if (unlikely(policy->transition_ongoing)) { in cpufreq_freq_transition_begin()
390 spin_unlock(&policy->transition_lock); in cpufreq_freq_transition_begin()
394 policy->transition_ongoing = true; in cpufreq_freq_transition_begin()
395 policy->transition_task = current; in cpufreq_freq_transition_begin()
397 spin_unlock(&policy->transition_lock); in cpufreq_freq_transition_begin()
399 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); in cpufreq_freq_transition_begin()
403 void cpufreq_freq_transition_end(struct cpufreq_policy *policy, in cpufreq_freq_transition_end() argument
406 if (unlikely(WARN_ON(!policy->transition_ongoing))) in cpufreq_freq_transition_end()
409 cpufreq_notify_post_transition(policy, freqs, transition_failed); in cpufreq_freq_transition_end()
411 policy->transition_ongoing = false; in cpufreq_freq_transition_end()
412 policy->transition_task = NULL; in cpufreq_freq_transition_end()
414 wake_up(&policy->transition_wait); in cpufreq_freq_transition_end()
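cpufreq_freq_transition_begin()/cpufreq_freq_transition_end() bracket every synchronous frequency change: begin serializes concurrent transitions and sends the PRECHANGE notification, end sends POSTCHANGE (or the rollback notification when the switch failed) and wakes waiters. A sketch of that protocol around a hypothetical hardware write; the core performs exactly this around ->target_index() in __target_index() further down:

static int example_write_hw_pstate(unsigned int khz);	/* hypothetical MSR/clock write */

static int example_switch_freq(struct cpufreq_policy *policy, unsigned int new_khz)
{
	struct cpufreq_freqs freqs = {
		.old = policy->cur,
		.new = new_khz,
	};
	int ret;

	cpufreq_freq_transition_begin(policy, &freqs);	  /* PRECHANGE notifiers */
	ret = example_write_hw_pstate(new_khz);
	cpufreq_freq_transition_end(policy, &freqs, ret); /* POSTCHANGE or rollback */
	return ret;
}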
450 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy) in cpufreq_enable_fast_switch() argument
452 lockdep_assert_held(&policy->rwsem); in cpufreq_enable_fast_switch()
454 if (!policy->fast_switch_possible) in cpufreq_enable_fast_switch()
460 policy->fast_switch_enabled = true; in cpufreq_enable_fast_switch()
463 policy->cpu); in cpufreq_enable_fast_switch()
474 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) in cpufreq_disable_fast_switch() argument
477 if (policy->fast_switch_enabled) { in cpufreq_disable_fast_switch()
478 policy->fast_switch_enabled = false; in cpufreq_disable_fast_switch()
496 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, in cpufreq_driver_resolve_freq() argument
499 target_freq = clamp_val(target_freq, policy->min, policy->max); in cpufreq_driver_resolve_freq()
500 policy->cached_target_freq = target_freq; in cpufreq_driver_resolve_freq()
505 idx = cpufreq_frequency_table_target(policy, target_freq, in cpufreq_driver_resolve_freq()
507 policy->cached_resolved_idx = idx; in cpufreq_driver_resolve_freq()
508 return policy->freq_table[idx].frequency; in cpufreq_driver_resolve_freq()
512 return cpufreq_driver->resolve_freq(policy, target_freq); in cpufreq_driver_resolve_freq()
518 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy) in cpufreq_policy_transition_delay_us() argument
522 if (policy->transition_delay_us) in cpufreq_policy_transition_delay_us()
523 return policy->transition_delay_us; in cpufreq_policy_transition_delay_us()
525 latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC; in cpufreq_policy_transition_delay_us()
590 struct cpufreq_policy *policy) in cpufreq_parse_governor() argument
594 policy->policy = CPUFREQ_POLICY_PERFORMANCE; in cpufreq_parse_governor()
599 policy->policy = CPUFREQ_POLICY_POWERSAVE; in cpufreq_parse_governor()
627 policy->governor = t; in cpufreq_parse_governor()
645 (struct cpufreq_policy *policy, char *buf) \
647 return sprintf(buf, "%u\n", policy->object); \
661 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) in show_scaling_cur_freq() argument
666 freq = arch_freq_get_on_cpu(policy->cpu); in show_scaling_cur_freq()
671 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); in show_scaling_cur_freq()
673 ret = sprintf(buf, "%u\n", policy->cur); in show_scaling_cur_freq()
677 static int cpufreq_set_policy(struct cpufreq_policy *policy,
685 (struct cpufreq_policy *policy, const char *buf, size_t count) \
690 memcpy(&new_policy, policy, sizeof(*policy)); \
691 new_policy.min = policy->user_policy.min; \
692 new_policy.max = policy->user_policy.max; \
699 ret = cpufreq_set_policy(policy, &new_policy); \
701 policy->user_policy.object = temp; \
712 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, in show_cpuinfo_cur_freq() argument
715 unsigned int cur_freq = __cpufreq_get(policy); in show_cpuinfo_cur_freq()
726 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) in show_scaling_governor() argument
728 if (policy->policy == CPUFREQ_POLICY_POWERSAVE) in show_scaling_governor()
730 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) in show_scaling_governor()
732 else if (policy->governor) in show_scaling_governor()
734 policy->governor->name); in show_scaling_governor()
741 static ssize_t store_scaling_governor(struct cpufreq_policy *policy, in store_scaling_governor() argument
748 memcpy(&new_policy, policy, sizeof(*policy)); in store_scaling_governor()
757 ret = cpufreq_set_policy(policy, &new_policy); in store_scaling_governor()
768 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) in show_scaling_driver() argument
776 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, in show_scaling_available_governors() argument
819 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) in show_related_cpus() argument
821 return cpufreq_show_cpus(policy->related_cpus, buf); in show_related_cpus()
827 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) in show_affected_cpus() argument
829 return cpufreq_show_cpus(policy->cpus, buf); in show_affected_cpus()
832 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, in store_scaling_setspeed() argument
838 if (!policy->governor || !policy->governor->store_setspeed) in store_scaling_setspeed()
845 policy->governor->store_setspeed(policy, freq); in store_scaling_setspeed()
850 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) in show_scaling_setspeed() argument
852 if (!policy->governor || !policy->governor->show_setspeed) in show_scaling_setspeed()
855 return policy->governor->show_setspeed(policy, buf); in show_scaling_setspeed()
861 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) in show_bios_limit() argument
866 ret = cpufreq_driver->bios_limit(policy->cpu, &limit); in show_bios_limit()
870 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); in show_bios_limit()
908 struct cpufreq_policy *policy = to_policy(kobj); in show() local
915 down_read(&policy->rwsem); in show()
916 ret = fattr->show(policy, buf); in show()
917 up_read(&policy->rwsem); in show()
925 struct cpufreq_policy *policy = to_policy(kobj); in store() local
939 if (cpu_online(policy->cpu)) { in store()
940 down_write(&policy->rwsem); in store()
941 ret = fattr->store(policy, buf, count); in store()
942 up_write(&policy->rwsem); in store()
952 struct cpufreq_policy *policy = to_policy(kobj); in cpufreq_sysfs_release() local
954 complete(&policy->kobj_unregister); in cpufreq_sysfs_release()
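The show()/store() wrappers and the release hook above are attached to the per-policy kobject through a sysfs_ops/kobj_type pair, roughly as sketched below (member names as used by kernels of this vintage; ktype_cpufreq is what kobject_init_and_add() receives in cpufreq_policy_alloc() at line 1108):

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};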
968 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu) in add_cpu_dev_symlink() argument
975 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus)) in add_cpu_dev_symlink()
979 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq")) in add_cpu_dev_symlink()
983 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, in remove_cpu_dev_symlink() argument
990 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy) in cpufreq_add_dev_interface() argument
998 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); in cpufreq_add_dev_interface()
1004 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); in cpufreq_add_dev_interface()
1009 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); in cpufreq_add_dev_interface()
1014 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); in cpufreq_add_dev_interface()
1027 static int cpufreq_init_policy(struct cpufreq_policy *policy) in cpufreq_init_policy() argument
1032 memcpy(&new_policy, policy, sizeof(*policy)); in cpufreq_init_policy()
1035 gov = find_governor(policy->last_governor); in cpufreq_init_policy()
1038 policy->governor->name, policy->cpu); in cpufreq_init_policy()
1049 if (policy->last_policy) in cpufreq_init_policy()
1050 new_policy.policy = policy->last_policy; in cpufreq_init_policy()
1055 return cpufreq_set_policy(policy, &new_policy); in cpufreq_init_policy()
1058 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) in cpufreq_add_policy_cpu() argument
1063 if (cpumask_test_cpu(cpu, policy->cpus)) in cpufreq_add_policy_cpu()
1066 down_write(&policy->rwsem); in cpufreq_add_policy_cpu()
1068 cpufreq_stop_governor(policy); in cpufreq_add_policy_cpu()
1070 cpumask_set_cpu(cpu, policy->cpus); in cpufreq_add_policy_cpu()
1073 ret = cpufreq_start_governor(policy); in cpufreq_add_policy_cpu()
1077 up_write(&policy->rwsem); in cpufreq_add_policy_cpu()
1083 struct cpufreq_policy *policy = in handle_update() local
1085 unsigned int cpu = policy->cpu; in handle_update()
1092 struct cpufreq_policy *policy; in cpufreq_policy_alloc() local
1095 policy = kzalloc(sizeof(*policy), GFP_KERNEL); in cpufreq_policy_alloc()
1096 if (!policy) in cpufreq_policy_alloc()
1099 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1102 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1105 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1108 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, in cpufreq_policy_alloc()
1112 kobject_put(&policy->kobj); in cpufreq_policy_alloc()
1116 INIT_LIST_HEAD(&policy->policy_list); in cpufreq_policy_alloc()
1117 init_rwsem(&policy->rwsem); in cpufreq_policy_alloc()
1118 spin_lock_init(&policy->transition_lock); in cpufreq_policy_alloc()
1119 init_waitqueue_head(&policy->transition_wait); in cpufreq_policy_alloc()
1120 init_completion(&policy->kobj_unregister); in cpufreq_policy_alloc()
1121 INIT_WORK(&policy->update, handle_update); in cpufreq_policy_alloc()
1123 policy->cpu = cpu; in cpufreq_policy_alloc()
1124 return policy; in cpufreq_policy_alloc()
1127 free_cpumask_var(policy->real_cpus); in cpufreq_policy_alloc()
1129 free_cpumask_var(policy->related_cpus); in cpufreq_policy_alloc()
1131 free_cpumask_var(policy->cpus); in cpufreq_policy_alloc()
1133 kfree(policy); in cpufreq_policy_alloc()
1138 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) in cpufreq_policy_put_kobj() argument
1143 down_write(&policy->rwsem); in cpufreq_policy_put_kobj()
1144 cpufreq_stats_free_table(policy); in cpufreq_policy_put_kobj()
1145 kobj = &policy->kobj; in cpufreq_policy_put_kobj()
1146 cmp = &policy->kobj_unregister; in cpufreq_policy_put_kobj()
1147 up_write(&policy->rwsem); in cpufreq_policy_put_kobj()
1160 static void cpufreq_policy_free(struct cpufreq_policy *policy) in cpufreq_policy_free() argument
1167 list_del(&policy->policy_list); in cpufreq_policy_free()
1169 for_each_cpu(cpu, policy->related_cpus) in cpufreq_policy_free()
1173 cpufreq_policy_put_kobj(policy); in cpufreq_policy_free()
1174 free_cpumask_var(policy->real_cpus); in cpufreq_policy_free()
1175 free_cpumask_var(policy->related_cpus); in cpufreq_policy_free()
1176 free_cpumask_var(policy->cpus); in cpufreq_policy_free()
1177 kfree(policy); in cpufreq_policy_free()
1182 struct cpufreq_policy *policy; in cpufreq_online() local
1191 policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_online()
1192 if (policy) { in cpufreq_online()
1193 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); in cpufreq_online()
1194 if (!policy_is_inactive(policy)) in cpufreq_online()
1195 return cpufreq_add_policy_cpu(policy, cpu); in cpufreq_online()
1199 down_write(&policy->rwsem); in cpufreq_online()
1200 policy->cpu = cpu; in cpufreq_online()
1201 policy->governor = NULL; in cpufreq_online()
1202 up_write(&policy->rwsem); in cpufreq_online()
1205 policy = cpufreq_policy_alloc(cpu); in cpufreq_online()
1206 if (!policy) in cpufreq_online()
1210 cpumask_copy(policy->cpus, cpumask_of(cpu)); in cpufreq_online()
1215 ret = cpufreq_driver->init(policy); in cpufreq_online()
1221 ret = cpufreq_table_validate_and_sort(policy); in cpufreq_online()
1225 down_write(&policy->rwsem); in cpufreq_online()
1229 cpumask_copy(policy->related_cpus, policy->cpus); in cpufreq_online()
1236 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); in cpufreq_online()
1239 policy->user_policy.min = policy->min; in cpufreq_online()
1240 policy->user_policy.max = policy->max; in cpufreq_online()
1242 for_each_cpu(j, policy->related_cpus) { in cpufreq_online()
1243 per_cpu(cpufreq_cpu_data, j) = policy; in cpufreq_online()
1244 add_cpu_dev_symlink(policy, j); in cpufreq_online()
1247 policy->min = policy->user_policy.min; in cpufreq_online()
1248 policy->max = policy->user_policy.max; in cpufreq_online()
1252 policy->cur = cpufreq_driver->get(policy->cpu); in cpufreq_online()
1253 if (!policy->cur) { in cpufreq_online()
1280 ret = cpufreq_frequency_table_get_index(policy, policy->cur); in cpufreq_online()
1284 __func__, policy->cpu, policy->cur); in cpufreq_online()
1285 ret = __cpufreq_driver_target(policy, policy->cur - 1, in cpufreq_online()
1295 __func__, policy->cpu, policy->cur); in cpufreq_online()
1300 ret = cpufreq_add_dev_interface(policy); in cpufreq_online()
1304 cpufreq_stats_create_table(policy); in cpufreq_online()
1307 list_add(&policy->policy_list, &cpufreq_policy_list); in cpufreq_online()
1311 ret = cpufreq_init_policy(policy); in cpufreq_online()
1320 up_write(&policy->rwsem); in cpufreq_online()
1322 kobject_uevent(&policy->kobj, KOBJ_ADD); in cpufreq_online()
1326 cpufreq_driver->ready(policy); in cpufreq_online()
1333 for_each_cpu(j, policy->real_cpus) in cpufreq_online()
1334 remove_cpu_dev_symlink(policy, get_cpu_device(j)); in cpufreq_online()
1336 up_write(&policy->rwsem); in cpufreq_online()
1340 cpufreq_driver->exit(policy); in cpufreq_online()
1343 cpufreq_policy_free(policy); in cpufreq_online()
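cpufreq_online() relies on the driver's ->init() callback (the cpufreq_driver->init(policy) call at line 1215) to populate the policy before the frequency table is validated and policy->cur is read back. A hypothetical driver init built on the generic helpers listed at the top:

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* example_clk and example_freq_table are hypothetical driver data. */
	policy->clk = example_clk;	/* lets cpufreq_generic_get() report the rate */

	/* Installs the table, records the transition latency (ns) and marks
	 * all CPUs as sharing this policy via cpumask_setall(policy->cpus). */
	return cpufreq_generic_init(policy, example_freq_table, 300 * 1000);
}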
1354 struct cpufreq_policy *policy; in cpufreq_add_dev() local
1367 policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_add_dev()
1368 if (policy) in cpufreq_add_dev()
1369 add_cpu_dev_symlink(policy, cpu); in cpufreq_add_dev()
1376 struct cpufreq_policy *policy; in cpufreq_offline() local
1381 policy = cpufreq_cpu_get_raw(cpu); in cpufreq_offline()
1382 if (!policy) { in cpufreq_offline()
1387 down_write(&policy->rwsem); in cpufreq_offline()
1389 cpufreq_stop_governor(policy); in cpufreq_offline()
1391 cpumask_clear_cpu(cpu, policy->cpus); in cpufreq_offline()
1393 if (policy_is_inactive(policy)) { in cpufreq_offline()
1395 strncpy(policy->last_governor, policy->governor->name, in cpufreq_offline()
1398 policy->last_policy = policy->policy; in cpufreq_offline()
1399 } else if (cpu == policy->cpu) { in cpufreq_offline()
1401 policy->cpu = cpumask_any(policy->cpus); in cpufreq_offline()
1405 if (!policy_is_inactive(policy)) { in cpufreq_offline()
1407 ret = cpufreq_start_governor(policy); in cpufreq_offline()
1416 cpufreq_driver->stop_cpu(policy); in cpufreq_offline()
1419 cpufreq_exit_governor(policy); in cpufreq_offline()
1427 cpufreq_driver->exit(policy); in cpufreq_offline()
1428 policy->freq_table = NULL; in cpufreq_offline()
1432 up_write(&policy->rwsem); in cpufreq_offline()
1444 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_remove_dev() local
1446 if (!policy) in cpufreq_remove_dev()
1452 cpumask_clear_cpu(cpu, policy->real_cpus); in cpufreq_remove_dev()
1453 remove_cpu_dev_symlink(policy, dev); in cpufreq_remove_dev()
1455 if (cpumask_empty(policy->real_cpus)) in cpufreq_remove_dev()
1456 cpufreq_policy_free(policy); in cpufreq_remove_dev()
1468 static void cpufreq_out_of_sync(struct cpufreq_policy *policy, in cpufreq_out_of_sync() argument
1474 policy->cur, new_freq); in cpufreq_out_of_sync()
1476 freqs.old = policy->cur; in cpufreq_out_of_sync()
1479 cpufreq_freq_transition_begin(policy, &freqs); in cpufreq_out_of_sync()
1480 cpufreq_freq_transition_end(policy, &freqs, 0); in cpufreq_out_of_sync()
1492 struct cpufreq_policy *policy; in cpufreq_quick_get() local
1506 policy = cpufreq_cpu_get(cpu); in cpufreq_quick_get()
1507 if (policy) { in cpufreq_quick_get()
1508 ret_freq = policy->cur; in cpufreq_quick_get()
1509 cpufreq_cpu_put(policy); in cpufreq_quick_get()
1524 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); in cpufreq_quick_get_max() local
1527 if (policy) { in cpufreq_quick_get_max()
1528 ret_freq = policy->max; in cpufreq_quick_get_max()
1529 cpufreq_cpu_put(policy); in cpufreq_quick_get_max()
1536 static unsigned int __cpufreq_get(struct cpufreq_policy *policy) in __cpufreq_get() argument
1540 if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get) in __cpufreq_get()
1543 ret_freq = cpufreq_driver->get(policy->cpu); in __cpufreq_get()
1549 if (policy->fast_switch_enabled) in __cpufreq_get()
1552 if (ret_freq && policy->cur && in __cpufreq_get()
1556 if (unlikely(ret_freq != policy->cur)) { in __cpufreq_get()
1557 cpufreq_out_of_sync(policy, ret_freq); in __cpufreq_get()
1558 schedule_work(&policy->update); in __cpufreq_get()
1573 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); in cpufreq_get() local
1576 if (policy) { in cpufreq_get()
1577 down_read(&policy->rwsem); in cpufreq_get()
1578 ret_freq = __cpufreq_get(policy); in cpufreq_get()
1579 up_read(&policy->rwsem); in cpufreq_get()
1581 cpufreq_cpu_put(policy); in cpufreq_get()
1588 static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy) in cpufreq_update_current_freq() argument
1592 new_freq = cpufreq_driver->get(policy->cpu); in cpufreq_update_current_freq()
1596 if (!policy->cur) { in cpufreq_update_current_freq()
1598 policy->cur = new_freq; in cpufreq_update_current_freq()
1599 } else if (policy->cur != new_freq && has_target()) { in cpufreq_update_current_freq()
1600 cpufreq_out_of_sync(policy, new_freq); in cpufreq_update_current_freq()
1617 int cpufreq_generic_suspend(struct cpufreq_policy *policy) in cpufreq_generic_suspend() argument
1621 if (!policy->suspend_freq) { in cpufreq_generic_suspend()
1627 policy->suspend_freq); in cpufreq_generic_suspend()
1629 ret = __cpufreq_driver_target(policy, policy->suspend_freq, in cpufreq_generic_suspend()
1633 __func__, policy->suspend_freq, ret); in cpufreq_generic_suspend()
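cpufreq_generic_suspend() only acts when the driver has chosen a policy->suspend_freq; a hypothetical driver wires the helper up like this (suspend_freq itself would be set in ->init()):

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.init		= example_cpufreq_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,		/* hypothetical */
	.get		= cpufreq_generic_get,
	.suspend	= cpufreq_generic_suspend,	/* parks CPUs at suspend_freq */
	.attr		= cpufreq_generic_attr,
};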
1649 struct cpufreq_policy *policy; in cpufreq_suspend() local
1659 for_each_active_policy(policy) { in cpufreq_suspend()
1661 down_write(&policy->rwsem); in cpufreq_suspend()
1662 cpufreq_stop_governor(policy); in cpufreq_suspend()
1663 up_write(&policy->rwsem); in cpufreq_suspend()
1666 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy)) in cpufreq_suspend()
1668 policy); in cpufreq_suspend()
1683 struct cpufreq_policy *policy; in cpufreq_resume() local
1699 for_each_active_policy(policy) { in cpufreq_resume()
1700 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) { in cpufreq_resume()
1702 policy); in cpufreq_resume()
1704 down_write(&policy->rwsem); in cpufreq_resume()
1705 ret = cpufreq_start_governor(policy); in cpufreq_resume()
1706 up_write(&policy->rwsem); in cpufreq_resume()
1710 __func__, policy); in cpufreq_resume()
1864 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, in cpufreq_driver_fast_switch() argument
1867 target_freq = clamp_val(target_freq, policy->min, policy->max); in cpufreq_driver_fast_switch()
1869 return cpufreq_driver->fast_switch(policy, target_freq); in cpufreq_driver_fast_switch()
1874 static int __target_intermediate(struct cpufreq_policy *policy, in __target_intermediate() argument
1879 freqs->new = cpufreq_driver->get_intermediate(policy, index); in __target_intermediate()
1886 __func__, policy->cpu, freqs->old, freqs->new); in __target_intermediate()
1888 cpufreq_freq_transition_begin(policy, freqs); in __target_intermediate()
1889 ret = cpufreq_driver->target_intermediate(policy, index); in __target_intermediate()
1890 cpufreq_freq_transition_end(policy, freqs, ret); in __target_intermediate()
1899 static int __target_index(struct cpufreq_policy *policy, int index) in __target_index() argument
1901 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; in __target_index()
1903 unsigned int newfreq = policy->freq_table[index].frequency; in __target_index()
1907 if (newfreq == policy->cur) in __target_index()
1914 retval = __target_intermediate(policy, &freqs, index); in __target_index()
1926 __func__, policy->cpu, freqs.old, freqs.new); in __target_index()
1928 cpufreq_freq_transition_begin(policy, &freqs); in __target_index()
1931 retval = cpufreq_driver->target_index(policy, index); in __target_index()
1937 cpufreq_freq_transition_end(policy, &freqs, retval); in __target_index()
1947 freqs.new = policy->restore_freq; in __target_index()
1948 cpufreq_freq_transition_begin(policy, &freqs); in __target_index()
1949 cpufreq_freq_transition_end(policy, &freqs, 0); in __target_index()
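__target_index() drives the optional intermediate-frequency protocol: ->get_intermediate() names a stepping-stone rate (or returns 0 to skip the extra hop), ->target_intermediate() programs it, and restore_freq is used to roll back if the final switch fails. Hypothetical driver callbacks honouring that contract, e.g. for a PLL that must be bypassed while it relocks:

#define EXAMPLE_BYPASS_KHZ	500000	/* hypothetical safe bypass rate */

static unsigned int example_get_intermediate(struct cpufreq_policy *policy,
					     unsigned int index)
{
	/* Returning 0 tells the core no intermediate step is needed. */
	if (policy->freq_table[index].frequency == EXAMPLE_BYPASS_KHZ)
		return 0;
	return EXAMPLE_BYPASS_KHZ;
}

static int example_target_intermediate(struct cpufreq_policy *policy,
				       unsigned int index)
{
	return example_switch_to_bypass();	/* hypothetical clock reparenting */
}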
1956 int __cpufreq_driver_target(struct cpufreq_policy *policy, in __cpufreq_driver_target() argument
1967 target_freq = clamp_val(target_freq, policy->min, policy->max); in __cpufreq_driver_target()
1970 policy->cpu, target_freq, relation, old_target_freq); in __cpufreq_driver_target()
1978 if (target_freq == policy->cur) in __cpufreq_driver_target()
1982 policy->restore_freq = policy->cur; in __cpufreq_driver_target()
1985 return cpufreq_driver->target(policy, target_freq, relation); in __cpufreq_driver_target()
1990 index = cpufreq_frequency_table_target(policy, target_freq, relation); in __cpufreq_driver_target()
1992 return __target_index(policy, index); in __cpufreq_driver_target()
1996 int cpufreq_driver_target(struct cpufreq_policy *policy, in cpufreq_driver_target() argument
2002 down_write(&policy->rwsem); in cpufreq_driver_target()
2004 ret = __cpufreq_driver_target(policy, target_freq, relation); in cpufreq_driver_target()
2006 up_write(&policy->rwsem); in cpufreq_driver_target()
2017 static int cpufreq_init_governor(struct cpufreq_policy *policy) in cpufreq_init_governor() argument
2028 if (!policy->governor) in cpufreq_init_governor()
2032 if (policy->governor->dynamic_switching && in cpufreq_init_governor()
2038 policy->governor->name, gov->name); in cpufreq_init_governor()
2039 policy->governor = gov; in cpufreq_init_governor()
2045 if (!try_module_get(policy->governor->owner)) in cpufreq_init_governor()
2048 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_init_governor()
2050 if (policy->governor->init) { in cpufreq_init_governor()
2051 ret = policy->governor->init(policy); in cpufreq_init_governor()
2053 module_put(policy->governor->owner); in cpufreq_init_governor()
2061 static void cpufreq_exit_governor(struct cpufreq_policy *policy) in cpufreq_exit_governor() argument
2063 if (cpufreq_suspended || !policy->governor) in cpufreq_exit_governor()
2066 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_exit_governor()
2068 if (policy->governor->exit) in cpufreq_exit_governor()
2069 policy->governor->exit(policy); in cpufreq_exit_governor()
2071 module_put(policy->governor->owner); in cpufreq_exit_governor()
2074 static int cpufreq_start_governor(struct cpufreq_policy *policy) in cpufreq_start_governor() argument
2081 if (!policy->governor) in cpufreq_start_governor()
2084 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_start_governor()
2087 cpufreq_update_current_freq(policy); in cpufreq_start_governor()
2089 if (policy->governor->start) { in cpufreq_start_governor()
2090 ret = policy->governor->start(policy); in cpufreq_start_governor()
2095 if (policy->governor->limits) in cpufreq_start_governor()
2096 policy->governor->limits(policy); in cpufreq_start_governor()
2101 static void cpufreq_stop_governor(struct cpufreq_policy *policy) in cpufreq_stop_governor() argument
2103 if (cpufreq_suspended || !policy->governor) in cpufreq_stop_governor()
2106 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_stop_governor()
2108 if (policy->governor->stop) in cpufreq_stop_governor()
2109 policy->governor->stop(policy); in cpufreq_stop_governor()
2112 static void cpufreq_governor_limits(struct cpufreq_policy *policy) in cpufreq_governor_limits() argument
2114 if (cpufreq_suspended || !policy->governor) in cpufreq_governor_limits()
2117 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_governor_limits()
2119 if (policy->governor->limits) in cpufreq_governor_limits()
2120 policy->governor->limits(policy); in cpufreq_governor_limits()
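The init/exit/start/stop/limits calls above map one-to-one onto struct cpufreq_governor callbacks; init and exit are optional, as the NULL checks show. A hypothetical minimal governor; note that ->limits() runs with policy->rwsem already held, so it uses __cpufreq_driver_target() rather than the locking cpufreq_driver_target() wrapper:

static int example_gov_start(struct cpufreq_policy *policy)
{
	/* Set up periodic evaluation, timers, etc. (omitted). */
	return 0;
}

static void example_gov_stop(struct cpufreq_policy *policy)
{
}

static void example_gov_limits(struct cpufreq_policy *policy)
{
	/* Re-clamp the running frequency to the new policy->min/max. */
	__cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_C);
}

static struct cpufreq_governor example_governor = {
	.name	= "example",
	.start	= example_gov_start,
	.stop	= example_gov_stop,
	.limits	= example_gov_limits,
	.owner	= THIS_MODULE,
};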
2148 struct cpufreq_policy *policy; in cpufreq_unregister_governor() local
2159 for_each_inactive_policy(policy) { in cpufreq_unregister_governor()
2160 if (!strcmp(policy->last_governor, governor->name)) { in cpufreq_unregister_governor()
2161 policy->governor = NULL; in cpufreq_unregister_governor()
2162 strcpy(policy->last_governor, "\0"); in cpufreq_unregister_governor()
2185 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) in cpufreq_get_policy() argument
2188 if (!policy) in cpufreq_get_policy()
2195 memcpy(policy, cpu_policy, sizeof(*policy)); in cpufreq_get_policy()
2206 static int cpufreq_set_policy(struct cpufreq_policy *policy, in cpufreq_set_policy() argument
2215 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); in cpufreq_set_policy()
2245 policy->min = new_policy->min; in cpufreq_set_policy()
2246 policy->max = new_policy->max; in cpufreq_set_policy()
2247 trace_cpu_frequency_limits(policy); in cpufreq_set_policy()
2249 policy->cached_target_freq = UINT_MAX; in cpufreq_set_policy()
2252 policy->min, policy->max); in cpufreq_set_policy()
2255 policy->policy = new_policy->policy; in cpufreq_set_policy()
2260 if (new_policy->governor == policy->governor) { in cpufreq_set_policy()
2262 cpufreq_governor_limits(policy); in cpufreq_set_policy()
2269 old_gov = policy->governor; in cpufreq_set_policy()
2272 cpufreq_stop_governor(policy); in cpufreq_set_policy()
2273 cpufreq_exit_governor(policy); in cpufreq_set_policy()
2277 policy->governor = new_policy->governor; in cpufreq_set_policy()
2278 ret = cpufreq_init_governor(policy); in cpufreq_set_policy()
2280 ret = cpufreq_start_governor(policy); in cpufreq_set_policy()
2285 cpufreq_exit_governor(policy); in cpufreq_set_policy()
2289 pr_debug("starting governor %s failed\n", policy->governor->name); in cpufreq_set_policy()
2291 policy->governor = old_gov; in cpufreq_set_policy()
2292 if (cpufreq_init_governor(policy)) in cpufreq_set_policy()
2293 policy->governor = NULL; in cpufreq_set_policy()
2295 cpufreq_start_governor(policy); in cpufreq_set_policy()
2310 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); in cpufreq_update_policy() local
2313 if (!policy) in cpufreq_update_policy()
2316 down_write(&policy->rwsem); in cpufreq_update_policy()
2318 if (policy_is_inactive(policy)) in cpufreq_update_policy()
2322 memcpy(&new_policy, policy, sizeof(*policy)); in cpufreq_update_policy()
2323 new_policy.min = policy->user_policy.min; in cpufreq_update_policy()
2324 new_policy.max = policy->user_policy.max; in cpufreq_update_policy()
2334 new_policy.cur = cpufreq_update_current_freq(policy); in cpufreq_update_policy()
2339 cpufreq_set_policy(policy, &new_policy); in cpufreq_update_policy()
2342 up_write(&policy->rwsem); in cpufreq_update_policy()
2344 cpufreq_cpu_put(policy); in cpufreq_update_policy()
2353 struct cpufreq_policy *policy; in cpufreq_boost_set_sw() local
2356 for_each_active_policy(policy) { in cpufreq_boost_set_sw()
2357 if (!policy->freq_table) in cpufreq_boost_set_sw()
2360 ret = cpufreq_frequency_table_cpuinfo(policy, in cpufreq_boost_set_sw()
2361 policy->freq_table); in cpufreq_boost_set_sw()
2368 down_write(&policy->rwsem); in cpufreq_boost_set_sw()
2369 policy->user_policy.max = policy->max; in cpufreq_boost_set_sw()
2370 cpufreq_governor_limits(policy); in cpufreq_boost_set_sw()
2371 up_write(&policy->rwsem); in cpufreq_boost_set_sw()