/linux-4.19.296/drivers/cpufreq/ |
D | cppc_cpufreq.c |
     84  static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,  (in cppc_cpufreq_perf_to_khz(), argument)
     88  struct cppc_perf_caps *caps = &cpu->perf_caps;  (in cppc_cpufreq_perf_to_khz())
    103  div = cpu->perf_caps.highest_perf;  (in cppc_cpufreq_perf_to_khz())
    108  static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,  (in cppc_cpufreq_khz_to_perf(), argument)
    112  struct cppc_perf_caps *caps = &cpu->perf_caps;  (in cppc_cpufreq_khz_to_perf())
    126  mul = cpu->perf_caps.highest_perf;  (in cppc_cpufreq_khz_to_perf())
    137  struct cppc_cpudata *cpu;  (in cppc_cpufreq_set_target(), local)
    142  cpu = all_cpu_data[policy->cpu];  (in cppc_cpufreq_set_target())
    144  desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);  (in cppc_cpufreq_set_target())
    146  if (desired_perf == cpu->perf_ctrls.desired_perf)  (in cppc_cpufreq_set_target())
    [all …]
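The pair of helpers at lines 84/108 map between abstract CPPC performance units and kHz; the mapping is linear against a reference point from the _CPC capability table (freq = perf * ref_freq / ref_perf). A minimal standalone sketch of that scaling, assuming a made-up nominal point of 100 perf units at 2 GHz (the field names only mirror struct cppc_perf_caps):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the relevant fields of the kernel's struct cppc_perf_caps. */
struct perf_caps {
	uint64_t nominal_perf;	/* abstract performance at nominal freq */
	uint64_t nominal_freq;	/* nominal frequency in kHz */
};

/* Linear scaling: freq/perf is constant for a given reference point. */
static uint64_t perf_to_khz(const struct perf_caps *caps, uint64_t perf)
{
	return perf * caps->nominal_freq / caps->nominal_perf;
}

static uint64_t khz_to_perf(const struct perf_caps *caps, uint64_t khz)
{
	return khz * caps->nominal_perf / caps->nominal_freq;
}

int main(void)
{
	struct perf_caps caps = { .nominal_perf = 100, .nominal_freq = 2000000 };

	printf("perf 80 -> %llu kHz\n",
	       (unsigned long long)perf_to_khz(&caps, 80));	/* 1600000 */
	printf("1600000 kHz -> perf %llu\n",
	       (unsigned long long)khz_to_perf(&caps, 1600000));	/* 80 */
	return 0;
}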
|
D | intel_pstate.c |
    233  int cpu;  (member)
    341  static void intel_pstate_set_itmt_prio(int cpu)  (in intel_pstate_set_itmt_prio(), argument)
    347  ret = cppc_get_perf_caps(cpu, &cppc_perf);  (in intel_pstate_set_itmt_prio())
    356  sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);  (in intel_pstate_set_itmt_prio())
    377  static void intel_pstate_set_itmt_prio(int cpu)  (in intel_pstate_set_itmt_prio(), argument)
    384  struct cpudata *cpu;  (in intel_pstate_init_acpi_perf_limits(), local)
    389  intel_pstate_set_itmt_prio(policy->cpu);  (in intel_pstate_init_acpi_perf_limits())
    396  cpu = all_cpu_data[policy->cpu];  (in intel_pstate_init_acpi_perf_limits())
    398  ret = acpi_processor_register_performance(&cpu->acpi_perf_data,  (in intel_pstate_init_acpi_perf_limits())
    399  policy->cpu);  (in intel_pstate_init_acpi_perf_limits())
    [all …]
|
D | ppc_cbe_cpufreq.c |
     50  static int set_pmode(unsigned int cpu, unsigned int slow_mode)  (in set_pmode(), argument)
     55  rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);  (in set_pmode())
     57  rc = cbe_cpufreq_set_pmode(cpu, slow_mode);  (in set_pmode())
     59  pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));  (in set_pmode())
     74  struct device_node *cpu;  (in cbe_cpufreq_cpu_init(), local)
     76  cpu = of_get_cpu_node(policy->cpu, NULL);  (in cbe_cpufreq_cpu_init())
     78  if (!cpu)  (in cbe_cpufreq_cpu_init())
     81  pr_debug("init cpufreq on CPU %d\n", policy->cpu);  (in cbe_cpufreq_cpu_init())
     86  if (!cbe_get_cpu_pmd_regs(policy->cpu) ||  (in cbe_cpufreq_cpu_init())
     87  !cbe_get_cpu_mic_tm_regs(policy->cpu)) {  (in cbe_cpufreq_cpu_init())
    [all …]
|
D | speedstep-centrino.c |
    235  struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);  (in centrino_cpu_init_table(), local)
    239  if (centrino_verify_cpu_id(cpu, model->cpu_id) &&  (in centrino_cpu_init_table())
    241  strcmp(cpu->x86_model_id, model->model_name) == 0))  (in centrino_cpu_init_table())
    248  cpu->x86_model_id);  (in centrino_cpu_init_table())
    255  cpu->x86_model_id);  (in centrino_cpu_init_table())
    260  per_cpu(centrino_model, policy->cpu) = model;  (in centrino_cpu_init_table())
    286  static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)  (in extract_clock(), argument)
    295  if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||  (in extract_clock())
    296  (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||  (in extract_clock())
    297  (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {  (in extract_clock())
    [all …]
|
D | sh-cpufreq.c |
     38  static unsigned int sh_cpufreq_get(unsigned int cpu)  (in sh_cpufreq_get(), argument)
     40  return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;  (in sh_cpufreq_get())
     47  int cpu = policy->cpu;  (in __sh_cpufreq_target(), local)
     48  struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);  (in __sh_cpufreq_target())
     53  if (smp_processor_id() != cpu)  (in __sh_cpufreq_target())
     56  dev = get_cpu_device(cpu);  (in __sh_cpufreq_target())
     66  freqs.old = sh_cpufreq_get(cpu);  (in __sh_cpufreq_target())
     87  return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);  (in sh_cpufreq_target())
     92  struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);  (in sh_cpufreq_verify())
    110  unsigned int cpu = policy->cpu;  (in sh_cpufreq_cpu_init(), local)
    [all …]
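sh-cpufreq can only program a CPU's clock from that CPU itself, so sh_cpufreq_target() (line 87) pushes __sh_cpufreq_target() onto the target CPU with work_on_cpu(). A minimal sketch of the same pattern, with a hypothetical payload struct:

#include <linux/kernel.h>	/* WARN_ON() */
#include <linux/smp.h>		/* smp_processor_id() */
#include <linux/workqueue.h>	/* work_on_cpu() */

/* Hypothetical request payload, passed through the void * argument. */
struct freq_req {
	unsigned int cpu;
	unsigned int target_khz;
};

/* Runs in process context, pinned to req->cpu by work_on_cpu(). */
static long do_set_freq(void *arg)
{
	struct freq_req *req = arg;

	WARN_ON(smp_processor_id() != req->cpu);
	/* ... program the per-CPU clock to req->target_khz here ... */
	return 0;
}

static long set_freq_on(unsigned int cpu, unsigned int khz)
{
	struct freq_req req = { .cpu = cpu, .target_khz = khz };

	/* Blocks until do_set_freq() has completed on @cpu. */
	return work_on_cpu(cpu, do_set_freq, &req);
}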
|
D | ia64-acpi-cpufreq.c |
     38  unsigned int cpu;  (member)
    110  unsigned int cpu = req->cpu;  (in processor_get_freq(), local)
    111  struct cpufreq_acpi_io *data = acpi_io_data[cpu];  (in processor_get_freq())
    116  if (smp_processor_id() != cpu)  (in processor_get_freq())
    134  unsigned int cpu = req->cpu;  (in processor_set_freq(), local)
    135  struct cpufreq_acpi_io *data = acpi_io_data[cpu];  (in processor_set_freq())
    140  if (smp_processor_id() != cpu)  (in processor_set_freq())
    177  unsigned int cpu)  (in acpi_cpufreq_get(), argument)
    182  req.cpu = cpu;  (in acpi_cpufreq_get())
    183  ret = work_on_cpu(cpu, processor_get_freq, &req);  (in acpi_cpufreq_get())
    [all …]
|
/linux-4.19.296/include/linux/ |
D | cpumask.h |
    103  #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)  (argument)
    104  #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)  (argument)
    105  #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)  (argument)
    106  #define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)  (argument)
    112  #define cpu_online(cpu) ((cpu) == 0)  (argument)
    113  #define cpu_possible(cpu) ((cpu) == 0)  (argument)
    114  #define cpu_present(cpu) ((cpu) == 0)  (argument)
    115  #define cpu_active(cpu) ((cpu) == 0)  (argument)
    118  static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)  (in cpu_max_bits_warn(), argument)
    121  WARN_ON_ONCE(cpu >= bits);  (in cpu_max_bits_warn())
    [all …]
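cpu_online()/cpu_possible()/cpu_present()/cpu_active() are one-liner tests against the global cpumasks on SMP builds, and collapse to (cpu) == 0 on uniprocessor builds (lines 112-115). Typical usage, a minimal sketch:

#include <linux/cpumask.h>
#include <linux/printk.h>

static void report_cpus(void)
{
	unsigned int cpu;

	/* Walk every CPU that could ever exist on this boot... */
	for_each_possible_cpu(cpu) {
		/* ...and test the individual masks for its current state. */
		pr_info("cpu%u: present=%d online=%d active=%d\n",
			cpu, cpu_present(cpu), cpu_online(cpu),
			cpu_active(cpu));
	}
}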
|
D | topology.h |
     79  static inline int cpu_to_node(int cpu)  (in cpu_to_node(), argument)
     81  return per_cpu(numa_node, cpu);  (in cpu_to_node())
     93  static inline void set_cpu_numa_node(int cpu, int node)  (in set_cpu_numa_node(), argument)
     95  per_cpu(numa_node, cpu) = node;  (in set_cpu_numa_node())
    145  static inline int cpu_to_mem(int cpu)  (in cpu_to_mem(), argument)
    147  return per_cpu(_numa_mem_, cpu);  (in cpu_to_mem())
    152  static inline void set_cpu_numa_mem(int cpu, int node)  (in set_cpu_numa_mem(), argument)
    154  per_cpu(_numa_mem_, cpu) = node;  (in set_cpu_numa_mem())
    155  _node_numa_mem_[cpu_to_node(cpu)] = node;  (in set_cpu_numa_mem())
    177  static inline int cpu_to_mem(int cpu)  (in cpu_to_mem(), argument)
    [all …]
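cpu_to_node() (line 79) reads the per-CPU numa_node variable. Its most common use is NUMA-local allocation; a minimal sketch:

#include <linux/slab.h>		/* kmalloc_node() */
#include <linux/topology.h>	/* cpu_to_node() */

/* Allocate a buffer from the memory node nearest to @cpu; with GFP_KERNEL
 * the allocator may still fall back to other nodes under pressure. */
static void *alloc_near_cpu(unsigned int cpu, size_t size)
{
	return kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
}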
|
D | tick.h |
     22  extern void tick_cleanup_dead_cpu(int cpu);
     28  static inline void tick_cleanup_dead_cpu(int cpu) { }  (in tick_cleanup_dead_cpu(), argument)
    119  extern bool tick_nohz_tick_stopped_cpu(int cpu);
    129  extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
    130  extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
    131  extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
    143  static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }  (in tick_nohz_tick_stopped_cpu(), argument)
    156  static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }  (in get_cpu_idle_time_us(), argument)
    157  static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }  (in get_cpu_iowait_time_us(), argument)
    174  static inline bool tick_nohz_full_cpu(int cpu)  (in tick_nohz_full_cpu(), argument)
    [all …]
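get_cpu_idle_time_us()/get_cpu_iowait_time_us() report per-CPU nohz idle accounting; when CONFIG_NO_HZ_COMMON is off, the stubs at lines 156-157 return -1. A minimal caller sketch that handles both cases:

#include <linux/tick.h>
#include <linux/printk.h>

static void show_idle_us(int cpu)
{
	u64 now_us;
	u64 idle_us = get_cpu_idle_time_us(cpu, &now_us);

	if (idle_us == (u64)-1)
		pr_info("cpu%d: nohz idle accounting unavailable\n", cpu);
	else
		pr_info("cpu%d: idle %llu us (as of %llu us)\n",
			cpu, idle_us, now_us);
}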
|
D | ring_buffer.h |
    100  int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
    101  __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
    109  int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
    124  ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
    127  ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
    131  ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
    143  unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
    145  void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
    150  struct ring_buffer *buffer_b, int cpu);
    154  struct ring_buffer *buffer_b, int cpu)  (in ring_buffer_swap_cpu(), argument)
    [all …]
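These prototypes all take an explicit cpu because the trace ring buffer is really one buffer per CPU. A minimal sketch that drains one CPU's events with ring_buffer_consume(), assuming the buffer was created elsewhere with ring_buffer_alloc():

#include <linux/ring_buffer.h>
#include <linux/printk.h>

/* Drain all events currently recorded on @cpu's per-CPU buffer. */
static void drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	/* ring_buffer_consume() pops one event from @cpu's buffer, or
	 * returns NULL once that per-CPU buffer is empty. */
	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		void *payload = ring_buffer_event_data(event);

		pr_info("cpu%d ts=%llu lost=%lu payload=%p\n",
			cpu, ts, lost, payload);
	}
}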
|
/linux-4.19.296/drivers/xen/ |
D | cpu_hotplug.c |
     12  static void enable_hotplug_cpu(int cpu)  (in enable_hotplug_cpu(), argument)
     14  if (!cpu_present(cpu))  (in enable_hotplug_cpu())
     15  xen_arch_register_cpu(cpu);  (in enable_hotplug_cpu())
     17  set_cpu_present(cpu, true);  (in enable_hotplug_cpu())
     20  static void disable_hotplug_cpu(int cpu)  (in disable_hotplug_cpu(), argument)
     22  if (!cpu_is_hotpluggable(cpu))  (in disable_hotplug_cpu())
     25  if (cpu_online(cpu))  (in disable_hotplug_cpu())
     26  device_offline(get_cpu_device(cpu));  (in disable_hotplug_cpu())
     27  if (!cpu_online(cpu) && cpu_present(cpu)) {  (in disable_hotplug_cpu())
     28  xen_arch_unregister_cpu(cpu);  (in disable_hotplug_cpu())
    [all …]
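disable_hotplug_cpu() offlines the CPU through the driver core rather than calling the low-level hotplug path directly. A minimal sketch of that route; note that device_offline() is documented to require the device-hotplug lock, which the sketch takes explicitly:

#include <linux/cpu.h>		/* get_cpu_device(), cpu_online() */
#include <linux/device.h>	/* device_offline(), lock_device_hotplug() */

/* Take @cpu offline via the driver core, as the Xen hotplug code does. */
static int offline_cpu(int cpu)
{
	int ret = 0;

	lock_device_hotplug();
	if (cpu_online(cpu))
		ret = device_offline(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}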
|
D | time.c |
     55  struct vcpu_runstate_info *res, unsigned int cpu)  (in xen_get_runstate_snapshot_cpu_delta(), argument)
     62  state = per_cpu_ptr(&xen_runstate, cpu);  (in xen_get_runstate_snapshot_cpu_delta())
     74  unsigned int cpu)  (in xen_get_runstate_snapshot_cpu(), argument)
     78  xen_get_runstate_snapshot_cpu_delta(res, cpu);  (in xen_get_runstate_snapshot_cpu())
     81  res->time[i] += per_cpu(old_runstate_time, cpu)[i];  (in xen_get_runstate_snapshot_cpu())
     88  int cpu, i;  (in xen_manage_runstate_time(), local)
    105  for_each_possible_cpu(cpu) {  (in xen_manage_runstate_time())
    106  xen_get_runstate_snapshot_cpu_delta(&state, cpu);  (in xen_manage_runstate_time())
    107  memcpy(runstate_delta[cpu].time, state.time,  (in xen_manage_runstate_time())
    108  sizeof(runstate_delta[cpu].time));  (in xen_manage_runstate_time())
    [all …]
|
/linux-4.19.296/drivers/base/ |
D | cacheinfo.c |
     25  #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))  (argument)
     26  #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)  (argument)
     27  #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)  (argument)
     29  struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)  (in get_cpu_cacheinfo(), argument)
     31  return ci_cacheinfo(cpu);  (in get_cpu_cacheinfo())
    156  static int cache_setup_of_node(unsigned int cpu)  (in cache_setup_of_node(), argument)
    160  struct device *cpu_dev = get_cpu_device(cpu);  (in cache_setup_of_node())
    161  struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);  (in cache_setup_of_node())
    170  pr_err("No cpu device for CPU %d\n", cpu);  (in cache_setup_of_node())
    175  pr_err("Failed to find cpu%d device node\n", cpu);  (in cache_setup_of_node())
    [all …]
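get_cpu_cacheinfo() returns the per-CPU cpu_cacheinfo, whose info_list array holds one struct cacheinfo per discovered cache leaf (cache_leaves(cpu) of them). A minimal consumer sketch:

#include <linux/cacheinfo.h>
#include <linux/printk.h>

/* Dump the cache hierarchy the core code discovered for @cpu. */
static void show_caches(unsigned int cpu)
{
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
	unsigned int i;

	/* info_list holds one struct cacheinfo per detected leaf. */
	for (i = 0; i < ci->num_leaves; i++) {
		struct cacheinfo *leaf = &ci->info_list[i];

		pr_info("cpu%u L%u: %u bytes, line %u\n",
			cpu, leaf->level, leaf->size,
			leaf->coherency_line_size);
	}
}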
|
D | arch_topology.c |
     36  void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)  (in topology_set_cpu_scale(), argument)
     38  per_cpu(cpu_scale, cpu) = capacity;  (in topology_set_cpu_scale())
     45  struct cpu *cpu = container_of(dev, struct cpu, dev);  (in cpu_capacity_show(), local)
     47  return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));  (in cpu_capacity_show())
     55  struct cpu *cpu = container_of(dev, struct cpu, dev);  (in cpu_capacity_store(), local)
     56  int this_cpu = cpu->dev.id;  (in cpu_capacity_store())
     83  struct device *cpu;  (in register_cpu_capacity_sysctl(), local)
     86  cpu = get_cpu_device(i);  (in register_cpu_capacity_sysctl())
     87  if (!cpu) {  (in register_cpu_capacity_sysctl())
     92  device_create_file(cpu, &dev_attr_cpu_capacity);  (in register_cpu_capacity_sysctl())
    [all …]
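cpu_scale is the scheduler's per-CPU relative capacity, normalized so the fastest CPU in the system is SCHED_CAPACITY_SCALE (1024); cpu_capacity_show() exposes it via sysfs. A minimal sketch of setting and reading it (the 512 value is made up; the NULL first argument matches the 4.19 topology_get_cpu_scale() signature seen at line 47):

#include <linux/arch_topology.h>	/* topology_{set,get}_cpu_scale() */
#include <linux/printk.h>

/* Publish a LITTLE core at half the capacity of the biggest core. */
static void demo_capacity(unsigned int little_cpu)
{
	topology_set_cpu_scale(little_cpu, 512);

	pr_info("cpu%u capacity=%lu\n", little_cpu,
		topology_get_cpu_scale(NULL, little_cpu));
}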
|
D | cpu.c |
     38  static void change_cpu_under_node(struct cpu *cpu,  (in change_cpu_under_node(), argument)
     41  int cpuid = cpu->dev.id;  (in change_cpu_under_node())
     44  cpu->node_id = to_nid;  (in change_cpu_under_node())
     49  struct cpu *cpu = container_of(dev, struct cpu, dev);  (in cpu_subsys_online(), local)
     65  change_cpu_under_node(cpu, from_nid, to_nid);  (in cpu_subsys_online())
     75  void unregister_cpu(struct cpu *cpu)  (in unregister_cpu(), argument)
     77  int logical_cpu = cpu->dev.id;  (in unregister_cpu())
     81  device_unregister(&cpu->dev);  (in unregister_cpu())
    145  struct cpu *cpu = container_of(dev, struct cpu, dev);  (in show_crash_notes(), local)
    150  cpunum = cpu->dev.id;  (in show_crash_notes())
    [all …]
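cpu_subsys_online() and show_crash_notes() recover the struct cpu from its embedded struct device with container_of(), the standard member-pointer-to-container idiom. A toy illustration with made-up types:

#include <linux/kernel.h>	/* container_of() */

struct demo_device {
	int id;
};

/* Toy stand-in for struct cpu: a wrapper embedding a generic device. */
struct demo_cpu {
	int node_id;
	struct demo_device dev;
};

static int demo_cpu_node(struct demo_device *dev)
{
	/* Recover the enclosing demo_cpu from a pointer to its .dev
	 * member: container_of() subtracts offsetof(struct demo_cpu, dev). */
	struct demo_cpu *cpu = container_of(dev, struct demo_cpu, dev);

	return cpu->node_id;
}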
|
/linux-4.19.296/drivers/clk/imx/ |
D | clk-cpu.c |
     33  struct clk_cpu *cpu = to_clk_cpu(hw);  (in clk_cpu_recalc_rate(), local)
     35  return clk_get_rate(cpu->div);  (in clk_cpu_recalc_rate())
     41  struct clk_cpu *cpu = to_clk_cpu(hw);  (in clk_cpu_round_rate(), local)
     43  return clk_round_rate(cpu->pll, rate);  (in clk_cpu_round_rate())
     49  struct clk_cpu *cpu = to_clk_cpu(hw);  (in clk_cpu_set_rate(), local)
     53  ret = clk_set_parent(cpu->mux, cpu->step);  (in clk_cpu_set_rate())
     58  ret = clk_set_rate(cpu->pll, rate);  (in clk_cpu_set_rate())
     60  clk_set_parent(cpu->mux, cpu->pll);  (in clk_cpu_set_rate())
     64  clk_set_parent(cpu->mux, cpu->pll);  (in clk_cpu_set_rate())
     67  clk_set_rate(cpu->div, rate);  (in clk_cpu_set_rate())
    [all …]
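clk_cpu_set_rate() reparents the CPU mux onto a fixed "step" clock while the PLL relocks at the new rate, then switches back (and also switches back on failure, line 64). A minimal sketch of that sequence against the generic clk API, with the clk handles assumed to come from the clock provider:

#include <linux/clk.h>

/* Park the CPU on a safe clock while the PLL relocks, then return. */
static int cpu_set_rate(struct clk *mux, struct clk *pll,
			struct clk *step, unsigned long rate)
{
	int ret;

	ret = clk_set_parent(mux, step);	/* run from the safe clock */
	if (ret)
		return ret;

	ret = clk_set_rate(pll, rate);		/* relock the PLL */

	clk_set_parent(mux, pll);		/* switch back either way */
	return ret;
}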
|
/linux-4.19.296/drivers/cpuidle/ |
D | coupled.c |
    323  int cpu = (unsigned long)info;  (in cpuidle_coupled_handle_poke(), local)
    324  cpumask_set_cpu(cpu, &cpuidle_coupled_poked);  (in cpuidle_coupled_handle_poke())
    325  cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);  (in cpuidle_coupled_handle_poke())
    340  static void cpuidle_coupled_poke(int cpu)  (in cpuidle_coupled_poke(), argument)
    342  call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);  (in cpuidle_coupled_poke())
    344  if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))  (in cpuidle_coupled_poke())
    345  smp_call_function_single_async(cpu, csd);  (in cpuidle_coupled_poke())
    358  int cpu;  (in cpuidle_coupled_poke_others(), local)
    360  for_each_cpu(cpu, &coupled->coupled_cpus)  (in cpuidle_coupled_poke_others())
    361  if (cpu != this_cpu && cpu_online(cpu))  (in cpuidle_coupled_poke_others())
    [all …]
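cpuidle_coupled_poke() sends a fire-and-forget IPI via a per-CPU call_single_data_t, using the poke_pending mask to guarantee each csd is only in flight once. A minimal sketch of the async-IPI half (without the dedup mask):

#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(call_single_data_t, poke_csd);

/* Runs in IPI context on the poked CPU. */
static void poke_handler(void *info)
{
	pr_info("poked cpu %lu\n", (unsigned long)info);
}

/* Fire-and-forget cross-CPU call; the csd must not be reused until the
 * handler has run, which is what the pending mask enforces above. */
static void poke(int cpu)
{
	call_single_data_t *csd = &per_cpu(poke_csd, cpu);

	csd->func = poke_handler;
	csd->info = (void *)(unsigned long)cpu;
	smp_call_function_single_async(cpu, csd);
}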
|
D | driver.c |
     35  static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)  (in __cpuidle_get_cpu_driver(), argument)
     37  return per_cpu(cpuidle_drivers, cpu);  (in __cpuidle_get_cpu_driver())
     50  int cpu;  (in __cpuidle_unset_driver(), local)
     52  for_each_cpu(cpu, drv->cpumask) {  (in __cpuidle_unset_driver())
     54  if (drv != __cpuidle_get_cpu_driver(cpu))  (in __cpuidle_unset_driver())
     57  per_cpu(cpuidle_drivers, cpu) = NULL;  (in __cpuidle_unset_driver())
     70  int cpu;  (in __cpuidle_set_driver(), local)
     72  for_each_cpu(cpu, drv->cpumask) {  (in __cpuidle_set_driver())
     75  old_drv = __cpuidle_get_cpu_driver(cpu);  (in __cpuidle_set_driver())
     80  for_each_cpu(cpu, drv->cpumask)  (in __cpuidle_set_driver())
    [all …]
|
/linux-4.19.296/block/ |
D | blk-mq-cpumap.c |
     17  static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)  (in cpu_to_queue_index(), argument)
     19  return cpu % nr_queues;  (in cpu_to_queue_index())
     22  static int get_first_sibling(unsigned int cpu)  (in get_first_sibling(), argument)
     26  ret = cpumask_first(topology_sibling_cpumask(cpu));  (in get_first_sibling())
     30  return cpu;  (in get_first_sibling())
     37  unsigned int cpu, first_sibling;  (in blk_mq_map_queues(), local)
     39  for_each_possible_cpu(cpu) {  (in blk_mq_map_queues())
     46  if (cpu < nr_queues) {  (in blk_mq_map_queues())
     47  map[cpu] = cpu_to_queue_index(nr_queues, cpu);  (in blk_mq_map_queues())
     49  first_sibling = get_first_sibling(cpu);  (in blk_mq_map_queues())
    [all …]
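blk_mq_map_queues() spreads CPUs over nr_queues round-robin, but CPUs beyond nr_queues inherit their first topology sibling's queue so hyperthreads end up sharing one. A simplified standalone model of that rule (the sibling table is made-up topology):

#include <stdio.h>

/* cpus below nr_queues map round-robin; higher cpus inherit their first
 * hyperthread sibling's queue. */
static int map_queue(int cpu, int nr_queues, const int *first_sibling)
{
	if (cpu < nr_queues)
		return cpu % nr_queues;
	return first_sibling[cpu] % nr_queues;
}

int main(void)
{
	/* 8 CPUs, hyperthread pairs (0,4) (1,5) (2,6) (3,7). */
	const int first_sibling[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };
	int cpu;

	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu%d -> queue %d\n",
		       cpu, map_queue(cpu, 4, first_sibling));
	return 0;
}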
|
/linux-4.19.296/drivers/oprofile/ |
D | nmi_timer_int.c |
     37  static int nmi_timer_start_cpu(int cpu)  (in nmi_timer_start_cpu(), argument)
     39  struct perf_event *event = per_cpu(nmi_timer_events, cpu);  (in nmi_timer_start_cpu())
     42  event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,  (in nmi_timer_start_cpu())
     46  per_cpu(nmi_timer_events, cpu) = event;  (in nmi_timer_start_cpu())
     55  static void nmi_timer_stop_cpu(int cpu)  (in nmi_timer_stop_cpu(), argument)
     57  struct perf_event *event = per_cpu(nmi_timer_events, cpu);  (in nmi_timer_stop_cpu())
     63  static int nmi_timer_cpu_online(unsigned int cpu)  (in nmi_timer_cpu_online(), argument)
     65  nmi_timer_start_cpu(cpu);  (in nmi_timer_cpu_online())
     68  static int nmi_timer_cpu_predown(unsigned int cpu)  (in nmi_timer_cpu_predown(), argument)
     70  nmi_timer_stop_cpu(cpu);  (in nmi_timer_cpu_predown())
    [all …]
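nmi_timer_cpu_online()/nmi_timer_cpu_predown() are cpuhp callbacks: the hotplug state machine invokes them for every CPU that comes up or goes down, including CPUs already online at registration time. A minimal module sketch using a dynamic state:

#include <linux/cpuhotplug.h>
#include <linux/module.h>
#include <linux/printk.h>

static int demo_cpu_online(unsigned int cpu)
{
	pr_info("cpu%u came online\n", cpu);
	return 0;
}

static int demo_cpu_predown(unsigned int cpu)
{
	pr_info("cpu%u going down\n", cpu);
	return 0;
}

static int hp_state;

static int __init demo_init(void)
{
	/* Runs demo_cpu_online() on every CPU already up, and again for
	 * each future online; demo_cpu_predown() runs before offline. */
	hp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				     demo_cpu_online, demo_cpu_predown);
	return hp_state < 0 ? hp_state : 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state(hp_state);	/* runs predown on online CPUs */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");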
|
D | oprofile_perf.c |
     40  u32 cpu = smp_processor_id();  (in op_overflow_handler(), local)
     43  if (per_cpu(perf_events, cpu)[id] == event)  (in op_overflow_handler())
     50  "on cpu %u\n", cpu);  (in op_overflow_handler())
     75  static int op_create_counter(int cpu, int event)  (in op_create_counter(), argument)
     79  if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])  (in op_create_counter())
     83  cpu, NULL,  (in op_create_counter())
     92  "on CPU %d\n", event, cpu);  (in op_create_counter())
     96  per_cpu(perf_events, cpu)[event] = pevent;  (in op_create_counter())
    101  static void op_destroy_counter(int cpu, int event)  (in op_destroy_counter(), argument)
    103  struct perf_event *pevent = per_cpu(perf_events, cpu)[event];  (in op_destroy_counter())
    [all …]
|
/linux-4.19.296/lib/ |
D | cpu_rmap.c |
     31  unsigned int cpu;  (in alloc_cpu_rmap(), local)
     55  for_each_possible_cpu(cpu) {  (in alloc_cpu_rmap())
     56  rmap->near[cpu].index = cpu % size;  (in alloc_cpu_rmap())
     57  rmap->near[cpu].dist = CPU_RMAP_DIST_INF;  (in alloc_cpu_rmap())
     97  static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,  (in cpu_rmap_copy_neigh(), argument)
    103  if (rmap->near[cpu].dist > dist &&  (in cpu_rmap_copy_neigh())
    105  rmap->near[cpu].index = rmap->near[neigh].index;  (in cpu_rmap_copy_neigh())
    106  rmap->near[cpu].dist = dist;  (in cpu_rmap_copy_neigh())
    117  unsigned int cpu;  (in debug_print_rmap(), local)
    121  for_each_possible_cpu(cpu) {  (in debug_print_rmap())
    [all …]
|
/linux-4.19.296/include/trace/events/ |
D | cpuhp.h |
     12  TP_PROTO(unsigned int cpu,
     17  TP_ARGS(cpu, target, idx, fun),
     20  __field( unsigned int, cpu )
     27  __entry->cpu = cpu;
     34  __entry->cpu, __entry->target, __entry->idx, __entry->fun)
     39  TP_PROTO(unsigned int cpu,
     45  TP_ARGS(cpu, target, idx, fun, node),
     48  __field( unsigned int, cpu )
     55  __entry->cpu = cpu;
     62  __entry->cpu, __entry->target, __entry->idx, __entry->fun)
    [all …]
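These fragments are the body of TRACE_EVENT() definitions: TP_PROTO/TP_ARGS declare the tracepoint signature, TP_STRUCT__entry and TP_fast_assign define and fill the ring-buffer record, and TP_printk formats it for output. A minimal self-contained event in the same shape (the header guard and system name are made up; it assumes the usual CREATE_TRACE_POINTS setup in one .c file):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_cpu_step,

	TP_PROTO(unsigned int cpu, int target),

	TP_ARGS(cpu, target),

	/* Layout of the record stored in the trace ring buffer. */
	TP_STRUCT__entry(
		__field(unsigned int,	cpu)
		__field(int,		target)
	),

	/* Runs at the tracepoint: copy the arguments into the record. */
	TP_fast_assign(
		__entry->cpu	= cpu;
		__entry->target	= target;
	),

	/* How the record is rendered in the trace output. */
	TP_printk("cpu=%u target=%d", __entry->cpu, __entry->target)
);

#endif /* _TRACE_DEMO_H */

#include <trace/define_trace.h>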
|
/linux-4.19.296/include/linux/clk/ |
D | tegra.h |
     44  void (*wait_for_reset)(u32 cpu);
     45  void (*put_in_reset)(u32 cpu);
     46  void (*out_of_reset)(u32 cpu);
     47  void (*enable_clock)(u32 cpu);
     48  void (*disable_clock)(u32 cpu);
     58  static inline void tegra_wait_cpu_in_reset(u32 cpu)  (in tegra_wait_cpu_in_reset(), argument)
     63  tegra_cpu_car_ops->wait_for_reset(cpu);  (in tegra_wait_cpu_in_reset())
     66  static inline void tegra_put_cpu_in_reset(u32 cpu)  (in tegra_put_cpu_in_reset(), argument)
     71  tegra_cpu_car_ops->put_in_reset(cpu);  (in tegra_put_cpu_in_reset())
     74  static inline void tegra_cpu_out_of_reset(u32 cpu)  (in tegra_cpu_out_of_reset(), argument)
    [all …]
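tegra.h routes CPU reset/clock control through a tegra_cpu_car_ops table, with static inline wrappers that check the hook before calling so callers work before (or without) the SoC driver registering its implementation. The same guarded-ops idiom, a minimal sketch with made-up ops:

#include <linux/bug.h>		/* WARN_ON_ONCE() */
#include <linux/types.h>	/* u32 */

/* Made-up ops table; the real one is struct tegra_cpu_car_ops. */
struct demo_car_ops {
	void (*enable_clock)(u32 cpu);
	void (*disable_clock)(u32 cpu);
};

static struct demo_car_ops *demo_car_ops;	/* set by the SoC driver */

static inline void demo_enable_cpu_clock(u32 cpu)
{
	/* Guard both the table and the individual hook, so a missing
	 * implementation warns once instead of dereferencing NULL. */
	if (WARN_ON_ONCE(!demo_car_ops || !demo_car_ops->enable_clock))
		return;

	demo_car_ops->enable_clock(cpu);
}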
|
/linux-4.19.296/drivers/xen/events/ |
D | events_fifo.c |
    102  static int init_control_block(int cpu,  (in init_control_block(), argument)
    105  struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);  (in init_control_block())
    116  init_control.vcpu = xen_vcpu_nr(cpu);  (in init_control_block())
    189  static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)  (in evtchn_fifo_bind_to_cpu(), argument)
    272  static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl,  (in consume_one_event(), argument)
    276  struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);  (in consume_one_event())
    316  static void __evtchn_fifo_handle_events(unsigned cpu,  (in __evtchn_fifo_handle_events(), argument)
    323  control_block = per_cpu(cpu_control_block, cpu);  (in __evtchn_fifo_handle_events())
    329  consume_one_event(cpu, ctrl, control_block, q, &ready);  (in __evtchn_fifo_handle_events())
    334  static void evtchn_fifo_handle_events(unsigned cpu,  (in evtchn_fifo_handle_events(), argument)
    [all …]
|