
Searched refs: per_cpu (Results 1 – 25 of 52), sorted by relevance
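
A note on the pattern these results share: per_cpu(var, cpu) is the kernel's accessor for a per-CPU variable, declared with DEFINE_PER_CPU(type, name) (or DECLARE_PER_CPU() in a header). It evaluates to CPU cpu's instance of the variable and is an lvalue, so it can be read or assigned directly. A minimal sketch against the 4.19-era <linux/percpu.h> API; the names example_counter and example_sum_all_cpus are hypothetical, not from the tree:

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned int, example_counter);

static unsigned int example_sum_all_cpus(void)
{
	unsigned int sum = 0;
	int cpu;

	/* per_cpu() names the given CPU's instance of the variable;
	 * iterate every possible CPU, as blk_softirq_init() does below. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(example_counter, cpu);

	return sum;
}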


/linux-4.19.296/drivers/cpufreq/
speedstep-centrino.c
260 per_cpu(centrino_model, policy->cpu) = model; in centrino_cpu_init_table()
295 if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) || in extract_clock()
296 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) || in extract_clock()
297 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) { in extract_clock()
302 if ((!per_cpu(centrino_model, cpu)) || in extract_clock()
303 (!per_cpu(centrino_model, cpu)->op_points)) in extract_clock()
308 per_cpu(centrino_model, cpu)->op_points[i].frequency in extract_clock()
311 if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data) in extract_clock()
312 return per_cpu(centrino_model, cpu)-> in extract_clock()
316 return per_cpu(centrino_model, cpu)->op_points[i-1].frequency; in extract_clock()
[all …]
arm_big_little.c
90 cpu_freq = per_cpu(cpu_last_req_freq, j); in find_cluster_maxfreq()
92 if ((cluster == per_cpu(physical_cluster, j)) && in find_cluster_maxfreq()
105 u32 cur_cluster = per_cpu(physical_cluster, cpu); in clk_get_cpu_rate()
121 pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq, in bL_cpufreq_get_rate()
124 return per_cpu(cpu_last_req_freq, cpu); in bL_cpufreq_get_rate()
140 prev_rate = per_cpu(cpu_last_req_freq, cpu); in bL_cpufreq_set_rate()
141 per_cpu(cpu_last_req_freq, cpu) = rate; in bL_cpufreq_set_rate()
142 per_cpu(physical_cluster, cpu) = new_cluster; in bL_cpufreq_set_rate()
171 per_cpu(cpu_last_req_freq, cpu) = prev_rate; in bL_cpufreq_set_rate()
172 per_cpu(physical_cluster, cpu) = old_cluster; in bL_cpufreq_set_rate()
[all …]
sh-cpufreq.c
40 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; in sh_cpufreq_get()
48 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); in __sh_cpufreq_target()
92 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); in sh_cpufreq_verify()
111 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); in sh_cpufreq_cpu_init()
142 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); in sh_cpufreq_cpu_exit()
cpufreq_userspace.c
40 if (!per_cpu(cpu_is_managed, policy->cpu)) in cpufreq_set()
84 per_cpu(cpu_is_managed, policy->cpu) = 1; in cpufreq_userspace_policy_start()
97 per_cpu(cpu_is_managed, policy->cpu) = 0; in cpufreq_userspace_policy_stop()
cpufreq_governor.c
106 struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j); in gov_update_cpu_data()
141 struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j); in dbs_update()
335 struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu); in gov_set_update_util()
371 struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j); in alloc_policy_dbs_info()
386 struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j); in free_policy_dbs_info()
527 struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j); in cpufreq_dbs_governor_start()
/linux-4.19.296/drivers/oprofile/
oprofile_perf.c
43 if (per_cpu(perf_events, cpu)[id] == event) in op_overflow_handler()
79 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) in op_create_counter()
96 per_cpu(perf_events, cpu)[event] = pevent; in op_create_counter()
103 struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; in op_destroy_counter()
107 per_cpu(perf_events, cpu)[event] = NULL; in op_destroy_counter()
262 event = per_cpu(perf_events, cpu)[id]; in oprofile_perf_exit()
267 kfree(per_cpu(perf_events, cpu)); in oprofile_perf_exit()
301 per_cpu(perf_events, cpu) = kcalloc(num_counters, in oprofile_perf_init()
303 if (!per_cpu(perf_events, cpu)) { in oprofile_perf_init()
nmi_timer_int.c
39 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_start_cpu()
46 per_cpu(nmi_timer_events, cpu) = event; in nmi_timer_start_cpu()
57 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_stop_cpu()
107 event = per_cpu(nmi_timer_events, cpu); in nmi_timer_shutdown()
111 per_cpu(nmi_timer_events, cpu) = NULL; in nmi_timer_shutdown()
oprofile_stats.c
26 cpu_buf = &per_cpu(op_cpu_buffer, i); in oprofile_reset_stats()
54 cpu_buf = &per_cpu(op_cpu_buffer, i); in oprofile_create_stats_files()
/linux-4.19.296/drivers/xen/events/
events_2l.c
52 clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); in evtchn_2l_remove()
57 clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu))); in evtchn_2l_bind_to_cpu()
58 set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); in evtchn_2l_bind_to_cpu()
151 per_cpu(cpu_evtchn_mask, cpu)[idx] & in active_evtchns()
267 xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu); in xen_debug_interrupt()
279 v = per_cpu(xen_vcpu, i); in xen_debug_interrupt()
288 v = per_cpu(xen_vcpu, cpu); in xen_debug_interrupt()
352 memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) * in evtchn_2l_resume()
358 memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) * in evtchn_2l_percpu_deinit()
events_fifo.c
105 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); in init_control_block()
276 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); in consume_one_event()
323 control_block = per_cpu(cpu_control_block, cpu); in __evtchn_fifo_handle_events()
345 void *control_block = per_cpu(cpu_control_block, cpu); in evtchn_fifo_resume()
358 per_cpu(cpu_control_block, cpu) = NULL; in evtchn_fifo_resume()
387 per_cpu(cpu_control_block, cpu) = control_block; in evtchn_fifo_alloc_control_block()
398 if (!per_cpu(cpu_control_block, cpu)) in evtchn_fifo_percpu_init()
events_base.c
266 per_cpu(ipi_to_irq, cpu)[ipi] = irq; in xen_irq_info_ipi_setup()
280 per_cpu(virq_to_irq, cpu)[virq] = irq; in xen_irq_info_virq_setup()
332 return per_cpu(virq_to_irq, cpu)[virq]; in irq_from_virq()
470 struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu); in lateeoi_list_del()
480 struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu); in lateeoi_list_add()
535 (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) { in xen_irq_lateeoi_locked()
590 struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu); in xen_cpu_init_eoi()
873 per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; in __unbind_from_irq()
876 per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1; in __unbind_from_irq()
1142 irq = per_cpu(ipi_to_irq, cpu)[ipi]; in bind_ipi_to_irq()
[all …]
/linux-4.19.296/include/linux/
topology.h
81 return per_cpu(numa_node, cpu); in cpu_to_node()
95 per_cpu(numa_node, cpu) = node; in set_cpu_numa_node()
147 return per_cpu(_numa_mem_, cpu); in cpu_to_mem()
154 per_cpu(_numa_mem_, cpu) = node; in set_cpu_numa_mem()
arch_topology.h
22 return per_cpu(cpu_scale, cpu); in topology_get_cpu_scale()
32 return per_cpu(freq_scale, cpu); in topology_get_freq_scale()
kernel_stat.h
49 #define kstat_cpu(cpu) per_cpu(kstat, cpu)
50 #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
irq_cpustat.h
22 #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat.member, cpu))
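
The include/linux hits show the usual header-side idiom: DECLARE_PER_CPU() plus a thin inline or macro wrapper, as cpu_to_node() and kstat_cpu() do above. A sketch of that shape, with a hypothetical example_node variable (the real definition would live in a .c file via DEFINE_PER_CPU()):

#include <linux/percpu.h>

DECLARE_PER_CPU(int, example_node);

static inline int example_cpu_to_node(int cpu)
{
	/* The wrapper hides the per-CPU storage behind a stable accessor. */
	return per_cpu(example_node, cpu);
}
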
/linux-4.19.296/drivers/xen/
time.c
81 res->time[i] += per_cpu(old_runstate_time, cpu)[i]; in xen_get_runstate_snapshot_cpu()
122 per_cpu(old_runstate_time, cpu)[i] += in xen_manage_runstate_time()
149 return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; in xen_vcpu_stolen()
164 area.addr.v = &per_cpu(xen_runstate, cpu); in xen_setup_runstate_info()
/linux-4.19.296/drivers/cpuidle/
driver.c
37 return per_cpu(cpuidle_drivers, cpu); in __cpuidle_get_cpu_driver()
57 per_cpu(cpuidle_drivers, cpu) = NULL; in __cpuidle_unset_driver()
81 per_cpu(cpuidle_drivers, cpu) = drv; in __cpuidle_set_driver()
coupled.c
342 call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu); in cpuidle_coupled_poke()
661 other_dev = per_cpu(cpuidle_devices, cpu); in cpuidle_coupled_register_device()
684 csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu); in cpuidle_coupled_register_device()
758 dev = per_cpu(cpuidle_devices, cpu); in coupled_cpu_online()
774 dev = per_cpu(cpuidle_devices, cpu); in coupled_cpu_up_prepare()
cpuidle-cps.c
112 device = &per_cpu(cpuidle_dev, cpu); in cps_cpuidle_unregister()
163 device = &per_cpu(cpuidle_dev, cpu); in cps_cpuidle_init()
cpuidle.c
476 per_cpu(cpuidle_devices, dev->cpu) = NULL; in __cpuidle_unregister_device()
503 per_cpu(cpuidle_devices, dev->cpu) = dev; in __cpuidle_register_device()
598 device = &per_cpu(cpuidle_dev, cpu); in cpuidle_unregister()
630 device = &per_cpu(cpuidle_dev, cpu); in cpuidle_register()
cpuidle-pseries.c
193 struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); in pseries_cpuidle_cpu_online()
205 struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); in pseries_cpuidle_cpu_dead()
/linux-4.19.296/drivers/irqchip/
irq-ompic.c
104 set_bit(ipi_msg, &per_cpu(ops, dst_cpu)); in ompic_raise_softirq()
122 unsigned long *pending_ops = &per_cpu(ops, cpu); in ompic_ipi_handler()
/linux-4.19.296/block/
blk-softirq.c
90 list_splice_init(&per_cpu(blk_cpu_done, cpu), in blk_softirq_cpu_dead()
174 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); in blk_softirq_init()
/linux-4.19.296/lib/
irq_poll.c
195 list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), in irq_poll_cpu_dead()
208 INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); in irq_poll_setup()
/linux-4.19.296/drivers/base/
arch_topology.c
30 per_cpu(freq_scale, i) = scale; in arch_set_freq_scale()
38 per_cpu(cpu_scale, cpu) = capacity; in topology_set_cpu_scale()
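
Finally, the cpuidle and oprofile hits store a pointer per CPU, publishing it at registration and clearing it at teardown (see __cpuidle_register_device() and __cpuidle_unregister_device() above). A hedged sketch of that pattern; struct example_device and its helpers are hypothetical stand-ins:

#include <linux/percpu.h>

struct example_device {
	int cpu;
};

static DEFINE_PER_CPU(struct example_device *, example_dev);

static void example_register_device(struct example_device *dev)
{
	/* Publish this CPU's device; readers fetch it with
	 * per_cpu(example_dev, cpu). */
	per_cpu(example_dev, dev->cpu) = dev;
}

static void example_unregister_device(struct example_device *dev)
{
	per_cpu(example_dev, dev->cpu) = NULL;
}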
