// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) based CPUFreq Interface driver
 *
 * Copyright (C) 2018 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>

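/*
 * Per-policy driver data: the SCMI performance domain backing the policy,
 * the CPU device it was initialised from, and the cooling device registered
 * from the ->ready() callback.
 */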
struct scmi_data {
	int domain_id;
	struct device *cpu_dev;
	struct thermal_cooling_device *cdev;
};

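/* SCMI protocol handle obtained at probe time, shared by all policies */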
static const struct scmi_handle *handle;

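/*
 * Report the current frequency of the CPU's performance domain as read back
 * from the SCMI firmware, converted from Hz to the kHz units used by
 * cpufreq.  Returns 0 if the firmware query fails.
 */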
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
	struct scmi_perf_ops *perf_ops = handle->perf_ops;
	struct scmi_data *priv = policy->driver_data;
	unsigned long rate;
	int ret;

	ret = perf_ops->freq_get(handle, priv->domain_id, &rate, false);
	if (ret)
		return 0;
	return rate / 1000;
}

/*
 * perf_ops->freq_set is not synchronous: the actual OPP change happens
 * asynchronously and can be notified if the relevant events are subscribed
 * for with the SCMI firmware.
 */
static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
	int ret;
	struct scmi_data *priv = policy->driver_data;
	struct scmi_perf_ops *perf_ops = handle->perf_ops;
	u64 freq = policy->freq_table[index].frequency;

	ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
	if (!ret)
		arch_set_freq_scale(policy->related_cpus, freq,
				    policy->cpuinfo.max_freq);
	return ret;
}

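/*
 * Fast switch callback, usable from scheduler context: request the new
 * frequency from the firmware and return it on success, or 0 if the
 * request failed.
 */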
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct scmi_data *priv = policy->driver_data;
	struct scmi_perf_ops *perf_ops = handle->perf_ops;

	if (!perf_ops->freq_set(handle, priv->domain_id,
				target_freq * 1000, true)) {
		arch_set_freq_scale(policy->related_cpus, target_freq,
				    policy->cpuinfo.max_freq);
		return target_freq;
	}

	return 0;
}

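/*
 * Mark in @cpumask every other CPU that shares the same SCMI performance
 * domain as @cpu_dev.
 */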
static int
scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
	int cpu, domain, tdomain;
	struct device *tcpu_dev;

	domain = handle->perf_ops->device_domain_id(cpu_dev);
	if (domain < 0)
		return domain;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev)
			continue;

		tdomain = handle->perf_ops->device_domain_id(tcpu_dev);
		if (tdomain == domain)
			cpumask_set_cpu(cpu, cpumask);
	}

	return 0;
}

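/*
 * Per-policy initialisation: populate the OPP table from the firmware,
 * work out which CPUs share the policy, build the cpufreq frequency table
 * and fill in the transition latency and fast-switch capability.
 */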
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret;
	unsigned int latency;
	struct device *cpu_dev;
	struct scmi_data *priv;
	struct cpufreq_frequency_table *freq_table;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}

	ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
	if (ret) {
		dev_warn(cpu_dev, "failed to add opps to the device\n");
		return ret;
	}

	ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
		return ret;
	}

	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
			__func__, ret);
		return ret;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	priv->domain_id = handle->perf_ops->device_domain_id(cpu_dev);

	policy->driver_data = priv;
	policy->freq_table = freq_table;

	/* SCMI allows DVFS request for any domain from any CPU */
	policy->dvfs_possible_from_any_cpu = true;

	latency = handle->perf_ops->transition_latency_get(handle, cpu_dev);
	if (!latency)
		latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = latency;

	policy->fast_switch_possible = true;
	return 0;

out_free_priv:
	kfree(priv);
out_free_opp:
	dev_pm_opp_cpumask_remove_table(policy->cpus);

	return ret;
}

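/*
 * Per-policy teardown: unregister the cooling device, free the frequency
 * table and private data, and remove the OPP table entries.
 */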
static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;

	cpufreq_cooling_unregister(priv->cdev);
	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
	kfree(priv);
	dev_pm_opp_cpumask_remove_table(policy->related_cpus);

	return 0;
}

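/* Register the policy's CPUs as a cooling device once cpufreq is ready */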
static void scmi_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct scmi_data *priv = policy->driver_data;

	priv->cdev = of_cpufreq_cooling_register(policy);
}

static struct cpufreq_driver scmi_cpufreq_driver = {
	.name	= "scmi",
	.flags	= CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
		  CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify	= cpufreq_generic_frequency_table_verify,
	.attr	= cpufreq_generic_attr,
	.target_index	= scmi_cpufreq_set_target,
	.fast_switch	= scmi_cpufreq_fast_switch,
	.get	= scmi_cpufreq_get_rate,
	.init	= scmi_cpufreq_init,
	.exit	= scmi_cpufreq_exit,
	.ready	= scmi_cpufreq_ready,
};

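/*
 * Bind to the SCMI performance protocol device: stash the protocol handle
 * and register the cpufreq driver.
 */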
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
	int ret;

	handle = sdev->handle;

	if (!handle || !handle->perf_ops)
		return -ENODEV;

	ret = cpufreq_register_driver(&scmi_cpufreq_driver);
	if (ret) {
		dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n",
			__func__, ret);
	}

	return ret;
}

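/* Unregister the cpufreq driver when the SCMI device goes away */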
static void scmi_cpufreq_remove(struct scmi_device *sdev)
{
	cpufreq_unregister_driver(&scmi_cpufreq_driver);
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_PERF },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_cpufreq_drv = {
	.name		= "scmi-cpufreq",
	.probe		= scmi_cpufreq_probe,
	.remove		= scmi_cpufreq_remove,
	.id_table	= scmi_id_table,
};
module_scmi_driver(scmi_cpufreq_drv);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");