/*
 * Synopsys HSDK SDP Generic PLL clock driver
 *
 * Copyright (C) 2017 Synopsys
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define CGU_PLL_CTRL	0x000 /* ARC PLL control register */
#define CGU_PLL_STATUS	0x004 /* ARC PLL status register */
#define CGU_PLL_FMEAS	0x008 /* ARC PLL frequency measurement register */
#define CGU_PLL_MON	0x00C /* ARC PLL monitor register */

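/*
 * CGU_PLL_CTRL bit layout, as implied by the shift/mask definitions below:
 * [0] PD, [1] BYPASS, [3:2] ODIV, [8:4] IDIV, [15:9] FBDIV, BAND from bit 20.
 */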
#define CGU_PLL_CTRL_ODIV_SHIFT		2
#define CGU_PLL_CTRL_IDIV_SHIFT		4
#define CGU_PLL_CTRL_FBDIV_SHIFT	9
#define CGU_PLL_CTRL_BAND_SHIFT		20

#define CGU_PLL_CTRL_ODIV_MASK		GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
#define CGU_PLL_CTRL_IDIV_MASK		GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
#define CGU_PLL_CTRL_FBDIV_MASK		GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)

#define CGU_PLL_CTRL_PD			BIT(0)
#define CGU_PLL_CTRL_BYPASS		BIT(1)

#define CGU_PLL_STATUS_LOCK		BIT(0)
#define CGU_PLL_STATUS_ERR		BIT(1)

#define HSDK_PLL_MAX_LOCK_TIME		100 /* 100 us */

#define CGU_PLL_SOURCE_MAX		1

#define CORE_IF_CLK_THRESHOLD_HZ	500000000
#define CREG_CORE_IF_CLK_DIV_1		0x0
#define CREG_CORE_IF_CLK_DIV_2		0x1

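/*
 * One PLL setting: the target output rate in Hz plus the raw values that are
 * programmed into the IDIV/FBDIV/ODIV/BAND fields of CGU_PLL_CTRL. The
 * per-device tables below are terminated by an all-zero entry.
 */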
struct hsdk_pll_cfg {
	u32 rate;
	u32 idiv;
	u32 fbdiv;
	u32 odiv;
	u32 band;
};

static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
	{ 100000000,  0, 11, 3, 0 },
	{ 133000000,  0, 15, 3, 0 },
	{ 200000000,  1, 47, 3, 0 },
	{ 233000000,  1, 27, 2, 0 },
	{ 300000000,  1, 35, 2, 0 },
	{ 333000000,  1, 39, 2, 0 },
	{ 400000000,  1, 47, 2, 0 },
	{ 500000000,  0, 14, 1, 0 },
	{ 600000000,  0, 17, 1, 0 },
	{ 700000000,  0, 20, 1, 0 },
	{ 800000000,  0, 23, 1, 0 },
	{ 900000000,  1, 26, 0, 0 },
	{ 1000000000, 1, 29, 0, 0 },
	{ 1100000000, 1, 32, 0, 0 },
	{ 1200000000, 1, 35, 0, 0 },
	{ 1300000000, 1, 38, 0, 0 },
	{ 1400000000, 1, 41, 0, 0 },
	{ 1500000000, 1, 44, 0, 0 },
	{ 1600000000, 1, 47, 0, 0 },
	{}
};

static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
	{ 297000000,  0, 21, 2, 0 },
	{ 540000000,  0, 19, 1, 0 },
	{ 594000000,  0, 21, 1, 0 },
	{}
};

struct hsdk_pll_clk {
	struct clk_hw hw;
	void __iomem *regs;
	void __iomem *spec_regs;
	const struct hsdk_pll_devdata *pll_devdata;
	struct device *dev;
};

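/*
 * Per-compatible match data: the divider table used for this PLL and the
 * callback that actually reprograms it. The core PLL has a dedicated
 * callback because it must also manage the CREG core interface clock
 * divider; the other PLLs share the common one.
 */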
struct hsdk_pll_devdata {
	const struct hsdk_pll_cfg *pll_cfg;
	int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
			   const struct hsdk_pll_cfg *cfg);
};

static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);

static const struct hsdk_pll_devdata core_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_core_update_rate,
};

static const struct hsdk_pll_devdata sdt_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static const struct hsdk_pll_devdata hdmi_pll_devdata = {
	.pll_cfg = hdmi_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
{
	iowrite32(val, clk->regs + reg);
}

static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
{
	return ioread32(clk->regs + reg);
}

static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
				    const struct hsdk_pll_cfg *cfg)
{
	u32 val = 0;

	/* Powerdown and Bypass bits should be cleared */
	val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
	val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
	val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
	val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;

	dev_dbg(clk->dev, "write configuration: %#x\n", val);

	hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}

static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
}

static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
}

static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
{
	return container_of(hw, struct hsdk_pll_clk, hw);
}

static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	u32 val;
	u64 rate;
	u32 idiv, fbdiv, odiv;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);

	val = hsdk_pll_read(clk, CGU_PLL_CTRL);

	dev_dbg(clk->dev, "current configuration: %#x\n", val);

	/* Check if PLL is disabled */
	if (val & CGU_PLL_CTRL_PD)
		return 0;

	/* Check if PLL is bypassed */
	if (val & CGU_PLL_CTRL_BYPASS)
		return parent_rate;

	/* input divider = reg.idiv + 1 */
	idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
	/* fb divider = 2*(reg.fbdiv + 1) */
	fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
	/* output divider = 2^(reg.odiv) */
	odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);

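	/*
	 * rate = parent_rate * fbdiv / (idiv * odiv).
	 *
	 * Worked example for the 1 GHz entry of asdt_pll_cfg (idiv = 1,
	 * fbdiv = 29, odiv = 0), assuming a 33.33 MHz reference clock:
	 * 33.33 MHz * 2 * (29 + 1) / ((1 + 1) * 2^0) ~= 1 GHz.
	 */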
	rate = (u64)parent_rate * fbdiv;
	do_div(rate, idiv * odiv);

	return rate;
}

static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	int i;
	unsigned long best_rate;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	if (pll_cfg[0].rate == 0)
		return -EINVAL;

	best_rate = pll_cfg[0].rate;

	for (i = 1; pll_cfg[i].rate != 0; i++) {
		if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
			best_rate = pll_cfg[i].rate;
	}

	dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);

	return best_rate;
}

static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait until the CGU relocks, then check the error status.
	 * If the CGU is still not locked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	return 0;
}

static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	/*
	 * When core clock exceeds 500MHz, the divider for the interface
	 * clock must be programmed to div-by-2.
	 */
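	/*
	 * Note that the divider is switched to div-by-2 *before* the PLL is
	 * reprogrammed, so the interface clock does not exceed the threshold
	 * while the rate is being raised; it is switched back to div-by-1
	 * only after a lower rate has been set successfully.
	 */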
	if (rate > CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);

	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait until the CGU relocks, then check the error status.
	 * If the CGU is still not locked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	/*
	 * Program the divider back to div-by-1 if we successfully set the
	 * core clock below the 500 MHz threshold.
	 */
	if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);

	return 0;
}

static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	int i;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	for (i = 0; pll_cfg[i].rate != 0; i++) {
		if (pll_cfg[i].rate == rate) {
			return clk->pll_devdata->update_rate(clk, rate,
							     &pll_cfg[i]);
		}
	}

	dev_err(clk->dev, "invalid rate=%ld, parent_rate=%ld\n", rate,
			parent_rate);

	return -EINVAL;
}

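/*
 * Only the discrete rates listed in the per-device table are supported:
 * round_rate() snaps to the closest table entry and set_rate() accepts
 * only an exact match.
 */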
static const struct clk_ops hsdk_pll_ops = {
	.recalc_rate = hsdk_pll_recalc_rate,
	.round_rate = hsdk_pll_round_rate,
	.set_rate = hsdk_pll_set_rate,
};

static int hsdk_pll_clk_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *mem;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };
	struct device *dev = &pdev->dev;

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pll_clk->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(pll_clk->regs))
		return PTR_ERR(pll_clk->regs);

	init.name = dev->of_node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(dev->of_node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(dev->of_node);
	if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
		dev_err(dev, "wrong number of clock parents: %u\n", num_parents);
		return -EINVAL;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->dev = dev;
	pll_clk->pll_devdata = of_device_get_match_data(dev);

	if (!pll_clk->pll_devdata) {
		dev_err(dev, "No OF match data provided\n");
		return -EINVAL;
	}

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret) {
		dev_err(dev, "failed to register %s clock\n", init.name);
		return ret;
	}

	return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
			&pll_clk->hw);
}

static int hsdk_pll_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}

static void __init of_hsdk_pll_clk_setup(struct device_node *node)
{
	int ret;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };

	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return;

	pll_clk->regs = of_iomap(node, 0);
	if (!pll_clk->regs) {
		pr_err("failed to map pll registers\n");
		goto err_free_pll_clk;
	}

	pll_clk->spec_regs = of_iomap(node, 1);
	if (!pll_clk->spec_regs) {
		pr_err("failed to map pll spec registers\n");
		goto err_unmap_comm_regs;
	}

	init.name = node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(node);
	if (num_parents > CGU_PLL_SOURCE_MAX) {
		pr_err("too many clock parents: %u\n", num_parents);
		goto err_unmap_spec_regs;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->pll_devdata = &core_pll_devdata;

	ret = clk_hw_register(NULL, &pll_clk->hw);
	if (ret) {
		pr_err("failed to register %s clock\n", node->name);
		goto err_unmap_spec_regs;
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
	if (ret) {
		pr_err("failed to add hw provider for %s clock\n", node->name);
		goto err_unmap_spec_regs;
	}

	return;

err_unmap_spec_regs:
	iounmap(pll_clk->spec_regs);
err_unmap_comm_regs:
	iounmap(pll_clk->regs);
err_free_pll_clk:
	kfree(pll_clk);
}

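/*
 * Illustrative device tree usage for the core PLL (ADDR/SIZE values and the
 * input clock label are placeholders, not taken from a real .dts). Two reg
 * regions are required: the PLL registers and the CREG core interface clock
 * divider register.
 *
 *	core_clk: core-clk@ADDR {
 *		compatible = "snps,hsdk-core-pll-clock";
 *		reg = <ADDR SIZE>, <CREG_ADDR CREG_SIZE>;
 *		#clock-cells = <0>;
 *		clocks = <&input_clk>;
 *	};
 */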
/* Core PLL is needed early for the ARC CPU timers, hence CLK_OF_DECLARE */
CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
	       of_hsdk_pll_clk_setup);

static const struct of_device_id hsdk_pll_clk_id[] = {
	{ .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata},
	{ .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata},
	{ }
};

static struct platform_driver hsdk_pll_clk_driver = {
	.driver = {
		.name = "hsdk-gp-pll-clock",
		.of_match_table = hsdk_pll_clk_id,
	},
	.probe = hsdk_pll_clk_probe,
	.remove = hsdk_pll_clk_remove,
};
builtin_platform_driver(hsdk_pll_clk_driver);