/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[8];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

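/*
 * Each array index in clksel[] is the raw CLKSEL value as programmed in
 * the mux control register; entries without CLKSEL_VALID set are not
 * selectable and are skipped when the mux parents are built (see
 * get_pll_div() and create_mux_common() below).
 */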
struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * If this flag is set, the cmux frequency must be >= the platform PLL
 * frequency; if not set, the cmux frequency must be >= platform PLL/2.
 */
#define CG_CMUX_GE_PLAT		1

#define CG_PLL_8BIT		2	/* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3			4	/* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN	8

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info; /* mutable copy */
	struct clk *sysclk, *coreclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;

static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
	if (cg->info.flags & CG_LITTLE_ENDIAN)
		iowrite32(val, reg);
	else
		iowrite32be(val, reg);
}

static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
	u32 val;

	if (cg->info.flags & CG_LITTLE_ENDIAN)
		val = ioread32(reg);
	else
		val = ioread32be(reg);

	return val;
}

static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo ls1012a_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{},
		[2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	},
};

#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000

static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&t1023_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls1043a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1043a_hwa1, &ls1043a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1046a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1046a_hwa1, &ls1046a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1088a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,ls1012a-clockgen",
		.cmux_groups = {
			&ls1012a_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls2080a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1
		},
		.pll_mask = 0x1f,
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p5020_cmux_grp1, &p5020_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x0f,
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x03,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{},
};

struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27
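
/*
 * CLKSEL_MASK and CLKSEL_SHIFT describe the CLKSEL field of the cmux and
 * hwaccel control registers accessed below: bits 30:27 hold the raw
 * clock-select value, which in turn indexes clockgen_muxinfo::clksel[].
 */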

static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long max_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;
		if (rate > max_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 max_rate, pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	if (cg->info.flags & CG_VER3)
		hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
	else
		hwc->reg = cg->regs + 0x20 * idx;

	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency.  If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	max_rate = clk_get_rate(div->clk);
	pct80_rate = max_rate * 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init clockgen_init(struct device_node *np);

/*
 * Legacy nodes may get probed before the parent clockgen node.
 * It is assumed that device trees with legacy nodes will not
 * contain a "clocks" property -- otherwise the input clocks may
 * not be initialized at this point.
 */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node)
		clockgen_init(of_get_parent(np));
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		return;
	}
}

static struct clk __init
*sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}

static struct clk __init *input_clock(const char *name, struct clk *clk)
{
	const char *input_name;

	/* Register the input clock under the desired name. */
	input_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, input_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

static struct clk __init *input_clock_by_name(const char *name,
					      const char *dtname)
{
	struct clk *clk;

	clk = of_clk_get_by_name(clockgen.node, dtname);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

static struct clk __init *input_clock_by_index(const char *name, int idx)
{
	struct clk *clk;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_name(name, "sysclk");
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_index(name, 0);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input sysclk\n", __func__);
	return NULL;
}

static struct clk * __init create_coreclk(const char *name)
{
	struct clk *clk;

	clk = input_clock_by_name(name, "coreclk");
	if (!IS_ERR(clk))
		return clk;

	/*
	 * This indicates a mix of legacy nodes with the new coreclk
	 * mechanism, which should never happen.  If this error occurs,
	 * don't use the wrong input clock just because coreclk isn't
	 * ready yet.
	 */
	if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER))
		return clk;

	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

#define PLL_KILL BIT(31)

static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	const char *input = "cg-sysclk";
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (cg->coreclk && idx != PLATFORM_PLL) {
		if (IS_ERR(cg->coreclk))
			return;

		input = "cg-coreclk";
	}

	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Get the multiple of PLL */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;
		int ret;

		/*
		 * For platform PLL, there are 8 divider clocks.
		 * For core PLL, there are 4 divider clocks at most.
		 */
		if (idx != PLATFORM_PLL && i >= 4)
			break;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, input, 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
		ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
		if (ret != 0)
			pr_err("%s: %s: register to lookup table failed %d\n",
			       __func__, pll->div[i].name, ret);

	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}

static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case 0:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case 1:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case 2:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case 3:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case 4:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	case 5:
		if (idx != 0)
			goto bad_args;
		clk = cg->coreclk;
		if (IS_ERR(clk))
			clk = NULL;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}
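
/*
 * Illustrative consumer specifiers for the two-cell phandle decoded in
 * clockgen_clk_get() above; the node label is a placeholder and is not
 * taken from any particular device tree:
 *
 *	clocks = <&clockgen 1 0>;	type 1, index 0: first core cmux
 *	clocks = <&clockgen 4 1>;	type 4, index 1: platform PLL / 2
 */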

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

static void __init clockgen_init(struct device_node *np)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %pOF\n", __func__, np);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %pOF regs\n", __func__,
				       guts);
			}
			of_node_put(guts);
		}

	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	clockgen.coreclk = create_coreclk("cg-coreclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %s: %d\n",
		       __func__, np->name, ret);
	}

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);
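
/*
 * Sketch of a provider node that these initializers would bind against;
 * the unit address mirrors the ls1021a fallback mapping used in
 * clockgen_init(), the two clock cells match clockgen_clk_get(), and the
 * 100 MHz sysclk is only an example value, not taken from a real board:
 *
 *	clockgen: clocking@1ee1000 {
 *		compatible = "fsl,ls1021a-clockgen";
 *		reg = <0x1ee1000 0x1000>;
 *		#clock-cells = <2>;
 *		clock-frequency = <100000000>;
 *	};
 */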