1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/bitops.h>
8 #include <linux/err.h>
9 #include <linux/bug.h>
10 #include <linux/export.h>
11 #include <linux/clk-provider.h>
12 #include <linux/delay.h>
13 #include <linux/regmap.h>
14 #include <linux/math64.h>
15
16 #include <asm/div64.h>
17
18 #include "clk-rcg.h"
19 #include "common.h"
20
21 #define CMD_REG 0x0
22 #define CMD_UPDATE BIT(0)
23 #define CMD_ROOT_EN BIT(1)
24 #define CMD_DIRTY_CFG BIT(4)
25 #define CMD_DIRTY_N BIT(5)
26 #define CMD_DIRTY_M BIT(6)
27 #define CMD_DIRTY_D BIT(7)
28 #define CMD_ROOT_OFF BIT(31)
29
30 #define CFG_REG 0x4
31 #define CFG_SRC_DIV_SHIFT 0
32 #define CFG_SRC_SEL_SHIFT 8
33 #define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
34 #define CFG_MODE_SHIFT 12
35 #define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
36 #define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
37 #define CFG_HW_CLK_CTRL_MASK BIT(20)
38
39 #define M_REG 0x8
40 #define N_REG 0xc
41 #define D_REG 0x10
42
/* Rounding direction used when searching the frequency table. */
enum freq_policy {
	FLOOR,	/* round down: pick the closest entry <= the requested rate */
	CEIL,	/* round up: pick the closest entry >= the requested rate */
};
47
clk_rcg2_is_enabled(struct clk_hw * hw)48 static int clk_rcg2_is_enabled(struct clk_hw *hw)
49 {
50 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
51 u32 cmd;
52 int ret;
53
54 ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
55 if (ret)
56 return ret;
57
58 return (cmd & CMD_ROOT_OFF) == 0;
59 }
60
clk_rcg2_get_parent(struct clk_hw * hw)61 static u8 clk_rcg2_get_parent(struct clk_hw *hw)
62 {
63 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
64 int num_parents = clk_hw_get_num_parents(hw);
65 u32 cfg;
66 int i, ret;
67
68 ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
69 if (ret)
70 goto err;
71
72 cfg &= CFG_SRC_SEL_MASK;
73 cfg >>= CFG_SRC_SEL_SHIFT;
74
75 for (i = 0; i < num_parents; i++)
76 if (cfg == rcg->parent_map[i].cfg)
77 return i;
78
79 err:
80 pr_debug("%s: Clock %s has invalid parent, using default.\n",
81 __func__, clk_hw_get_name(hw));
82 return 0;
83 }
84
/*
 * Latch the pending CFG/M/N/D values into the RCG's active configuration.
 *
 * Sets CMD_UPDATE in CMD_REG and busy-waits (up to ~500us) for the
 * hardware to clear it, which indicates the new configuration has taken
 * effect.
 *
 * Returns 0 on success, a regmap error code, or -EBUSY on timeout.
 */
static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}
110
/* Program the CFG source-select field for @index, then latch it. */
static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 sel = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	int ret;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				 CFG_SRC_SEL_MASK, sel);
	if (ret)
		return ret;

	return update_config(rcg);
}
124
125 /*
126 * Calculate m/n:d rate
127 *
128 * parent_rate m
129 * rate = ----------- x ---
130 * hid_div n
131 */
132 static unsigned long
calc_rate(unsigned long rate,u32 m,u32 n,u32 mode,u32 hid_div)133 calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
134 {
135 if (hid_div) {
136 rate *= 2;
137 rate /= hid_div + 1;
138 }
139
140 if (mode) {
141 u64 tmp = rate;
142 tmp *= m;
143 do_div(tmp, n);
144 rate = tmp;
145 }
146
147 return rate;
148 }
149
/* Derive the current output rate from the CFG (and M/N, if present) registers. */
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
		/* N_REG holds ~(n - m); invert and add m to recover n */
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}
176
_freq_tbl_determine_rate(struct clk_hw * hw,const struct freq_tbl * f,struct clk_rate_request * req,enum freq_policy policy)177 static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
178 struct clk_rate_request *req,
179 enum freq_policy policy)
180 {
181 unsigned long clk_flags, rate = req->rate;
182 struct clk_hw *p;
183 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
184 int index;
185
186 switch (policy) {
187 case FLOOR:
188 f = qcom_find_freq_floor(f, rate);
189 break;
190 case CEIL:
191 f = qcom_find_freq(f, rate);
192 break;
193 default:
194 return -EINVAL;
195 };
196
197 if (!f)
198 return -EINVAL;
199
200 index = qcom_find_src_index(hw, rcg->parent_map, f->src);
201 if (index < 0)
202 return index;
203
204 clk_flags = clk_hw_get_flags(hw);
205 p = clk_hw_get_parent_by_index(hw, index);
206 if (!p)
207 return -EINVAL;
208
209 if (clk_flags & CLK_SET_RATE_PARENT) {
210 rate = f->freq;
211 if (f->pre_div) {
212 if (!rate)
213 rate = req->rate;
214 rate /= 2;
215 rate *= f->pre_div + 1;
216 }
217
218 if (f->n) {
219 u64 tmp = rate;
220 tmp = tmp * f->n;
221 do_div(tmp, f->m);
222 rate = tmp;
223 }
224 } else {
225 rate = clk_hw_get_rate(p);
226 }
227 req->best_parent_hw = p;
228 req->best_parent_rate = rate;
229 req->rate = f->freq;
230
231 return 0;
232 }
233
/* determine_rate callback: round the requested rate up (CEIL policy). */
static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}
241
/* determine_rate callback: round the requested rate down (FLOOR policy). */
static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
249
/*
 * Write the M, N, D and CFG registers for frequency-table entry @f
 * WITHOUT hitting the CMD update bit; callers latch the new values via
 * update_config() when appropriate.
 *
 * Returns 0 on success or a negative error (bad source index, regmap
 * failure).
 */
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg, mask;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
				rcg->cmd_rcgr + M_REG, mask, f->m);
		if (ret)
			return ret;

		/* N is programmed as ~(n - m), D as ~n (hardware encoding) */
		ret = regmap_update_bits(rcg->clkr.regmap,
				rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m));
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
				rcg->cmd_rcgr + D_REG, mask, ~f->n);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* Dual-edge mode is only needed for a true fraction (m != n) */
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
					mask, cfg);
}
287
/* Program frequency-table entry @f and latch it with the update bit. */
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	int ret;

	ret = __clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return update_config(rcg);
}
298
__clk_rcg2_set_rate(struct clk_hw * hw,unsigned long rate,enum freq_policy policy)299 static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
300 enum freq_policy policy)
301 {
302 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
303 const struct freq_tbl *f;
304
305 switch (policy) {
306 case FLOOR:
307 f = qcom_find_freq_floor(rcg->freq_tbl, rate);
308 break;
309 case CEIL:
310 f = qcom_find_freq(rcg->freq_tbl, rate);
311 break;
312 default:
313 return -EINVAL;
314 };
315
316 if (!f)
317 return -EINVAL;
318
319 return clk_rcg2_configure(rcg, f);
320 }
321
/* set_rate callback: round up against the frequency table. */
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}
327
/* set_rate callback: round down against the frequency table. */
static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
333
/* The frequency table fixes the source, so this reduces to a set_rate. */
static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}
339
/* FLOOR-policy counterpart of clk_rcg2_set_rate_and_parent(). */
static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
345
/* Standard RCG2 ops: rates are rounded up (CEIL) against the table. */
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
356
/* RCG2 ops variant that rounds rates down (FLOOR) against the table. */
const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
367
/* An M/N fraction (num/den) used by the pixel/eDP rate tables below. */
struct frac_entry {
	int num;	/* numerator (programmed as M) */
	int den;	/* denominator (programmed as N) */
};
372
/* Fractions used when the parent is not 810 MHz (see clk_edp_pixel_set_rate) */
static const struct frac_entry frac_table_675m[] = { /* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },		/* sentinel: num == 0 terminates the table */
};
383
384 static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
385 { 31, 211 }, /* 119 M */
386 { 32, 199 }, /* 130.25 M */
387 { 63, 307 }, /* 138.50 M */
388 { 11, 60 }, /* 148.50 M */
389 { 50, 263 }, /* 154 M */
390 { 31, 120 }, /* 205.25 M */
391 { 119, 359 }, /* 268.50 M */
392 { },
393 };
394
/*
 * Set an eDP pixel clock rate by picking an M/N fraction of the fixed
 * parent rate.  The fraction table is chosen by the parent rate (810 MHz
 * vs. the other supported source); the current pre-divider is read back
 * from CFG and preserved.
 *
 * Returns 0 on success, -EINVAL if no fraction lands within 100 kHz of
 * the parent rate.
 */
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;	/* acceptable error, in Hz */
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Parent rate this fraction would need for @rate */
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		/* Keep the currently-programmed pre-divider */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
433
/* The frequency table fixes the parent, so @index is ignored here. */
static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}
440
/*
 * determine_rate for eDP pixel clocks: force the parent named by the
 * frequency table, then report the rate the matching M/N fraction of
 * that parent would actually produce.
 *
 * Returns 0 on success, -EINVAL if no fraction is within 100 kHz.
 */
static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;	/* acceptable error, in Hz */
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	/* NOTE(review): a negative index is not checked before use below —
	 * assumes the table's src always maps to a valid parent; confirm. */
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Parent rate this fraction would need for req->rate */
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}
483
/* Ops for eDP pixel clocks: M/N fraction of a fixed, table-chosen parent. */
const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
494
/*
 * determine_rate for byte clocks: force the table's parent, ask it to
 * round near the requested rate, then report what the best half-integer
 * divider of that parent yields.
 */
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	/* 2n-1 divider encoding: div = ceil(2 * parent / rate) - 1, capped */
	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
518
clk_byte_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)519 static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
520 unsigned long parent_rate)
521 {
522 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
523 struct freq_tbl f = *rcg->freq_tbl;
524 unsigned long div;
525 u32 mask = BIT(rcg->hid_width) - 1;
526
527 div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
528 div = min_t(u32, div, mask);
529
530 f.pre_div = div;
531
532 return clk_rcg2_configure(rcg, &f);
533 }
534
/* The frequency table fixes the parent, so @index is ignored here. */
static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}
541
/* Ops for byte clocks: divider-only, single table-chosen parent. */
const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
552
/*
 * determine_rate for byte2 clocks: like clk_byte_determine_rate() but
 * uses whatever parent the framework proposed (req->best_parent_hw)
 * instead of forcing one from the frequency table.
 */
static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	/* 2n-1 divider encoding: div = ceil(2 * parent / rate) - 1, capped */
	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
575
/*
 * Program the byte2 clock's divider for @rate, keeping the currently
 * selected source (read back from CFG and matched against parent_map).
 *
 * Returns -EINVAL if the programmed source is not in parent_map.
 */
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	/* 2n-1 divider encoding: div = ceil(2 * parent / rate) - 1, capped */
	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	/* Reuse the source currently programmed in hardware */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}
604
/* The source is re-read from hardware in set_rate, so @index is ignored. */
static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}
611
/* Ops for byte2 clocks: divider-only, parent chosen by the framework. */
const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
622
/* Candidate M/N fractions tried, in order, for pixel clocks */
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ }	/* sentinel: num == 0 terminates the table */
};
631
/*
 * determine_rate for pixel clocks: for each candidate fraction, ask the
 * proposed parent to round near rate * den / num; accept the first one
 * the parent can hit within 100 kHz and report the resulting rate.
 */
static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;	/* acceptable error, in Hz */
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
			(src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}
654
/*
 * Set a pixel clock rate: keep the currently-programmed source and
 * pre-divider, and pick the first fraction whose required parent rate
 * is within 100 kHz of @parent_rate.
 *
 * Note: if the hardware source is not found in parent_map, f.src stays
 * at its zero-initialized value.
 */
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;	/* acceptable error, in Hz */
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	/* Reuse the source currently programmed in hardware */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		/* Parent rate this fraction would need for @rate */
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
			(parent_rate > (request + delta)))
			continue;

		/* Keep the currently-programmed pre-divider */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}
696
/* The source is re-read from hardware in set_rate, so @index is ignored. */
static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}
702
/* Ops for pixel clocks: M/N fraction search over frac_table_pixel. */
const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
713
/*
 * determine_rate for the GFX3D clock.  Parent indices are fixed by the
 * hardware muxing: 0 = XO, 2 = PLL9 (fixed rate), 3 = PLL2, 4 = PLL8.
 *
 * XO and PLL9 are used as-is when they match the request; otherwise the
 * rate is programmed into whichever of PLL2/PLL8 is NOT currently in
 * use, so the mux can glitch-free switch away from the active PLL
 * (ping-pong scheme).
 */
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { };
	struct clk_hw *p2, *p8, *p9, *xo;
	unsigned long p9_rate;
	int ret;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	p9 = clk_hw_get_parent_by_index(hw, 2);
	p2 = clk_hw_get_parent_by_index(hw, 3);
	p8 = clk_hw_get_parent_by_index(hw, 4);

	/* PLL9 is a fixed rate PLL */
	p9_rate = clk_hw_get_rate(p9);

	/* Cap the request at PLL9's rate */
	parent_req.rate = req->rate = min(req->rate, p9_rate);
	if (req->rate == p9_rate) {
		req->rate = req->best_parent_rate = p9_rate;
		req->best_parent_hw = p9;
		return 0;
	}

	if (req->best_parent_hw == p9) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p8) == req->rate)
			req->best_parent_hw = p8;
		else
			req->best_parent_hw = p2;
	} else if (req->best_parent_hw == p8) {
		req->best_parent_hw = p2;
	} else {
		req->best_parent_hw = p8;
	}

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;

	return 0;
}
762
/*
 * Switch the GFX3D mux to parent @index and latch it.  The rate itself
 * is set on the parent PLL; the RCG only muxes.
 */
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	/* Just mux it, we don't use the division or m/n hardware */
	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
778
/* Intentional no-op; see the comment below. */
static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}
789
/* Ops for the GFX3D clock: mux-only RCG with PLL ping-pong reparenting. */
const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
800
/*
 * Force the RCG root on via CMD_ROOT_EN and poll (up to ~500us) until
 * the root reports enabled.  Returns 0 on success, a regmap error, or
 * -ETIMEDOUT if the root never turns on.
 */
static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}
823
/* Drop the software force-enable vote (clear CMD_ROOT_EN). */
static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  CMD_ROOT_EN, 0);
}
831
832 static int
clk_rcg2_shared_force_enable_clear(struct clk_hw * hw,const struct freq_tbl * f)833 clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
834 {
835 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
836 int ret;
837
838 ret = clk_rcg2_set_force_enable(hw);
839 if (ret)
840 return ret;
841
842 ret = clk_rcg2_configure(rcg, f);
843 if (ret)
844 return ret;
845
846 return clk_rcg2_clear_force_enable(hw);
847 }
848
/*
 * set_rate for shared RCGs.  If the clock is currently disabled, only
 * stage the new configuration (it is latched later in enable); if it is
 * running, apply it under a force-enable window.
 */
static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case clock is disabled, update the CFG, M, N and D registers
	 * and don't hit the update bit of CMD register.
	 */
	if (!__clk_is_enabled(hw->clk))
		return __clk_rcg2_configure(rcg, f);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}
868
/* The shared set_rate path handles parent selection, so @index is unused. */
static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}
874
/*
 * Enable a shared RCG: force the root on, latch the configuration that
 * was staged by clk_rcg2_shared_set_rate(), then drop the force vote.
 */
static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
894
/*
 * Disable a shared RCG by parking it on the always-on safe source, then
 * restore the saved CFG so the staged rate survives for the next enable.
 */
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);

	/* Write back the stored configuration corresponding to current rate */
	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
}
926
/* Ops for shared RCGs: parked on a safe source while disabled. */
const struct clk_ops clk_rcg2_shared_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
938