/*
 *  linux/include/linux/clk.h
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed.  Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE. Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 */
#define PRE_RATE_CHANGE			BIT(0)
#define POST_RATE_CHANGE		BIT(1)
#define ABORT_RATE_CHANGE		BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk			*clk;
	struct srcu_notifier_head	notifier_head;
	struct list_head		node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future.  For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk		*clk;
	unsigned long		old_rate;
	unsigned long		new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char		*id;
	struct clk		*clk;
};

#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
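
/*
 * Example: a minimal rate-change notifier sketch (illustrative only, not part
 * of the API above; "my_clk" and "my_recalc_dividers()" are hypothetical
 * consumer-side names):
 *
 *	static int my_clk_notify(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		switch (event) {
 *		case PRE_RATE_CHANGE:
 *			// stop work that depends on the old rate
 *			return NOTIFY_OK;
 *		case POST_RATE_CHANGE:
 *			my_recalc_dividers(ndata->new_rate);	// hypothetical helper
 *			return NOTIFY_OK;
 *		case ABORT_RATE_CHANGE:
 *			// rate is unchanged; resume normal operation
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block my_clk_nb = { .notifier_call = my_clk_notify };
 *
 *	ret = clk_notifier_register(my_clk, &my_clk_nb);
 */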

/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 *		      for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
 * success, or a negative errno otherwise.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, or a negative errno
 * on failure.
 */
int clk_get_phase(struct clk *clk);
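
/*
 * Example: shifting a clock's phase (illustrative sketch; "sclk" is a
 * hypothetical clock obtained with clk_get() and error handling is elided):
 *
 *	ret = clk_set_phase(sclk, 90);	// request a 90 degree shift
 *	if (!ret)
 *		pr_debug("phase now %d degrees\n", clk_get_phase(sclk));
 */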

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, or a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, or a
 * negative errno on failure.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
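
/*
 * Example: requesting a 50% duty cycle and reading it back as a percentage
 * (illustrative sketch; "pwm_clk" is a hypothetical clock and error handling
 * is elided):
 *
 *	ret = clk_set_duty_cycle(pwm_clk, 1, 2);	// num/den = 1/2
 *	if (!ret)
 *		pr_debug("duty cycle: %d%%\n",
 *			 clk_get_scaled_duty_cycle(pwm_clk, 100));
 */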

/**
 * clk_is_match - check if two clks point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *                          producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider. It prevents any other consumer from executing, even indirectly,
 * an operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 *                          producer
 * @clk: clock source
 *
 * This function allows a driver to release the exclusivity it previously got
 * from clk_rate_exclusive_get().
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);
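
/*
 * Example: holding the rate steady around a rate-sensitive transfer
 * (illustrative sketch; "bus_clk" and "do_timed_transfer()" are hypothetical):
 *
 *	ret = clk_rate_exclusive_get(bus_clk);
 *	if (ret)
 *		return ret;
 *	do_timed_transfer();			// rate cannot change underneath us
 *	clk_rate_exclusive_put(bus_clk);	// balance the get
 */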

#else

static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline long clk_set_phase(struct clk *clk, int phase)
{
	return -ENOTSUPP;
}

static inline long clk_get_phase(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}

static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
						     unsigned int scale)
{
	return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;
}

static inline int clk_rate_exclusive_get(struct clk *clk)
{
	return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

#endif

/**
 * clk_prepare - prepare a clock source
 * @clk: clock source
 *
 * This prepares the clock source for use.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
				  const struct clk_bulk_data *clks);
#else
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}

static inline int __must_check clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock.  The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks)
{
	might_sleep();
}
#endif

#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);

/**
 * clk_bulk_get - lookup and obtain a number of references to clock producers.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation. If any of the clks cannot be acquired, then any clks
 * that were obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in the clk_bulk_data table are obtained
 * successfully, or valid IS_ERR() condition containing errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);
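
/*
 * Example: acquiring and enabling several clocks at once (illustrative
 * sketch; the clock names "bus" and "core" and the variable "dev" are
 * hypothetical, and cleanup on later errors is elided):
 *
 *	struct clk_bulk_data clks[] = {
 *		{ .id = "bus" },
 *		{ .id = "core" },
 *	};
 *
 *	ret = clk_bulk_get(dev, ARRAY_SIZE(clks), clks);
 *	if (ret)
 *		return ret;
 *	ret = clk_bulk_prepare_enable(ARRAY_SIZE(clks), clks);
 *	...
 *	clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
 *	clk_bulk_put(ARRAY_SIZE(clks), clks);
 */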

/**
 * devm_clk_bulk_get - managed get of multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
				   struct clk_bulk_data *clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_get should not be called from within interrupt context.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);
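
/*
 * Example: the typical probe() pattern with a managed clock (illustrative
 * sketch; "my_probe", "pdev" and the clock name "apb" are hypothetical):
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *		int ret;
 *
 *		clk = devm_clk_get(&pdev->dev, "apb");
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *
 *		ret = clk_prepare_enable(clk);	// balance with clk_disable_unprepare()
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */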

/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *			     clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id);

/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock cannot be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
				 const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 *		      longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 *		  This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put	- "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put	- "free" the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put	- "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * devm_clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */


/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way.  In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);
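
/*
 * Example: probing what rate the hardware can actually provide before
 * committing to it (illustrative sketch; "uart_clk" and "is_acceptable()"
 * are hypothetical):
 *
 *	long rounded = clk_round_rate(uart_clk, 48000000);
 *
 *	if (rounded > 0 && is_acceptable(rounded))	// hypothetical policy check
 *		ret = clk_set_rate(uart_clk, rounded);
 */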

/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 *                          clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get(). Caller must balance this call with a call to
 * clk_rate_exclusive_put().
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);
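
/*
 * Example: constraining a clock to a rate window (illustrative sketch;
 * "gpu_clk" and the 200-800 MHz window are hypothetical):
 *
 *	ret = clk_set_rate_range(gpu_clk, 200000000, 800000000);
 *
 * which has the same effect as requesting the same bounds through
 * clk_set_min_rate() and clk_set_max_rate().
 */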

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);
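
/*
 * Example: reparenting a mux clock after checking the candidate parent
 * (illustrative sketch; "mux_clk" and "pll_clk" are hypothetical):
 *
 *	if (clk_has_parent(mux_clk, pll_clk))
 *		ret = clk_set_parent(mux_clk, pll_clk);
 */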

/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer. In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

#else /* !CONFIG_HAVE_CLK */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}

static inline struct clk *devm_get_clk_from_child(struct device *dev,
				struct device_node *np, const char *con_id)
{
	return NULL;
}

static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

static inline int clk_enable(struct clk *clk)
{
	return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks, struct clk_bulk_data *clks)
{
	return 0;
}

static inline void clk_disable(struct clk *clk) {}

static inline void clk_bulk_disable(int num_clks,
				    struct clk_bulk_data *clks) {}

static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	return true;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}

static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}
#endif

/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);

	return ret;
}

/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}

static inline int __must_check clk_bulk_prepare_enable(int num_clks,
					struct clk_bulk_data *clks)
{
	int ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		return ret;
	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		clk_bulk_unprepare(num_clks, clks);

	return ret;
}

static inline void clk_bulk_disable_unprepare(int num_clks,
					      struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}

#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
#else
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
					     const char *name)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif
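
/*
 * Example: looking a clock up directly from a device tree node (illustrative
 * sketch; "np" is a hypothetical struct device_node * and the "baud" clock
 * name is assumed to exist in that node's "clock-names" property):
 *
 *	struct clk *clk = of_clk_get_by_name(np, "baud");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */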

#endif