/*
 * V4L2 clock service
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>

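/*
 * Overview: this service lets platform/bridge drivers register software
 * clocks by device name, and lets subdevice drivers look them up with
 * v4l2_clk_get(). If the common clock framework (CCF) provides a clock
 * for the device, that clock is wrapped and used instead.
 *
 * Illustrative usage sketch (not taken from this file; "1-0030" and
 * "client" stand for a hypothetical I2C sensor, and
 * v4l2_clk_register_fixed() is the convenience wrapper from
 * media/v4l2-clk.h that supplies THIS_MODULE as the owner):
 *
 *	In the bridge driver:
 *		struct v4l2_clk *clk =
 *			v4l2_clk_register_fixed("1-0030", 24000000);
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *
 *	In the sensor driver:
 *		struct v4l2_clk *clk = v4l2_clk_get(&client->dev, NULL);
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *		v4l2_clk_enable(clk);
 *		...
 *		v4l2_clk_disable(clk);
 *		v4l2_clk_put(clk);
 */
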
static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);

static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
{
	struct v4l2_clk *clk;

	list_for_each_entry(clk, &clk_list, list)
		if (!strcmp(dev_id, clk->dev_id))
			return clk;

	return ERR_PTR(-ENODEV);
}

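/*
 * Look a clock up for a device: try the CCF first (propagating probe
 * deferral), then fall back to clocks registered here, matched by
 * dev_name() and, failing that, by the OF node name.
 */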
struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
	struct v4l2_clk *clk;
	struct clk *ccf_clk = clk_get(dev, id);
	char clk_name[V4L2_CLK_NAME_SIZE];

	if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
		return ERR_PTR(-EPROBE_DEFER);

	if (!IS_ERR_OR_NULL(ccf_clk)) {
		clk = kzalloc(sizeof(*clk), GFP_KERNEL);
		if (!clk) {
			clk_put(ccf_clk);
			return ERR_PTR(-ENOMEM);
		}
		clk->clk = ccf_clk;

		return clk;
	}

	mutex_lock(&clk_lock);
	clk = v4l2_clk_find(dev_name(dev));

	/* if dev_name is not found, try to use the OF name instead */
	if (PTR_ERR(clk) == -ENODEV && dev->of_node) {
		v4l2_clk_name_of(clk_name, sizeof(clk_name), dev->of_node);
		clk = v4l2_clk_find(clk_name);
	}

	if (!IS_ERR(clk))
		atomic_inc(&clk->use_count);
	mutex_unlock(&clk_lock);

	return clk;
}
EXPORT_SYMBOL(v4l2_clk_get);

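/*
 * Drop a reference to a clock: a no-op on error pointers; frees the
 * wrapper (and releases the CCF clock) for CCF-backed clocks, otherwise
 * decrements the use count of a registered clock.
 */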
void v4l2_clk_put(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;

	if (IS_ERR(clk))
		return;

	if (clk->clk) {
		clk_put(clk->clk);
		kfree(clk);
		return;
	}

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk)
			atomic_dec(&clk->use_count);

	mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(v4l2_clk_put);

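/*
 * Pin the clock provider's module while the clock is in use: returns
 * -ENODEV if the clock is no longer registered, -EFAULT if the module
 * reference cannot be taken, 0 on success.
 */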
static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;
	int ret = -ENODEV;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk) {
			ret = !try_module_get(clk->ops->owner);
			if (ret)
				ret = -EFAULT;
			break;
		}

	mutex_unlock(&clk_lock);

	return ret;
}

static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
{
	module_put(clk->ops->owner);
}

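/*
 * Enable a clock: CCF-backed clocks are prepared and enabled directly;
 * otherwise the provider module is pinned and ops->enable() is called on
 * the 0 -> 1 transition of the enable count.
 */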
int v4l2_clk_enable(struct v4l2_clk *clk)
{
	int ret;

	if (clk->clk)
		return clk_prepare_enable(clk->clk);

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);

	if (++clk->enable == 1 && clk->ops->enable) {
		ret = clk->ops->enable(clk);
		if (ret < 0)
			clk->enable--;
	}

	mutex_unlock(&clk->lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_enable);

/*
 * You might Oops if you try to disable a disabled clock: the driver isn't
 * locked then and could have been unloaded by now, so don't do that.
 */
void v4l2_clk_disable(struct v4l2_clk *clk)
{
	int enable;

	if (clk->clk)
		return clk_disable_unprepare(clk->clk);

	mutex_lock(&clk->lock);

	enable = --clk->enable;
	if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
		 clk->dev_id))
		clk->enable++;
	else if (!enable && clk->ops->disable)
		clk->ops->disable(clk);

	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);
}
EXPORT_SYMBOL(v4l2_clk_disable);

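/*
 * Query the clock rate: CCF-backed clocks use clk_get_rate(); for
 * software clocks errors come back as a negative errno cast to
 * unsigned long, so callers should check for values in the errno range.
 */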
unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
	int ret;

	if (clk->clk)
		return clk_get_rate(clk->clk);

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->get_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->get_rate(clk);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_get_rate);

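/*
 * Set the clock rate: CCF-backed clocks round the rate first via
 * clk_round_rate(); software clocks delegate to ops->set_rate(), with
 * -ENOSYS if the provider doesn't implement it.
 */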
int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
	int ret;

	if (clk->clk) {
		long r = clk_round_rate(clk->clk, rate);

		if (r < 0)
			return r;
		return clk_set_rate(clk->clk, r);
	}

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->set_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->set_rate(clk, rate);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_set_rate);

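/*
 * Register a software clock under a unique device name. Returns the new
 * clock or an ERR_PTR(): -EINVAL on missing arguments, -ENOMEM on
 * allocation failure, -EEXIST if a clock with this dev_id already exists.
 */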
struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
				   const char *dev_id,
				   void *priv)
{
	struct v4l2_clk *clk;
	int ret;

	if (!ops || !dev_id)
		return ERR_PTR(-EINVAL);

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
	if (!clk->dev_id) {
		ret = -ENOMEM;
		goto ealloc;
	}
	clk->ops = ops;
	clk->priv = priv;
	atomic_set(&clk->use_count, 0);
	mutex_init(&clk->lock);

	mutex_lock(&clk_lock);
	if (!IS_ERR(v4l2_clk_find(dev_id))) {
		mutex_unlock(&clk_lock);
		ret = -EEXIST;
		goto eexist;
	}
	list_add_tail(&clk->list, &clk_list);
	mutex_unlock(&clk_lock);

	return clk;

eexist:
ealloc:
	kfree(clk->dev_id);
	kfree(clk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(v4l2_clk_register);

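/*
 * Unregister a software clock. Refuses (with a WARN) if the clock still
 * has users; callers must drop all references with v4l2_clk_put() first.
 */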
void v4l2_clk_unregister(struct v4l2_clk *clk)
{
	if (WARN(atomic_read(&clk->use_count),
		 "%s(): Refusing to unregister ref-counted %s clock!\n",
		 __func__, clk->dev_id))
		return;

	mutex_lock(&clk_lock);
	list_del(&clk->list);
	mutex_unlock(&clk_lock);

	kfree(clk->dev_id);
	kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);

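/* Fixed-rate clock: a minimal provider that only reports a constant rate */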
struct v4l2_clk_fixed {
	unsigned long rate;
	struct v4l2_clk_ops ops;
};

static unsigned long fixed_get_rate(struct v4l2_clk *clk)
{
	struct v4l2_clk_fixed *priv = clk->priv;

	return priv->rate;
}

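/*
 * Register a fixed-rate clock; typically reached via the
 * v4l2_clk_register_fixed() wrapper in media/v4l2-clk.h, which supplies
 * THIS_MODULE as the owner.
 */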
struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
				unsigned long rate, struct module *owner)
{
	struct v4l2_clk *clk;
	struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->rate = rate;
	priv->ops.get_rate = fixed_get_rate;
	priv->ops.owner = owner;

	clk = v4l2_clk_register(&priv->ops, dev_id, priv);
	if (IS_ERR(clk))
		kfree(priv);

	return clk;
}
EXPORT_SYMBOL(__v4l2_clk_register_fixed);

void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
{
	kfree(clk->priv);
	v4l2_clk_unregister(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister_fixed);