/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
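
/*
 * For example, a CAPTURE (destination) MMAP buffer whose vb2 offset is,
 * say, 0x1000 is reported to userspace as 0x1000 + DST_QUEUE_OFF_BASE by
 * v4l2_m2m_adjust_mem_offset() below, and v4l2_m2m_mmap() subtracts the
 * base again before calling vb2_mmap(), so identical vb2 offsets can
 * coexist on the two queues.
 */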

enum v4l2_m2m_entity_type {
	MEM2MEM_ENT_TYPE_SOURCE,
	MEM2MEM_ENT_TYPE_SINK,
	MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
	"source",
	"sink",
	"proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @intf_devnode:	&struct media_intf_devnode pointer with the interface
 *			which controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
	struct media_entity	*source;
	struct media_pad	source_pad;
	struct media_entity	sink;
	struct media_pad	sink_pad;
	struct media_entity	proc;
	struct media_pad	proc_pads[2];
	struct media_intf_devnode *intf_devnode;
#endif

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
	struct v4l2_m2m_buffer *b, *tmp;
	struct vb2_v4l2_buffer *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
		if (b->vb.vb2_buf.index == idx) {
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
				   struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags_job, flags_out, flags_cap;

	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

	__v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
				!(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
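
/*
 * Typical driver usage (a sketch; "my_ctx", "my_dev" and their m2m_ctx /
 * m2m_dev members are driver-private names assumed here, not part of this
 * file): the driver's device_run() callback starts the hardware and, once
 * the transaction completes (usually in the interrupt handler), the driver
 * removes the processed buffers, marks them done and calls
 * v4l2_m2m_job_finish() so the next queued job can run:
 *
 *	struct vb2_v4l2_buffer *src, *dst;
 *
 *	src = v4l2_m2m_src_buf_remove(my_ctx->m2m_ctx);
 *	dst = v4l2_m2m_dst_buf_remove(my_ctx->m2m_ctx);
 *	vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
 *	vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);
 *	v4l2_m2m_job_finish(my_dev->m2m_dev, my_ctx->m2m_ctx);
 */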

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and is no
	   longer the owner of the queue. Otherwise we have an owner. */
	if (ret == 0)
		vq->owner = reqbufs->count ? file->private_data : NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
				       struct v4l2_buffer *buf)
{
	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			unsigned int i;

			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}
}

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_prepare_buf(vq, buf);
	if (ret)
		return ret;

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	v4l2_m2m_adjust_mem_offset(vq, buf);

	v4l2_m2m_try_schedule(m2m_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	__poll_t rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = EPOLLPRI;
		else if (req_events & EPOLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= EPOLLERR;
		goto end;
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (list_empty(&dst_q->done_list)) {
		/*
		 * If the last buffer was dequeued from the capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (dst_q->last_buffer_dequeued) {
			spin_unlock_irqrestore(&dst_q->done_lock, flags);
			return rc | EPOLLIN | EPOLLRDNORM;
		}

		poll_wait(file, &dst_q->done_wq, wait);
	}
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{
	struct media_entity *entity;
	struct media_pad *pads;
	char *name;
	unsigned int len;
	int num_pads;
	int ret;

	switch (type) {
	case MEM2MEM_ENT_TYPE_SOURCE:
		entity = m2m_dev->source;
		pads = &m2m_dev->source_pad;
		pads[0].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_SINK:
		entity = &m2m_dev->sink;
		pads = &m2m_dev->sink_pad;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		num_pads = 1;
		break;
	case MEM2MEM_ENT_TYPE_PROC:
		entity = &m2m_dev->proc;
		pads = m2m_dev->proc_pads;
		pads[0].flags = MEDIA_PAD_FL_SINK;
		pads[1].flags = MEDIA_PAD_FL_SOURCE;
		num_pads = 2;
		break;
	default:
		return -EINVAL;
	}

	entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
	if (type != MEM2MEM_ENT_TYPE_PROC) {
		entity->info.dev.major = VIDEO_MAJOR;
		entity->info.dev.minor = vdev->minor;
	}
	len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
	entity->name = name;
	entity->function = function;

	ret = media_entity_pads_init(entity, num_pads, pads);
	if (ret)
		return ret;
	ret = media_device_register_entity(mdev, entity);
	if (ret)
		return ret;

	return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	struct media_device *mdev = vdev->v4l2_dev->mdev;
	struct media_link *link;
	int ret;

	if (!mdev)
		return 0;

	/* A memory-to-memory device consists of two DMA engine entities and
	 * one video processing entity.
	 * The DMA engine entities are linked to a V4L interface.
	 */

	/* Create the three entities with their pads */
	m2m_dev->source = &vdev->entity;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		return ret;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
	if (ret)
		goto err_rel_entity0;
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
	if (ret)
		goto err_rel_entity1;

	/* Connect the three entities */
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rel_entity2;

	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (ret)
		goto err_rm_links0;

	/* Create video interface */
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
	if (!m2m_dev->intf_devnode) {
		ret = -ENOMEM;
		goto err_rm_links1;
	}

	/* Connect the two DMA engines to the interface */
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_devnode;
	}

	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
	if (!link) {
		ret = -ENOMEM;
		goto err_rm_intf_link;
	}
	return 0;

err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
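
/*
 * Example (a sketch; "my_m2m_ops", "my_device_run", "my_job_abort" and
 * "my_dev" are assumed driver-private names): a driver typically creates
 * one m2m device at probe time, passing its mandatory device_run callback
 * (job_ready and job_abort are optional):
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_abort	= my_job_abort,
 *	};
 *
 *	my_dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(my_dev->m2m_dev))
 *		return PTR_ERR(my_dev->m2m_dev);
 */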

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * If both queues use the same mutex, assign it as the common buffer
	 * queue lock of the m2m context. This lock is used by the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
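
/*
 * Example (a sketch; "my_ctx", "my_dev" and "my_queue_init" are assumed
 * driver-private names): a per-file-handle m2m context is usually set up
 * in the driver's open() callback and stored in the v4l2_fh:
 *
 *	my_ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(my_dev->m2m_dev, my_ctx,
 *					       my_queue_init);
 *	if (IS_ERR(my_ctx->fh.m2m_ctx))
 *		return PTR_ERR(my_ctx->fh.m2m_ctx);
 *
 * where my_queue_init() fills in both vb2_queue structures (ops, mem_ops,
 * a buf_struct_size of at least sizeof(struct v4l2_m2m_buffer), lock, ...).
 */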

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{
	struct v4l2_m2m_buffer *b = container_of(vbuf,
				struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
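
/*
 * Example (a sketch; "my_buf_queue" and "struct my_ctx" are assumed
 * driver-private names): drivers normally call v4l2_m2m_buf_queue() from
 * their vb2 .buf_queue operation so the buffer lands on the matching
 * rdy_queue:
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */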

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
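
/*
 * Example (a sketch; "my_ioctl_ops" and "my_querycap" are assumed driver
 * names): these helpers are meant to be plugged directly into a driver's
 * v4l2_ioctl_ops, leaving only device-specific ioctls to the driver:
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_querycap	= my_querycap,
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *		...
 *	};
 */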

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for both the output and the capture buffer queues.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	__poll_t ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
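
/*
 * Example (a sketch; "my_fops", "my_open" and "my_release" are assumed
 * driver names): the fop helpers slot straight into v4l2_file_operations,
 * with video_ioctl2 dispatching to the ioctl helpers above:
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */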