/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>

#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			printk(KERN_ERR					\
				"magic mismatch: %x (expected %x)\n",	\
				is, should);				\
			BUG();						\
		}							\
	} while (0)

static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vbuf: " fmt, ## arg);	\
	} while (0)

/* --------------------------------------------------------------------- */

#define CALL(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : 0)
#define CALLPTR(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : NULL)

struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
{
	struct videobuf_buffer *vb;

	BUG_ON(q->msize < sizeof(*vb));

	if (!q->int_ops || !q->int_ops->alloc_vb) {
		printk(KERN_ERR "No specific ops defined!\n");
		BUG();
	}

	vb = q->int_ops->alloc_vb(q->msize);
	if (NULL != vb) {
		init_waitqueue_head(&vb->done);
		vb->magic = MAGIC_BUFFER;
	}

	return vb;
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);

static int state_neither_active_nor_queued(struct videobuf_queue *q,
					   struct videobuf_buffer *vb)
{
	unsigned long flags;
	bool rc;

	spin_lock_irqsave(q->irqlock, flags);
	rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
	spin_unlock_irqrestore(q->irqlock, flags);
	return rc;
}

int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
		    int non_blocking, int intr)
{
	bool is_ext_locked;
	int ret = 0;

	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);

	if (non_blocking) {
		if (state_neither_active_nor_queued(q, vb))
			return 0;
		return -EAGAIN;
	}

	is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);

	/* Release vdev lock to prevent this wait from blocking outside
	   access to the device. */
	if (is_ext_locked)
		mutex_unlock(q->ext_lock);
	if (intr)
		ret = wait_event_interruptible(vb->done,
				state_neither_active_nor_queued(q, vb));
	else
		wait_event(vb->done, state_neither_active_nor_queued(q, vb));
	/* Relock */
	if (is_ext_locked)
		mutex_lock(q->ext_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);

int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
		    struct v4l2_framebuffer *fbuf)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);

void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	if (q->int_ops->vaddr)
		return q->int_ops->vaddr(buf);
	return NULL;
}
EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);

/* --------------------------------------------------------------------- */

void videobuf_queue_core_init(struct videobuf_queue *q,
			      const struct videobuf_queue_ops *ops,
			      struct device *dev,
			      spinlock_t *irqlock,
			      enum v4l2_buf_type type,
			      enum v4l2_field field,
			      unsigned int msize,
			      void *priv,
			      struct videobuf_qtype_ops *int_ops,
			      struct mutex *ext_lock)
{
	BUG_ON(!q);
	memset(q, 0, sizeof(*q));
	q->irqlock = irqlock;
	q->ext_lock = ext_lock;
	q->dev = dev;
	q->type = type;
	q->field = field;
	q->msize = msize;
	q->ops = ops;
	q->priv_data = priv;
	q->int_ops = int_ops;

	/* All buffer operations are mandatory */
	BUG_ON(!q->ops->buf_setup);
	BUG_ON(!q->ops->buf_prepare);
	BUG_ON(!q->ops->buf_queue);
	BUG_ON(!q->ops->buf_release);

	/* Lock is mandatory for queue_cancel to work */
	BUG_ON(!irqlock);

	/* Implementations of the abstract methods are mandatory */
	BUG_ON(!q->int_ops);

	mutex_init(&q->vb_lock);
	init_waitqueue_head(&q->wait);
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
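
/*
 * Note: drivers normally do not call videobuf_queue_core_init() directly;
 * the memory-type backends (videobuf-dma-sg, videobuf-vmalloc,
 * videobuf-dma-contig) wrap it with their own struct videobuf_qtype_ops.
 * A sketch of such a wrapper (the names below are illustrative only, not
 * a real backend):
 *
 *	void videobuf_queue_example_init(struct videobuf_queue *q,
 *			const struct videobuf_queue_ops *ops,
 *			struct device *dev,
 *			spinlock_t *irqlock,
 *			enum v4l2_buf_type type,
 *			enum v4l2_field field,
 *			unsigned int msize,
 *			void *priv,
 *			struct mutex *ext_lock)
 *	{
 *		videobuf_queue_core_init(q, ops, dev, irqlock, type, field,
 *					 msize, priv, &example_qtype_ops,
 *					 ext_lock);
 *	}
 */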

/* Locking: Only used in bttv; unsafe, find a way to remove it */
int videobuf_queue_is_busy(struct videobuf_queue *q)
{
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (q->streaming) {
		dprintk(1, "busy: streaming active\n");
		return 1;
	}
	if (q->reading) {
		dprintk(1, "busy: pending read #1\n");
		return 1;
	}
	if (q->read_buf) {
		dprintk(1, "busy: pending read #2\n");
		return 1;
	}
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->map) {
			dprintk(1, "busy: buffer #%d mapped\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			dprintk(1, "busy: buffer #%d queued\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
			dprintk(1, "busy: buffer #%d active\n", i);
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);

/*
 * __videobuf_free() - free all the buffers and their control structures
 *
 * This function can only be called if streaming/reading is off, i.e.
 * no buffers are under control of the driver.
 */
/* Locking: Caller holds q->vb_lock */
static int __videobuf_free(struct videobuf_queue *q)
{
	int i;

	dprintk(1, "%s\n", __func__);
	if (!q)
		return 0;

	if (q->streaming || q->reading) {
		dprintk(1, "Cannot free buffers when streaming or reading\n");
		return -EBUSY;
	}

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		if (q->bufs[i] && q->bufs[i]->map) {
			dprintk(1, "Cannot free mmapped buffers\n");
			return -EBUSY;
		}

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return 0;
}

/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags = 0;
	int i;

	q->streaming = 0;
	q->reading = 0;
	wake_up_interruptible_sync(&q->wait);

	/* remove queued buffers from list */
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			list_del(&q->bufs[i]->queue);
			q->bufs[i]->state = VIDEOBUF_ERROR;
			wake_up_all(&q->bufs[i]->done);
		}
	}
	spin_unlock_irqrestore(q->irqlock, flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);

/* --------------------------------------------------------------------- */

/* Locking: Caller holds q->vb_lock */
enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
{
	enum v4l2_field field = q->field;

	BUG_ON(V4L2_FIELD_ANY == field);

	if (V4L2_FIELD_ALTERNATE == field) {
		if (V4L2_FIELD_TOP == q->last) {
			field = V4L2_FIELD_BOTTOM;
			q->last = V4L2_FIELD_BOTTOM;
		} else {
			field = V4L2_FIELD_TOP;
			q->last = V4L2_FIELD_TOP;
		}
	}
	return field;
}
EXPORT_SYMBOL_GPL(videobuf_next_field);

/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
			    struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	b->index = vb->i;
	b->type = type;

	b->memory = vb->memory;
	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		b->m.offset = vb->boff;
		b->length = vb->bsize;
		break;
	case V4L2_MEMORY_USERPTR:
		b->m.userptr = vb->baddr;
		b->length = vb->bsize;
		break;
	case V4L2_MEMORY_OVERLAY:
		b->m.offset = vb->boff;
		break;
	case V4L2_MEMORY_DMABUF:
		/* DMABUF is not handled by the videobuf framework */
		break;
	}

	b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	if (vb->map)
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (vb->state) {
	case VIDEOBUF_PREPARED:
	case VIDEOBUF_QUEUED:
	case VIDEOBUF_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VIDEOBUF_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VIDEOBUF_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VIDEOBUF_NEEDS_INIT:
	case VIDEOBUF_IDLE:
		/* nothing */
		break;
	}

	b->field = vb->field;
	b->timestamp = vb->ts;
	b->bytesused = vb->size;
	b->sequence = vb->field_count >> 1;
}

int videobuf_mmap_free(struct videobuf_queue *q)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_free(q);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);

/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
			  unsigned int bcount, unsigned int bsize,
			  enum v4l2_memory memory)
{
	unsigned int i;
	int err;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	err = __videobuf_free(q);
	if (0 != err)
		return err;

	/* Allocate and initialize buffers */
	for (i = 0; i < bcount; i++) {
		q->bufs[i] = videobuf_alloc_vb(q);

		if (NULL == q->bufs[i])
			break;

		q->bufs[i]->i = i;
		q->bufs[i]->memory = memory;
		q->bufs[i]->bsize = bsize;
		switch (memory) {
		case V4L2_MEMORY_MMAP:
			q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
			break;
		case V4L2_MEMORY_USERPTR:
		case V4L2_MEMORY_OVERLAY:
		case V4L2_MEMORY_DMABUF:
			/* nothing */
			break;
		}
	}

	if (!i)
		return -ENOMEM;

	dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);

	return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);

int videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);

int videobuf_reqbufs(struct videobuf_queue *q,
		     struct v4l2_requestbuffers *req)
{
	unsigned int size, count;
	int retval;

	if (req->memory != V4L2_MEMORY_MMAP &&
	    req->memory != V4L2_MEMORY_USERPTR &&
	    req->memory != V4L2_MEMORY_OVERLAY) {
		dprintk(1, "reqbufs: memory type invalid\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	if (req->type != q->type) {
		dprintk(1, "reqbufs: queue type invalid\n");
		retval = -EINVAL;
		goto done;
	}

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming already exists\n");
		retval = -EBUSY;
		goto done;
	}
	if (!list_empty(&q->stream)) {
		dprintk(1, "reqbufs: stream running\n");
		retval = -EBUSY;
		goto done;
	}

	if (req->count == 0) {
		dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
		retval = __videobuf_free(q);
		goto done;
	}

	count = req->count;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = 0;
	q->ops->buf_setup(q, &count, &size);
	dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
		count, size,
		(unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));

	retval = __videobuf_mmap_setup(q, count, size, req->memory);
	if (retval < 0) {
		dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
		goto done;
	}

	req->count = retval;
	retval = 0;

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);
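
/*
 * Example (sketch, not part of this module): a driver's VIDIOC_REQBUFS
 * handler usually just delegates to videobuf_reqbufs() on its per-open
 * queue.  "struct my_fh" and its "vb_q" member are hypothetical
 * driver-side names.
 *
 *	static int mydrv_reqbufs(struct file *file, void *priv,
 *				 struct v4l2_requestbuffers *req)
 *	{
 *		struct my_fh *fh = priv;
 *
 *		return videobuf_reqbufs(&fh->vb_q, req);
 *	}
 */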

int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	int ret = -EINVAL;

	videobuf_queue_lock(q);
	if (unlikely(b->type != q->type)) {
		dprintk(1, "querybuf: Wrong type.\n");
		goto done;
	}
	if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
		dprintk(1, "querybuf: index out of range.\n");
		goto done;
	}
	if (unlikely(NULL == q->bufs[b->index])) {
		dprintk(1, "querybuf: buffer is null.\n");
		goto done;
	}

	videobuf_status(q, b, q->bufs[b->index], q->type);

	ret = 0;
done:
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);

int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (b->memory == V4L2_MEMORY_MMAP)
		down_read(&current->mm->mmap_sem);

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
			goto done;
		}
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
			buf->size = b->bytesused;
			buf->field = b->field;
			buf->ts = b->timestamp;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	videobuf_queue_unlock(q);

	if (b->memory == V4L2_MEMORY_MMAP)
		up_read(&current->mm->mmap_sem);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);
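
/*
 * Example (sketch): VIDIOC_QBUF is normally a one-line wrapper as well,
 * again using the hypothetical "struct my_fh" from the example above.
 *
 *	static int mydrv_qbuf(struct file *file, void *priv,
 *			      struct v4l2_buffer *b)
 *	{
 *		struct my_fh *fh = priv;
 *
 *		return videobuf_qbuf(&fh->vb_q, b);
 *	}
 */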

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
	int retval;

checks:
	if (!q->streaming) {
		dprintk(1, "next_buffer: Not streaming\n");
		retval = -EINVAL;
		goto done;
	}

	if (list_empty(&q->stream)) {
		if (noblock) {
			retval = -EAGAIN;
			dprintk(2, "next_buffer: no buffers to dequeue\n");
			goto done;
		} else {
			dprintk(2, "next_buffer: waiting on buffer\n");

			/* Drop lock to avoid deadlock with qbuf */
			videobuf_queue_unlock(q);

			/* Checking list_empty and streaming without the lock
			 * is safe here because we jump back to "checks" and
			 * re-validate both while holding the lock before
			 * proceeding */
			retval = wait_event_interruptible(q->wait,
				!list_empty(&q->stream) || !q->streaming);
			videobuf_queue_lock(q);

			if (retval)
				goto done;

			goto checks;
		}
	}

	retval = 0;

done:
	return retval;
}

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
			      struct videobuf_buffer **vb, int nonblocking)
{
	int retval;
	struct videobuf_buffer *buf = NULL;

	retval = stream_next_buffer_check_queue(q, nonblocking);
	if (retval)
		goto done;

	buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
	retval = videobuf_waiton(q, buf, nonblocking, 1);
	if (retval < 0)
		goto done;

	*vb = buf;
done:
	return retval;
}

int videobuf_dqbuf(struct videobuf_queue *q,
		   struct v4l2_buffer *b, int nonblocking)
{
	struct videobuf_buffer *buf = NULL;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(b, 0, sizeof(*b));
	videobuf_queue_lock(q);

	retval = stream_next_buffer(q, &buf, nonblocking);
	if (retval < 0) {
		dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
		goto done;
	}

	switch (buf->state) {
	case VIDEOBUF_ERROR:
		dprintk(1, "dqbuf: state is error\n");
		break;
	case VIDEOBUF_DONE:
		dprintk(1, "dqbuf: state is done\n");
		break;
	default:
		dprintk(1, "dqbuf: state invalid\n");
		retval = -EINVAL;
		goto done;
	}
	CALL(q, sync, q, buf);
	videobuf_status(q, b, buf, q->type);
	list_del(&buf->stream);
	buf->state = VIDEOBUF_IDLE;
	b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
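
/*
 * Example (sketch): VIDIOC_DQBUF passes the file's O_NONBLOCK flag down
 * so that videobuf_dqbuf() can return -EAGAIN instead of sleeping.
 * "struct my_fh" is hypothetical, as above.
 *
 *	static int mydrv_dqbuf(struct file *file, void *priv,
 *			       struct v4l2_buffer *b)
 *	{
 *		struct my_fh *fh = priv;
 *
 *		return videobuf_dqbuf(&fh->vb_q, b, file->f_flags & O_NONBLOCK);
 *	}
 */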

int videobuf_streamon(struct videobuf_queue *q)
{
	struct videobuf_buffer *buf;
	unsigned long flags = 0;
	int retval;

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading)
		goto done;
	retval = 0;
	if (q->streaming)
		goto done;
	q->streaming = 1;
	spin_lock_irqsave(q->irqlock, flags);
	list_for_each_entry(buf, &q->stream, stream)
		if (buf->state == VIDEOBUF_PREPARED)
			q->ops->buf_queue(q, buf);
	spin_unlock_irqrestore(q->irqlock, flags);

	wake_up_interruptible_sync(&q->wait);
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);

/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
{
	if (!q->streaming)
		return -EINVAL;

	videobuf_queue_cancel(q);

	return 0;
}

int videobuf_streamoff(struct videobuf_queue *q)
{
	int retval;

	videobuf_queue_lock(q);
	retval = __videobuf_streamoff(q);
	videobuf_queue_unlock(q);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);

/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
				      char __user *data,
				      size_t count, loff_t *ppos)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* setup stuff */
	q->read_buf = videobuf_alloc_vb(q);
	if (NULL == q->read_buf)
		return -ENOMEM;

	q->read_buf->memory = V4L2_MEMORY_USERPTR;
	q->read_buf->baddr = (unsigned long)data;
	q->read_buf->bsize = count;

	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, q->read_buf, field);
	if (0 != retval)
		goto done;

	/* start capture & wait */
	spin_lock_irqsave(q->irqlock, flags);
	q->ops->buf_queue(q, q->read_buf);
	spin_unlock_irqrestore(q->irqlock, flags);
	retval = videobuf_waiton(q, q->read_buf, 0, 0);
	if (0 == retval) {
		CALL(q, sync, q, q->read_buf);
		if (VIDEOBUF_ERROR == q->read_buf->state)
			retval = -EIO;
		else
			retval = q->read_buf->size;
	}

done:
	/* cleanup */
	q->ops->buf_release(q, q->read_buf);
	kfree(q->read_buf);
	q->read_buf = NULL;
	return retval;
}

static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   struct videobuf_buffer *buf,
				   char __user *data, size_t count,
				   int nonblocking)
{
	void *vaddr = CALLPTR(q, vaddr, buf);

	/* copy to userspace */
	if (count > buf->size - q->read_off)
		count = buf->size - q->read_off;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}

static int __videobuf_copy_stream(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc = CALLPTR(q, vaddr, buf);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * to all vbi decoding software out there ... */
		fc += (buf->size >> 2) - 1;
		*fc = buf->field_count >> 1;
		dprintk(1, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}

ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned size = 0, nbufs = 1;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	videobuf_queue_lock(q);

	q->ops->buf_setup(q, &nbufs, &size);

	if (NULL == q->read_buf &&
	    count >= size &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q, data, count, ppos);
		if (retval >= 0 || retval == -EIO)
			/* ok, all done */
			goto done;
		/* fall back to a kernel bounce buffer on failure */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc_vb(q);

		dprintk(1, "video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q, q->read_buf, field);

		if (0 != retval) {
			kfree(q->read_buf);
			q->read_buf = NULL;
			goto done;
		}

		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, q->read_buf);
		spin_unlock_irqrestore(q->irqlock, flags);

		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q, sync, q, q->read_buf);

	if (VIDEOBUF_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
	if (retval < 0)
		goto done;

	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
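
/*
 * Example (sketch): a single-frame read() file operation built on
 * videobuf_read_one().  "struct my_fh" is the same hypothetical
 * driver-side handle used in the earlier examples.
 *
 *	static ssize_t mydrv_read(struct file *file, char __user *data,
 *				  size_t count, loff_t *ppos)
 *	{
 *		struct my_fh *fh = file->private_data;
 *
 *		return videobuf_read_one(&fh->vb_q, data, count, ppos,
 *					 file->f_flags & O_NONBLOCK);
 *	}
 */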

/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned int count = 0, size = 0;
	int err, i;

	q->ops->buf_setup(q, &count, &size);
	if (count < 2)
		count = 2;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = PAGE_ALIGN(size);

	err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
	if (err < 0)
		return err;

	count = err;

	for (i = 0; i < count; i++) {
		field = videobuf_next_field(q);
		err = q->ops->buf_prepare(q, q->bufs[i], field);
		if (err)
			return err;
		list_add_tail(&q->bufs[i]->stream, &q->stream);
	}
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < count; i++)
		q->ops->buf_queue(q, q->bufs[i]);
	spin_unlock_irqrestore(q->irqlock, flags);
	q->reading = 1;
	return 0;
}

static void __videobuf_read_stop(struct videobuf_queue *q)
{
	int i;

	videobuf_queue_cancel(q);
	__videobuf_free(q);
	INIT_LIST_HEAD(&q->stream);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}
	q->read_buf = NULL;
}

int videobuf_read_start(struct videobuf_queue *q)
{
	int rc;

	videobuf_queue_lock(q);
	rc = __videobuf_read_start(q);
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);

void videobuf_read_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);
	__videobuf_read_stop(q);
	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);

void videobuf_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);

	if (q->streaming)
		__videobuf_streamoff(q);

	if (q->reading)
		__videobuf_read_stop(q);

	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);

ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
		if (rc < 0) {
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
					retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval += rc;
			count -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);

__poll_t videobuf_poll_stream(struct file *file,
			      struct videobuf_queue *q,
			      poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct videobuf_buffer *buf = NULL;
	__poll_t rc = 0;

	videobuf_queue_lock(q);
	if (q->streaming) {
		if (!list_empty(&q->stream))
			buf = list_entry(q->stream.next,
					 struct videobuf_buffer, stream);
	} else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		if (!q->reading)
			__videobuf_read_start(q);
		if (!q->reading) {
			rc = EPOLLERR;
		} else if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		buf = q->read_buf;
	}
	if (!buf)
		rc = EPOLLERR;

	if (0 == rc) {
		poll_wait(file, &buf->done, wait);
		if (buf->state == VIDEOBUF_DONE ||
		    buf->state == VIDEOBUF_ERROR) {
			switch (q->type) {
			case V4L2_BUF_TYPE_VIDEO_OUTPUT:
			case V4L2_BUF_TYPE_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SDR_OUTPUT:
				rc = EPOLLOUT | EPOLLWRNORM;
				break;
			default:
				rc = EPOLLIN | EPOLLRDNORM;
				break;
			}
		}
	}
	videobuf_queue_unlock(q);
	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);
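
/*
 * Example (sketch): a poll() file operation that simply forwards to
 * videobuf_poll_stream(); the queue pointer comes from the hypothetical
 * per-open "struct my_fh" used in the earlier examples.
 *
 *	static __poll_t mydrv_poll(struct file *file, poll_table *wait)
 *	{
 *		struct my_fh *fh = file->private_data;
 *
 *		return videobuf_poll_stream(file, &fh->vb_q, wait);
 *	}
 */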

int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
	int rc = -EINVAL;
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		struct videobuf_buffer *buf = q->bufs[i];

		if (buf && buf->memory == V4L2_MEMORY_MMAP &&
		    buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
			rc = CALL(q, mmap_mapper, q, buf, vma);
			break;
		}
	}
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
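
/*
 * Example (sketch): the driver's mmap() file operation forwards the VMA
 * to videobuf_mmap_mapper(), which looks up the buffer by offset and
 * invokes the backend's mmap_mapper op.  Names are again hypothetical.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_fh *fh = file->private_data;
 *
 *		return videobuf_mmap_mapper(&fh->vb_q, vma);
 *	}
 */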