/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample, the overhead will be x for each sample, whereas with a block that
 * contains n samples the overhead per sample is reduced to x/n. This allows
 * much higher sample rates to be achieved than can be sustained with the
 * one-sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application by means
 * of two queues: the incoming queue and the outgoing queue. Blocks on the
 * incoming queue are waiting for the DMA controller to pick them up and fill
 * them with data. Blocks on the outgoing queue have been filled with data and
 * are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application: In this state the application can read data
 *    from the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked to be freed. It might still be
 *    owned by either the application or the DMA controller at the moment. But
 *    once they are done processing it, the block will be freed instead of
 *    going to either the incoming or outgoing queue.
 *
 * In addition, blocks are reference counted and both the block structure and
 * the storage memory for the block will be freed when the last reference to
 * the block is dropped. This means a block must not be accessed without
 * holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block to the actual
 * number of bytes in the buffer. Typically this will be equal to the size of
 * the block, but if the DMA hardware has certain alignment requirements for
 * the transfer length it might choose to use less than the full size. In
 * either case it is expected that bytes_used is a multiple of the bytes per
 * datum, i.e. the block must not contain partial samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually perform
 * a DMA transfer for the block, e.g. because the buffer was disabled before
 * the block transfer was started. In this case it should set bytes_used to 0.
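 *
 * As a rough illustration only, a dmaengine-based driver's submit() callback
 * and its completion handler could look something like the sketch below. The
 * foo_* names and the 'chan' DMA channel are hypothetical driver state, not
 * something provided by this module:
 *
 *	static void foo_dma_done(void *data)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *
 *		// Hand the filled block back to the core.
 *		iio_dma_buffer_block_done(block);
 *	}
 *
 *	static int foo_submit(struct iio_dma_buffer_queue *queue,
 *		struct iio_dma_buffer_block *block)
 *	{
 *		struct dma_async_tx_descriptor *desc;
 *
 *		block->bytes_used = block->size;
 *		desc = dmaengine_prep_slave_single(chan, block->phys_addr,
 *			block->bytes_used, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *		if (!desc)
 *			return -ENOMEM;
 *
 *		desc->callback = foo_dma_done;
 *		desc->callback_param = block;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *
 *		return 0;
 *	}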
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending and stop active transfers.
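 *
 * Continuing the hypothetical sketch above, an abort() implementation might
 * terminate the DMA channel and hand any blocks still owned by the hardware
 * back via iio_dma_buffer_block_list_abort(). Here 'active_list' stands for
 * the driver's own bookkeeping of submitted blocks:
 *
 *	static void foo_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		dmaengine_terminate_all(chan);
 *		iio_dma_buffer_block_list_abort(queue, &active_list);
 *	}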
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback.
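 *
 * For reference, wiring the defaults into iio_buffer_access_funcs typically
 * looks roughly like the sketch below. The exact set of callbacks depends on
 * the kernel version and the driver's needs; foo_buffer_release is a
 * hypothetical driver callback that ends up calling iio_dma_buffer_release():
 *
 *	static const struct iio_buffer_access_funcs foo_buffer_ops = {
 *		.read_first_n = iio_dma_buffer_read,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_data_available,
 *		.release = foo_buffer_release,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *	};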
 */

static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);

	WARN_ON(block->state != IIO_BLOCK_STATE_DEAD);

	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),
					block->vaddr, block->phys_addr);

	iio_buffer_put(&block->queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;
	unsigned long flags;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block(
	struct iio_dma_buffer_queue *queue, size_t size)
{
	struct iio_dma_buffer_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
		&block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->size = size;
	block->state = IIO_BLOCK_STATE_DEQUEUED;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	return block;
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;

	/*
	 * The buffer has already been freed by the application, just drop the
	 * reference.
	 */
	if (block->state != IIO_BLOCK_STATE_DEAD) {
		block->state = IIO_BLOCK_STATE_DONE;
		list_add_tail(&block->head, &queue->outgoing);
	}
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	_iio_dma_buffer_block_done(block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	iio_buffer_block_put_atomic(block);
	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);

/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;
		_iio_dma_buffer_block_done(block);
		iio_buffer_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_DEQUEUED:
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update callback for the iio_buffer_access_ops
 * struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int ret = 0;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application.
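	 * E.g. with 4-byte samples and a buffer length of 1024 samples, each
	 * of the two blocks covers 2048 bytes.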
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
		queue->buffer.length, 2);

	mutex_lock(&queue->lock);

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		/* If we can't reuse it, free it */
		if (block && (!iio_dma_block_reusable(block) || !try_reuse))
			block->state = IIO_BLOCK_STATE_DEAD;
	}

	/*
	 * At this point all blocks are either owned by the core or marked as
	 * dead. This means we can reset the lists without having to fear
	 * corruption.
	 */
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not reuse it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size);
			if (!block) {
				ret = -ENOMEM;
				goto out_unlock;
			}
			queue->fileio.blocks[i] = block;
		}

		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update);

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed we put the block into
	 * limbo. It will neither be on the incoming nor outgoing list, nor will
	 * it ever complete. It will just wait to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);
	ret = queue->ops->submit(queue, block);
	if (ret) {
		/*
		 * This is a bit of a problem and there is not much we can do
		 * other than wait for the buffer to be disabled and re-enabled
		 * and try again. But it should not really happen unless we run
		 * out of memory or something similar.
		 *
		 * TODO: Implement support in the IIO core to allow buffers to
		 * notify consumers that something went wrong and the buffer
		 * should be disabled.
		 */
		iio_buffer_block_put(block);
	}
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be the iio_buffer_access_ops enable callback.
 *
 * This will allocate the DMA buffers and start the DMA transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	mutex_lock(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_enable);

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be the iio_buffer_access_ops disable callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);
	mutex_unlock(&queue->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_disable);

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
	struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;

	spin_lock_irq(&queue->list_lock);
	block = list_first_entry_or_null(&queue->outgoing,
		struct iio_dma_buffer_block, head);
	if (block != NULL) {
		list_del(&block->head);
		block->state = IIO_BLOCK_STATE_DEQUEUED;
	}
	spin_unlock_irq(&queue->list_lock);

	return block;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read_first_n callback for iio_buffer_access_ops
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (block == NULL) {
			ret = 0;
			goto out_unlock;
		}
		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;

	if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	ret = n;

out_unlock:
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);

/**
 * iio_dma_buffer_data_available() - DMA buffer data_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available callback for iio_buffer_access_ops
 * struct for DMA buffers.
 */
size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;

	/*
	 * For counting the available bytes we'll use the size of the block not
	 * the number of actual bytes available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */

	mutex_lock(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	spin_lock_irq(&queue->list_lock);
	list_for_each_entry(block, &queue->outgoing, head)
		data_available += block->size;
	spin_unlock_irq(&queue->list_lock);
	mutex_unlock(&queue->lock);

	return data_available;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for iio_buffer_access_ops
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);

/**
 * iio_dma_buffer_set_length - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for iio_buffer_access_ops
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_set_length);

/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dev, const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);
	INIT_LIST_HEAD(&queue->outgoing);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_init);

/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	mutex_lock(&queue->lock);

	spin_lock_irq(&queue->list_lock);
	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
	}
	INIT_LIST_HEAD(&queue->outgoing);
	spin_unlock_irq(&queue->list_lock);

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
	queue->ops = NULL;

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_exit);

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_release);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");