/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 */

#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
#define __INDUSTRIALIO_DMA_BUFFER_H__

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/iio/buffer.h>

struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;

/**
 * struct iio_buffer_block - Descriptor for a block of buffer memory
 * @size: Size of the block in bytes
 * @bytes_used: Number of bytes in the block that contain valid data
 */
struct iio_buffer_block {
	u32 size;
	u32 bytes_used;
};

/**
 * enum iio_block_state - State of a struct iio_dma_buffer_block
 * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
 * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
 * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
 * @IIO_BLOCK_STATE_DEAD: Block has been marked to be freed
 */
enum iio_block_state {
	IIO_BLOCK_STATE_DEQUEUED,
	IIO_BLOCK_STATE_QUEUED,
	IIO_BLOCK_STATE_ACTIVE,
	IIO_BLOCK_STATE_DONE,
	IIO_BLOCK_STATE_DEAD,
};
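
/*
 * Typical life cycle of a block: the core puts it on the incoming queue
 * (QUEUED), hands it to the driver's submit() callback (ACTIVE) and, once
 * the driver reports the transfer as completed, moves it to the outgoing
 * queue (DONE). When its data has been consumed, e.g. by read(), the block
 * becomes DEQUEUED and can be queued again. DEAD marks blocks that can no
 * longer be reused (e.g. after a buffer size change) and that will be freed
 * once the last reference is dropped.
 */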

/**
 * struct iio_dma_buffer_block - IIO buffer block
 * @head: List head
 * @size: Total size of the block in bytes
 * @bytes_used: Number of bytes that contain valid data
 * @vaddr: Virtual address of the block's memory
 * @phys_addr: Physical address of the block's memory
 * @queue: Parent DMA buffer queue
 * @kref: kref used to manage the lifetime of the block
 * @state: Current state of the block
 */
struct iio_dma_buffer_block {
	/* May only be accessed by the owner of the block */
	struct list_head head;
	size_t bytes_used;

	/*
	 * Set during allocation, constant thereafter. May be accessed read-only
	 * by anybody holding a reference to the block.
	 */
	void *vaddr;
	dma_addr_t phys_addr;
	size_t size;
	struct iio_dma_buffer_queue *queue;

	/* Must not be accessed outside the core. */
	struct kref kref;
	/*
	 * Must not be accessed outside the core. Access needs to hold
	 * queue->list_lock if the block is not owned by the core.
	 */
	enum iio_block_state state;
};

/**
 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
 * @blocks: Buffer blocks used for fileio
 * @active_block: Block being used in read()
 * @pos: Read offset in the active block
 * @block_size: Size of each block
 */
struct iio_dma_buffer_queue_fileio {
	struct iio_dma_buffer_block *blocks[2];
	struct iio_dma_buffer_block *active_block;
	size_t pos;
	size_t block_size;
};

/**
 * struct iio_dma_buffer_queue - DMA buffer base structure
 * @buffer: IIO buffer base structure
 * @dev: Parent device
 * @ops: DMA buffer callbacks
 * @lock: Protects the incoming list, @active and the fields in the @fileio
 *   substruct
 * @list_lock: Protects lists that contain blocks which can be modified in
 *   atomic context as well as blocks on those lists. This is the outgoing queue
 *   list and typically also a list of active blocks in the part that handles
 *   the DMA controller
 * @incoming: List of blocks on the incoming queue
 * @outgoing: List of blocks on the outgoing queue
 * @active: Whether the buffer is currently active
 * @fileio: FileIO state
 */
struct iio_dma_buffer_queue {
	struct iio_buffer buffer;
	struct device *dev;
	const struct iio_dma_buffer_ops *ops;

	struct mutex lock;
	spinlock_t list_lock;
	struct list_head incoming;
	struct list_head outgoing;

	bool active;

	struct iio_dma_buffer_queue_fileio fileio;
};
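
/*
 * A driver typically embeds struct iio_dma_buffer_queue into its own buffer
 * structure and uses container_of() to get back to it from the embedded
 * struct iio_buffer. A minimal sketch, with all foo_* names made up for
 * illustration:
 *
 *	struct foo_dma_buffer {
 *		struct iio_dma_buffer_queue queue;
 *		struct dma_chan *chan;
 *		struct list_head active;
 *	};
 *
 *	static struct foo_dma_buffer *to_foo_buffer(struct iio_buffer *buffer)
 *	{
 *		return container_of(buffer, struct foo_dma_buffer,
 *			queue.buffer);
 *	}
 */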

/**
 * struct iio_dma_buffer_ops - DMA buffer callback operations
 * @submit: Called to submit a block to the DMA controller
 * @abort: Should abort all pending transfers
 */
struct iio_dma_buffer_ops {
	int (*submit)(struct iio_dma_buffer_queue *queue,
		struct iio_dma_buffer_block *block);
	void (*abort)(struct iio_dma_buffer_queue *queue);
};
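
/*
 * Sketch of submit()/abort() callbacks built on the DMAengine API, roughly
 * along the lines of what a dmaengine-based implementation might look like.
 * The foo_* names and the driver-private "active" list are illustrative
 * only, foo_dma_done() is the completion callback sketched below, and error
 * handling is reduced to the essentials:
 *
 *	static int foo_buffer_submit(struct iio_dma_buffer_queue *queue,
 *		struct iio_dma_buffer_block *block)
 *	{
 *		struct foo_dma_buffer *foo = to_foo_buffer(&queue->buffer);
 *		struct dma_async_tx_descriptor *desc;
 *		dma_cookie_t cookie;
 *
 *		block->bytes_used = block->size;
 *		desc = dmaengine_prep_slave_single(foo->chan,
 *			block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
 *			DMA_PREP_INTERRUPT);
 *		if (!desc)
 *			return -ENOMEM;
 *
 *		desc->callback = foo_dma_done;
 *		desc->callback_param = block;
 *
 *		cookie = dmaengine_submit(desc);
 *		if (dma_submit_error(cookie))
 *			return dma_submit_error(cookie);
 *
 *		spin_lock_irq(&queue->list_lock);
 *		list_add_tail(&block->head, &foo->active);
 *		spin_unlock_irq(&queue->list_lock);
 *
 *		dma_async_issue_pending(foo->chan);
 *
 *		return 0;
 *	}
 *
 *	static void foo_buffer_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		struct foo_dma_buffer *foo = to_foo_buffer(&queue->buffer);
 *
 *		dmaengine_terminate_all(foo->chan);
 *		iio_dma_buffer_block_list_abort(queue, &foo->active);
 *	}
 *
 *	static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
 *		.submit = foo_buffer_submit,
 *		.abort = foo_buffer_abort,
 *	};
 */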

void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list);
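
/*
 * iio_dma_buffer_block_done() is meant to be called from the driver's DMA
 * completion handler once a block has been transferred. A sketch matching
 * the foo_* examples above; the block is taken off the driver's active list
 * under list_lock before it is handed back to the core:
 *
 *	static void foo_dma_done(void *data)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&block->queue->list_lock, flags);
 *		list_del(&block->head);
 *		spin_unlock_irqrestore(&block->queue->list_lock, flags);
 *
 *		iio_dma_buffer_block_done(block);
 *	}
 */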

int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer);
size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);
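
/*
 * These helpers implement most of the struct iio_buffer_access_funcs
 * callbacks, so a driver usually only adds its own release() on top. A
 * sketch (foo_* names are illustrative; the exact set and naming of the
 * callback fields depends on the kernel version, e.g. the read callback
 * has also been known as read_first_n):
 *
 *	static const struct iio_buffer_access_funcs foo_buffer_access_funcs = {
 *		.read_first_n = iio_dma_buffer_read,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_data_available,
 *		.release = foo_buffer_release,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *	};
 */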

int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dma_dev, const struct iio_dma_buffer_ops *ops);
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
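
/*
 * Rough usage from a driver's perspective (foo_* names as in the sketches
 * above): initialize the queue in probe, attach the embedded iio_buffer to
 * the device, and tear the queue down in remove with iio_dma_buffer_exit().
 * iio_dma_buffer_release() frees the core's per-queue resources and is
 * typically called from the buffer's release() callback once the last
 * reference is gone:
 *
 *	iio_dma_buffer_init(&foo->queue, chan->device->dev,
 *		&foo_dma_buffer_ops);
 *	foo->queue.buffer.access = &foo_buffer_access_funcs;
 *	iio_device_attach_buffer(indio_dev, &foo->queue.buffer);
 *
 *	static void foo_buffer_release(struct iio_buffer *buffer)
 *	{
 *		struct foo_dma_buffer *foo = to_foo_buffer(buffer);
 *
 *		iio_dma_buffer_release(&foo->queue);
 *		kfree(foo);
 *	}
 */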

#endif