/*
 * ISHTP Ring Buffers
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/slab.h>
#include "client.h"

/**
 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize RX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	struct ishtp_cl_rb *rb;
	int ret = 0;
	unsigned long flags;

	for (j = 0; j < cl->rx_ring_size; ++j) {
		rb = ishtp_io_rb_init(cl);
		if (!rb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = ishtp_io_rb_alloc_buf(rb, len);
		if (ret) {
			/* rb is not on any list yet, so free it directly */
			ishtp_io_rb_free(rb);
			goto out;
		}
		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}

	return 0;

out:
	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
	ishtp_cl_free_rx_ring(cl);
	return ret;
}
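
/*
 * Usage sketch (illustrative only, not part of this file): a client
 * driver sizes the ring before allocating; CL_DEF_RX_RING_SIZE is an
 * assumed ring-size constant, not defined here.
 *
 *	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
 *	ret = ishtp_cl_alloc_rx_ring(cl);
 *	if (ret)
 *		return ret;
 */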

/**
 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize TX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	unsigned long flags;

	/* Allocate a pool of free Tx buffers */
	for (j = 0; j < cl->tx_ring_size; ++j) {
		struct ishtp_cl_tx_ring *tx_buf;

		tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
		if (!tx_buf)
			goto out;

		tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
		if (!tx_buf->send_buf.data) {
			kfree(tx_buf);
			goto out;
		}

		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
	}
	return 0;
out:
	dev_err(&cl->device->dev, "error in allocating Tx pool\n");
	ishtp_cl_free_tx_ring(cl);
	return -ENOMEM;
}
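
/*
 * Usage sketch (illustrative only): the RX and TX rings are normally
 * allocated together, e.g. on client connect, freeing the RX ring if
 * the TX allocation fails.
 *
 *	ret = ishtp_cl_alloc_rx_ring(cl);
 *	if (ret)
 *		return ret;
 *	ret = ishtp_cl_alloc_tx_ring(cl);
 *	if (ret) {
 *		ishtp_cl_free_rx_ring(cl);
 *		return ret;
 *	}
 */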

/**
 * ishtp_cl_free_rx_ring() - Free RX ring buffers
 * @cl: client device instance
 *
 * Free RX ring buffers
 */
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	unsigned long flags;

	/* release allocated memory - pass over free_rb_list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	while (!list_empty(&cl->free_rb_list.list)) {
		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
				list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	/* release allocated memory - pass over in_process_list */
	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	while (!list_empty(&cl->in_process_list.list)) {
		rb = list_entry(cl->in_process_list.list.next,
				struct ishtp_cl_rb, list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}

/**
 * ishtp_cl_free_tx_ring() - Free TX ring buffers
 * @cl: client device instance
 *
 * Free TX ring buffers
 */
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_tx_ring *tx_buf;
	unsigned long flags;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
	/* release allocated memory - pass over tx_free_list */
	while (!list_empty(&cl->tx_free_list.list)) {
		tx_buf = list_entry(cl->tx_free_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);

	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
	/* release allocated memory - pass over tx_list */
	while (!list_empty(&cl->tx_list.list)) {
		tx_buf = list_entry(cl->tx_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}
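
/*
 * Teardown sketch (illustrative): on disconnect or driver removal the
 * two free helpers above are called together; each walks only its own
 * lists under the matching spinlock, so the order does not matter.
 *
 *	ishtp_cl_free_rx_ring(cl);
 *	ishtp_cl_free_tx_ring(cl);
 */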

/**
 * ishtp_io_rb_free() - Free IO request block
 * @rb: IO request block
 *
 * Free io request block memory
 */
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
{
	if (rb == NULL)
		return;

	kfree(rb->buffer.data);
	kfree(rb);
}

/**
 * ishtp_io_rb_init() - Allocate and init IO request block
 * @cl: client device instance
 *
 * Allocate and initialize request block
 *
 * Return: Allocated IO request block pointer, or NULL on allocation failure
 */
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;

	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
	if (!rb)
		return NULL;

	INIT_LIST_HEAD(&rb->list);
	rb->cl = cl;
	rb->buf_idx = 0;
	return rb;
}

/**
 * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
 * @rb: IO request block
 * @length: length of response buffer
 *
 * Allocate response buffer
 *
 * Return: 0 on success, -EINVAL if @rb is NULL, else -ENOMEM
 */
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
{
	if (!rb)
		return -EINVAL;

	if (length == 0)
		return 0;

	rb->buffer.data = kmalloc(length, GFP_KERNEL);
	if (!rb->buffer.data)
		return -ENOMEM;

	rb->buffer.size = length;
	return 0;
}
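
/*
 * Sketch of the init/alloc pairing used by ishtp_cl_alloc_rx_ring()
 * above: a request block is created first, then given a data buffer,
 * and released with ishtp_io_rb_free() if the buffer allocation fails.
 *
 *	rb = ishtp_io_rb_init(cl);
 *	if (!rb)
 *		return -ENOMEM;
 *	ret = ishtp_io_rb_alloc_buf(rb, len);
 *	if (ret) {
 *		ishtp_io_rb_free(rb);
 *		return ret;
 *	}
 */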

/**
 * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
 * @rb: IO request block
 *
 * Re-append rb to its client's free list and send flow control if needed
 *
 * Return: 0 on success, -EFAULT if @rb or its client is NULL, else the
 * return value of ishtp_cl_read_start()
 */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
{
	struct ishtp_cl *cl;
	int rets = 0;
	unsigned long flags;

	if (!rb || !rb->cl)
		return -EFAULT;

	cl = rb->cl;
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	list_add_tail(&rb->list, &cl->free_rb_list.list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/*
	 * If we returned the first buffer to an empty 'free' list,
	 * send flow control
	 */
	if (!cl->out_flow_ctrl_creds)
		rets = ishtp_cl_read_start(cl);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
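
/*
 * Usage sketch (illustrative): after a client driver has consumed a
 * received buffer it hands the request block back so it can be reused;
 * process_rb() is a hypothetical consumer, and rb->buf_idx holds the
 * received length.
 *
 *	process_rb(rb->buffer.data, rb->buf_idx);
 *	rets = ishtp_cl_io_rb_recycle(rb);
 *	if (rets)
 *		dev_warn(&cl->device->dev, "rb recycle failed: %d\n", rets);
 */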