/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
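
/* e.g. for a 6 device array (disks == 6): blocks[0..3] hold the data
 * sources, P(blocks, 6) is blocks[4] and Q(blocks, 6) is blocks[5]
 */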

#define MAX_DISKS 255

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
                      const unsigned char *scfs, int disks,
                      struct dmaengine_unmap_data *unmap,
                      enum dma_ctrl_flags dma_flags,
                      struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct dma_device *dma = chan->device;
        enum async_tx_flags flags_orig = submit->flags;
        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
        void *cb_param_orig = submit->cb_param;
        int src_cnt = disks - 2;
        unsigned short pq_src_cnt;
        dma_addr_t dma_dest[2];
        int src_off = 0;

        while (src_cnt > 0) {
                submit->flags = flags_orig;
                pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
                /* if we are submitting additional pqs, leave the chain open,
                 * clear the callback parameters, and leave the destination
                 * buffers mapped
                 */
                if (src_cnt > pq_src_cnt) {
                        submit->flags &= ~ASYNC_TX_ACK;
                        submit->flags |= ASYNC_TX_FENCE;
                        submit->cb_fn = NULL;
                        submit->cb_param = NULL;
                } else {
                        submit->cb_fn = cb_fn_orig;
                        submit->cb_param = cb_param_orig;
                        if (cb_fn_orig)
                                dma_flags |= DMA_PREP_INTERRUPT;
                }
                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
                /* a driver may be temporarily unable to provide a
                 * descriptor; quiesce any dependencies and flush the
                 * channel's pending work, then retry to force forward
                 * progress
                 */
                for (;;) {
                        dma_dest[0] = unmap->addr[disks - 2];
                        dma_dest[1] = unmap->addr[disks - 1];
                        tx = dma->device_prep_dma_pq(chan, dma_dest,
                                                     &unmap->addr[src_off],
                                                     pq_src_cnt,
                                                     &scfs[src_off], unmap->len,
                                                     dma_flags);
                        if (likely(tx))
                                break;
                        async_tx_quiesce(&submit->depend_tx);
                        dma_async_issue_pending(chan);
                }

                dma_set_unmap(tx, unmap);
                async_tx_submit(chan, tx, submit);
                submit->depend_tx = tx;

                /* drop completed sources */
                src_cnt -= pq_src_cnt;
                src_off += pq_src_cnt;

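                /* subsequent submissions reuse P and Q as sources, so the
                 * hardware continues the computation from the partial
                 * results already in the destination buffers
                 */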
                dma_flags |= DMA_PREP_CONTINUE;
        }

        return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
                     size_t len, struct async_submit_ctl *submit)
{
        void **srcs;
        int i;
        int start = -1, stop = disks - 3;

        if (submit->scribble)
                srcs = submit->scribble;
        else
                srcs = (void **) blocks;

        for (i = 0; i < disks; i++) {
                if (blocks[i] == NULL) {
                        BUG_ON(i > disks - 3); /* P or Q can't be zero */
                        srcs[i] = (void *)raid6_empty_zero_page;
                } else {
                        srcs[i] = page_address(blocks[i]) + offset;
                        if (i < disks - 2) {
                                stop = i;
                                if (start == -1)
                                        start = i;
                        }
                }
        }
        if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
                BUG_ON(!raid6_call.xor_syndrome);
                if (start >= 0)
                        raid6_call.xor_syndrome(disks, start, stop, len, srcs);
        } else
                raid6_call.gen_syndrome(disks, len, srcs);
        async_tx_sync_epilog(submit);
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
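 * Concretely, for sources D_0..D_(n-1), where n == disks - 2 and
 * g == {02}, the routine computes:
 *
 *	P = D_0 + D_1 + ... + D_(n-1)			(xor sum)
 *	Q = g^0*D_0 + g^1*D_1 + ... + g^(n-1)*D_(n-1)	(GF(2^8) arithmetic)
 *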
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path. 'disks' always accounts for both destination
 * buffers. If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the
 * raid6_empty_zero_page in the synchronous path and omitted in the
 * hardware-asynchronous path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
                   size_t len, struct async_submit_ctl *submit)
{
        int src_cnt = disks - 2;
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
                                                      &P(blocks, disks), 2,
                                                      blocks, src_cnt, len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dmaengine_unmap_data *unmap = NULL;

        BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));

        if (device)
                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

        /* XORing P/Q is only implemented in software */
        if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
            (src_cnt <= dma_maxpq(device, 0) ||
             dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
            is_dma_pq_aligned(device, offset, 0, len)) {
                struct dma_async_tx_descriptor *tx;
                enum dma_ctrl_flags dma_flags = 0;
                unsigned char coefs[MAX_DISKS];
                int i, j;

                /* run the p+q asynchronously */
                pr_debug("%s: (async) disks: %d len: %zu\n",
                         __func__, disks, len);

                /* convert source addresses being careful to collapse 'empty'
                 * sources and update the coefficients accordingly
                 */
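                /* e.g. with four sources where blocks[1] == NULL, the three
                 * remaining pages are packed into unmap->addr[0..2] and the
                 * coefficients become { g^0, g^2, g^3 }
                 */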
                unmap->len = len;
                for (i = 0, j = 0; i < src_cnt; i++) {
                        if (blocks[i] == NULL)
                                continue;
                        unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
                                                      len, DMA_TO_DEVICE);
                        coefs[j] = raid6_gfexp[i];
                        unmap->to_cnt++;
                        j++;
                }

                /*
                 * DMAs use destinations as sources,
                 * so use BIDIRECTIONAL mapping
                 */
                unmap->bidi_cnt++;
                if (P(blocks, disks))
                        unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
                                                        offset, len, DMA_BIDIRECTIONAL);
                else {
                        unmap->addr[j++] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_P;
                }

                unmap->bidi_cnt++;
                if (Q(blocks, disks))
                        unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
                                                        offset, len, DMA_BIDIRECTIONAL);
                else {
                        unmap->addr[j++] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_Q;
                }

                tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
                dmaengine_unmap_put(unmap);
                return tx;
        }

        dmaengine_unmap_put(unmap);

        /* run the pq synchronously */
        pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

        /* wait for any prerequisite operations */
        async_tx_quiesce(&submit->depend_tx);

        if (!P(blocks, disks)) {
                P(blocks, disks) = pq_scribble_page;
                BUG_ON(len + offset > PAGE_SIZE);
        }
        if (!Q(blocks, disks)) {
                Q(blocks, disks) = pq_scribble_page;
                BUG_ON(len + offset > PAGE_SIZE);
        }
        do_sync_gen_syndrome(blocks, offset, disks, len, submit);

        return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
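
/* A minimal usage sketch (illustrative only, not part of this API): a
 * raid6 caller might generate P/Q for a stripe of four data pages like
 * this.  The names data_pages, p_page, q_page, done_fn and addr_conv
 * are hypothetical and must be supplied by the caller:
 *
 *	struct page *blocks[6];
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		blocks[i] = data_pages[i];
 *	blocks[4] = p_page;
 *	blocks[5] = q_page;
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done_fn, NULL,
 *			  addr_conv);
 *	tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
 */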

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        return NULL;
#endif
        return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
                                     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine. The synchronous path requires a
 * temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                   size_t len, enum sum_check_flags *pqres, struct page *spare,
                   struct async_submit_ctl *submit)
{
        struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dma_async_tx_descriptor *tx;
        unsigned char coefs[MAX_DISKS];
        enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
        struct dmaengine_unmap_data *unmap = NULL;

        BUG_ON(disks < 4 || disks > MAX_DISKS);

        if (device)
                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

        if (unmap && disks <= dma_maxpq(device, 0) &&
            is_dma_pq_aligned(device, offset, 0, len)) {
                struct device *dev = device->dev;
                dma_addr_t pq[2];
                int i, j = 0, src_cnt = 0;

                pr_debug("%s: (async) disks: %d len: %zu\n",
                         __func__, disks, len);

                unmap->len = len;
                for (i = 0; i < disks-2; i++)
                        if (likely(blocks[i])) {
                                unmap->addr[j] = dma_map_page(dev, blocks[i],
                                                              offset, len,
                                                              DMA_TO_DEVICE);
                                coefs[j] = raid6_gfexp[i];
                                unmap->to_cnt++;
                                src_cnt++;
                                j++;
                        }

                if (!P(blocks, disks)) {
                        pq[0] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_P;
                } else {
                        pq[0] = dma_map_page(dev, P(blocks, disks),
                                             offset, len,
                                             DMA_TO_DEVICE);
                        unmap->addr[j++] = pq[0];
                        unmap->to_cnt++;
                }
                if (!Q(blocks, disks)) {
                        pq[1] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_Q;
                } else {
                        pq[1] = dma_map_page(dev, Q(blocks, disks),
                                             offset, len,
                                             DMA_TO_DEVICE);
                        unmap->addr[j++] = pq[1];
                        unmap->to_cnt++;
                }

                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
                for (;;) {
                        tx = device->device_prep_dma_pq_val(chan, pq,
                                                            unmap->addr,
                                                            src_cnt,
                                                            coefs,
                                                            len, pqres,
                                                            dma_flags);
                        if (likely(tx))
                                break;
                        async_tx_quiesce(&submit->depend_tx);
                        dma_async_issue_pending(chan);
                }

                dma_set_unmap(tx, unmap);
                async_tx_submit(chan, tx, submit);
        } else {
                struct page *p_src = P(blocks, disks);
                struct page *q_src = Q(blocks, disks);
                enum async_tx_flags flags_orig = submit->flags;
                dma_async_tx_callback cb_fn_orig = submit->cb_fn;
                void *scribble = submit->scribble;
                void *cb_param_orig = submit->cb_param;
                void *p, *q, *s;

                pr_debug("%s: (sync) disks: %d len: %zu\n",
                         __func__, disks, len);

                /* caller must provide a temporary result buffer and
                 * allow the input parameters to be preserved
                 */
                BUG_ON(!spare || !scribble);

                /* wait for any prerequisite operations */
                async_tx_quiesce(&submit->depend_tx);

                /* recompute p and/or q into the temporary buffer and then
                 * check to see the result matches the current value
                 */
                tx = NULL;
                *pqres = 0;
                if (p_src) {
                        init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
                                          NULL, NULL, scribble);
                        tx = async_xor(spare, blocks, offset, disks-2, len, submit);
                        async_tx_quiesce(&tx);
                        p = page_address(p_src) + offset;
                        s = page_address(spare) + offset;
                        *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
                }

                if (q_src) {
                        P(blocks, disks) = NULL;
                        Q(blocks, disks) = spare;
                        init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
                        tx = async_gen_syndrome(blocks, offset, disks, len, submit);
                        async_tx_quiesce(&tx);
                        q = page_address(q_src) + offset;
                        s = page_address(spare) + offset;
                        *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
                }

                /* restore P, Q and submit */
                P(blocks, disks) = p_src;
                Q(blocks, disks) = q_src;

                submit->cb_fn = cb_fn_orig;
                submit->cb_param = cb_param_orig;
                submit->flags = flags_orig;
                async_tx_sync_epilog(submit);
                tx = NULL;
        }
        dmaengine_unmap_put(unmap);

        return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
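
/* A minimal validation sketch (illustrative only): check an existing
 * stripe's P and Q against its data blocks.  The names spare_page and
 * addr_conv are hypothetical and must be supplied by the caller:
 *
 *	enum sum_check_flags pqres = 0;
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
 *	tx = async_syndrome_val(blocks, 0, disks, PAGE_SIZE, &pqres,
 *				spare_page, &submit);
 *	async_tx_quiesce(&tx);
 *	if (pqres & SUM_CHECK_P_RESULT)
 *		pr_err("P does not match the data\n");
 *	if (pqres & SUM_CHECK_Q_RESULT)
 *		pr_err("Q does not match the data\n");
 */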

static int __init async_pq_init(void)
{
        pq_scribble_page = alloc_page(GFP_KERNEL);

        if (pq_scribble_page)
                return 0;

        pr_err("%s: failed to allocate required spare page\n", __func__);

        return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
        __free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");