/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
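
/*
 * Example usage (a minimal sketch only; error handling is elided, and
 * my_complete/my_ctx/src_sg/dst_sg are placeholder names):
 *
 *	static void my_complete(struct crypto_async_request *req, int err);
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_complete, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);
 *	(-EINPROGRESS or -EBUSY here means completion is reported
 *	 asynchronously via my_complete())
 */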

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
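
/*
 * Worked example for ablkcipher_get_spot(), assuming 4 KiB pages: for
 * start == 0xff8 and len == 16 the last byte would land at 0x1007, so
 * end_page == 0x1000 and the spot moves to 0x1000; for start == 0x800
 * and len == 16 the chunk already fits in one page, so start is
 * returned unchanged.
 */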

static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
					unsigned int n)
{
	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}
}

static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
					unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

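/*
 * Finish one step of a walk.  @err is the number of bytes the caller
 * left unprocessed in this step (0 on full success) or a negative error
 * code.  This advances the scatter walks, copies a bounced IV back to
 * the request and frees it once the walk ends, and moves on to the next
 * chunk while data remains.
 */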
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
		ablkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		ablkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);
	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

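/*
 * Slow path: the next chunk is misaligned or straddles a page, so stage
 * it in a freshly allocated bounce buffer.  The result is queued and
 * only written back to the destination scatterlist when the walk
 * completes (see __ablkcipher_walk_complete()).
 */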
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

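/*
 * The caller's IV is not aligned as the algorithm requires, so copy it
 * into a suitably aligned slot of a scratch buffer.  The buffer is
 * freed, and the (possibly updated) IV copied back, in
 * ablkcipher_walk_done().
 */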
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

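/*
 * Select the next chunk of the request: take the fast path (map the
 * scatterlist pages in place) when both sides are aligned and a full
 * block is available, otherwise fall back to the bounce buffer.
 */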
static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

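/*
 * Typical driver loop (a sketch; ablkcipher_walk_init() is provided by
 * crypto/internal/skcipher.h, and error handling is elided):
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *	while (walk.nbytes) {
 *		(process walk.nbytes bytes from walk.src.page/offset
 *		 into walk.dst.page/offset ...)
 *		err = ablkcipher_walk_done(req, &walk, 0);
 *	}
 */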
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

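/*
 * The key is not aligned as the algorithm requires: copy it into an
 * aligned scratch buffer for the setkey call, then wipe and free the
 * copy.
 */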
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

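/*
 * Populate the ablkcipher operations of a new transform.  The IV size
 * is capped at PAGE_SIZE / 8 as a sanity limit.
 */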
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

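/*
 * Report this algorithm's parameters to user space through the
 * crypto_user netlink interface (only available with CONFIG_NET).
 */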
#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

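/*
 * Like crypto_init_ablkcipher_ops(), but for IV generators: algorithms
 * flagged CRYPTO_ALG_GENIV supply their own setkey, everything else
 * gets the generic aligning wrapper above.
 */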
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);