/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
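
/*
 * Overview (editor's note): cipher implementations drive the scatterlist
 * walk with skcipher_walk_virt()/skcipher_walk_done(), or the _async/_aead
 * variants below.  Each step exposes a contiguous src/dst region that
 * satisfies the algorithm's alignmask; data that straddles a page or is
 * misaligned is bounced through a temporary buffer behind the scenes.
 */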

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

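/*
 * A deferred write queued during a physical-address (SKCIPHER_WALK_PHYS)
 * walk.  The buffered data is flushed to the destination scatterlist by
 * skcipher_walk_complete() once the asynchronous operation has finished.
 */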
struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
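/*
 * Worked example (illustrative, assuming 4 KiB pages): if start sits at
 * offset 0xff8 within a page and len == 16, the last byte would land at
 * offset 0x1007, so end_page is the next page boundary and the spot moves
 * there.  If the block already fits in the current page, end_page <= start
 * and start is returned unchanged.
 */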
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

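/*
 * skcipher_walk_done - finish one step of a walk
 * @walk: the walk state
 * @err: a negative errno, or the number of bytes of this step that were
 *	 left unprocessed (0 if the whole step was consumed)
 *
 * Flushes any bounce buffer used for this step, advances the scatterlists
 * and either starts the next step or cleans up and returns the final
 * status.
 */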
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

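/*
 * Flush all writes queued by a physical-address walk.  Called once the
 * asynchronous cipher operation has completed; on error the buffered data
 * is discarded instead of being copied to the destination scatterlist.
 */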
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

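/*
 * Slow path: the next bsize bytes are not contiguous in the source or
 * destination scatterlist, so process them through an aligned bounce
 * buffer that does not straddle a page.  For physical walks the buffer is
 * queued for a deferred write-back in skcipher_walk_complete().
 */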
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

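/*
 * Copy path: the data is contiguous but misaligned, so bounce it through
 * walk->page.  Virtual walks copy the result back in skcipher_walk_done();
 * physical walks queue a deferred write instead.
 */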
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

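/*
 * Fast path: both scatterlists are suitably aligned, so map the pages and
 * operate on them in place.  SKCIPHER_WALK_DIFF marks distinct src/dst
 * mappings that must both be unmapped in skcipher_walk_done().
 */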
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

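/*
 * Set up the next step of the walk, picking the fast (in-place mapping),
 * copy (aligned bounce page) or slow (per-block bounce buffer) path
 * depending on how much contiguous data the scatterlists provide and on
 * the algorithm's alignment mask.
 */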
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
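
/*
 * Typical use by a cipher implementation (illustrative sketch only; ctx
 * and encrypt_block() are hypothetical names belonging to the caller):
 *
 *	struct skcipher_walk walk;
 *	unsigned int bsize = crypto_skcipher_blocksize(tfm);
 *	int err = skcipher_walk_virt(&walk, req, false);
 *
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes - (walk.nbytes % bsize);
 *
 *		encrypt_block(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 *			      n, walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */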

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

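/*
 * Compatibility glue: allow the skcipher API to run on top of the legacy
 * blkcipher and ablkcipher algorithm types by allocating the underlying
 * transform and translating requests.
 */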
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type ||
	    alg->cra_type == &crypto_givcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (tfm->keysize)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	skcipher_set_needkey(skcipher);

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	skcipher_set_needkey(skcipher);

	return 0;
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = skcipher_setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

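/*
 * Validate and finalise an algorithm before registration.  chunksize
 * defaults to the block size (stream ciphers can set it to the granularity
 * of their underlying block function) and walksize defaults to chunksize.
 */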
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");