/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,	/* return physical pages, no mapping */
	BLKCIPHER_WALK_SLOW = 1 << 1,	/* block bounced through an aligned buffer */
	BLKCIPHER_WALK_COPY = 1 << 2,	/* data staged through a scratch page */
	BLKCIPHER_WALK_DIFF = 1 << 3,	/* src and dst mapped separately */
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}
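
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): if start sits
 * at page offset 0xff8 and len == 16, the last byte would land in the next
 * page, so end_page is the next page boundary and that boundary is returned
 * as the spot.  If start + len - 1 stays within start's page, end_page is
 * at or below start and start is returned unchanged.
 */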

static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
				       unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}

static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
				       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
}

/*
 * Finish the current walk step.  On entry, err is the number of bytes the
 * caller left unprocessed (or a negative errno); the processed bytes are
 * committed and the walk either advances or is torn down.
 */
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
		blkcipher_done_fast(walk, n);
	} else {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		blkcipher_done_slow(walk, n);
	}

	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;
	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
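
/*
 * Typical caller pattern (a hypothetical driver sketch, for illustration
 * only; desc, dst, src, nbytes are assumed parameters of the caller and
 * encrypt_blocks() is an assumed helper, not part of this API):
 *
 *	struct blkcipher_walk walk;
 *	unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes - (walk.nbytes % bsize);
 *
 *		encrypt_blocks(walk.dst.virt.addr, walk.src.virt.addr, n);
 *		err = blkcipher_walk_done(desc, &walk, walk.nbytes - n);
 *	}
 *	return err;
 */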

static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}
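
/*
 * Rough sketch of the slow-path bounce buffer set up above (derived from
 * the code, sizes approximate):
 *
 *	[ alignment padding | dst block | padding | src block | slack ]
 *
 * Both blocks are placed with blkcipher_get_spot() so that neither
 * straddles a page boundary; the 3 * aligned_bsize allocation leaves room
 * for the worst-case adjustment.
 */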

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
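
/*
 * Note on the diff computation above: diff is zero only when src and dst
 * share both the page and the offset within it, i.e. the operation is in
 * place.  In that case a single mapping is shared; otherwise the
 * destination gets its own mapping and BLKCIPHER_WALK_DIFF is set so that
 * blkcipher_done_fast() knows to unmap both.
 */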

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < walk->cipher_blocksize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	bsize = min(walk->walk_blocksize, n);

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
	    !scatterwalk_aligned(&walk->out, walk->alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
	unsigned bs = walk->walk_blocksize;
	unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
	unsigned int size = aligned_bs * 2 +
			    walk->ivsize + max(aligned_bs, walk->ivsize) -
			    (walk->alignmask + 1);
	u8 *iv;

	size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, walk->ivsize);

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}
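
/*
 * The IV is parked at the far end of the buffer so that the two
 * blkcipher_get_spot() regions at the front stay free:
 * blkcipher_next_slow() reuses walk->buffer for its bounce blocks, and
 * this layout keeps the copied IV out of their way.
 */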

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	/* The walk may sleep via crypto_yield(), so hardirq context is out. */
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_aead_blocksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);
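
/*
 * In the two variants above, walk_blocksize sets the granularity at which
 * the walk hands chunks back to the caller, while cipher_blocksize is only
 * used by blkcipher_walk_next() to reject totals shorter than one cipher
 * block.
 */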

static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);	/* wipe the key copy before freeing */
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}
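
/*
 * Example (illustrative): with an alignmask of 15, a key whose address ends
 * in, say, 0x4 takes the setkey_unaligned() path, which bounces it through
 * a freshly allocated buffer rounded up to the next 16-byte boundary before
 * calling the algorithm's setkey.
 */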

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}
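
/*
 * The ivsize tail reserved above (for the synchronous interface) is the
 * same aligned region that crypto_init_blkcipher_ops_sync() points crt->iv
 * at below.
 */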

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	/* A fully set type mask means the user asked for the sync interface. */
	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));
	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}
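
/*
 * This renders in /proc/crypto roughly as follows (values illustrative,
 * e.g. for a cbc(aes)-style algorithm):
 *
 *	type         : blkcipher
 *	blocksize    : 16
 *	min keysize  : 16
 *	max keysize  : 32
 *	ivsize       : 16
 *	geniv        : <default>
 */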

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
	.report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");