/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd max per-CPU queue depth");

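/*
 * cryptd keeps one request queue and one work item per CPU: requests are
 * queued on the CPU that submits them and are processed by a work item
 * bound to that same CPU, so the queues need no cross-CPU locking.
 */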
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	atomic_t refcnt;
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

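/*
 * Queue a request on the submitting CPU's queue and kick that CPU's work
 * item.  If the tfm's refcount is live, take an extra reference so the tfm
 * cannot be freed while the request is still queued.
 */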
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: do one piece of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable prevents this worker from being
	 * preempted by cryptd_enqueue_request(); local_bh_disable/enable
	 * prevents cryptd_enqueue_request() from running in softirq context
	 * while the queue is manipulated here.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

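/*
 * Propagate CRYPTO_ALG_INTERNAL from the template parameters so that a
 * cryptd instance of an internal-only algorithm stays internal and cannot
 * be selected directly by users.
 */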
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

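/*
 * Perform the actual blkcipher operation on the child tfm in worker
 * context, then complete the original request with softirqs disabled.
 * The extra tfm reference taken at enqueue time is dropped once the
 * request has finished.
 */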
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_ablkcipher *tfm;
	struct blkcipher_desc desc;
	int refcnt;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	tfm = crypto_ablkcipher_reqtfm(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

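/*
 * Fill in the instance fields shared by all cryptd instance types.  The
 * +50 priority bump makes the cryptd wrapper win over the underlying
 * algorithm when both match a lookup.
 */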
static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

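/*
 * Run the deferred skcipher operation synchronously on the child tfm via
 * an on-stack subrequest, then complete the original request.
 */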
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

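/*
 * Defer a hash operation to the per-CPU worker: stash the caller's
 * completion in the request context and restore it once the shash
 * operation has run.
 */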
static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;
	desc->flags = req->base.flags;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

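/*
 * Run the AEAD request on the child tfm in worker context, then complete
 * the original request with softirqs disabled.  The extra tfm reference
 * taken at enqueue time is dropped unless the request is still in
 * progress.
 */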
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

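/*
 * Template entry point: dispatch on the requested algorithm type.  Legacy
 * blkcipher requests take the ablkcipher path; everything else goes
 * through the skcipher, ahash or aead constructors above.
 */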
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
		    CRYPTO_ALG_TYPE_BLKCIPHER)
			return cryptd_create_blkcipher(tmpl, tb, &queue);

		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

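/*
 * Illustrative usage sketch (not from the original file): a driver that
 * only provides a synchronous, internal implementation can expose an
 * asynchronous front end by wrapping it in cryptd via the helpers below.
 * The algorithm name here is hypothetical.
 *
 *	struct cryptd_ahash *cryptd_tfm;
 *
 *	cryptd_tfm = cryptd_alloc_ahash("__driver-sha256",
 *					CRYPTO_ALG_INTERNAL,
 *					CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(cryptd_tfm))
 *		return PTR_ERR(cryptd_tfm);
 *	...
 *	cryptd_free_ahash(cryptd_tfm);
 */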
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type = crypto_skcipher_type(type);
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_tfm_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

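/*
 * Return true if requests queued through this tfm are still pending, i.e.
 * the refcount is above the single allocation reference.  The same
 * convention is used by the skcipher, ahash and aead variants below.
 */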
bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");