/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>

struct padata_pcrypt {
	struct padata_instance *pinst;
	struct workqueue_struct *wq;

	/*
	 * Cpumask for callback CPUs. It should be
	 * equal to the serial cpumask of the corresponding padata instance,
	 * so it is updated when padata notifies us about a serial
	 * cpumask change.
	 *
	 * cb_cpumask is protected by RCU. This prevents us from
	 * using cpumask_var_t directly because the actual type of
	 * cpumask_var_t depends on the kernel configuration (particularly on
	 * the CONFIG_CPUMASK_OFFSTACK option). Depending on the configuration
	 * cpumask_var_t may be either a pointer to struct cpumask
	 * or a variable allocated on the stack. Thus we cannot safely use
	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
	 * rcu_dereference. So cpumask_var_t is wrapped with struct
	 * pcrypt_cpumask, which makes it possible to use it with RCU.
	 */
	struct pcrypt_cpumask {
		cpumask_var_t mask;
	} *cb_cpumask;
	struct notifier_block nblock;
};

static struct padata_pcrypt pencrypt;
static struct padata_pcrypt pdecrypt;
static struct kset           *pcrypt_kset;

struct pcrypt_instance_ctx {
	struct crypto_aead_spawn spawn;
	atomic_t tfm_count;
};

struct pcrypt_aead_ctx {
	struct crypto_aead *child;
	unsigned int cb_cpu;
};

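/*
 * Pick the callback CPU: if the requested *cb_cpu is not in the current
 * callback cpumask, map it onto a CPU that is, then queue the request to
 * padata for parallel processing.
 */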
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct padata_pcrypt *pcrypt)
{
	unsigned int cpu_index, cpu, i;
	struct pcrypt_cpumask *cpumask;

	cpu = *cb_cpu;

	rcu_read_lock_bh();
	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
	if (cpumask_test_cpu(cpu, cpumask->mask))
		goto out;

	if (!cpumask_weight(cpumask->mask))
		goto out;

	cpu_index = cpu % cpumask_weight(cpumask->mask);

	cpu = cpumask_first(cpumask->mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, cpumask->mask);

	*cb_cpu = cpu;

out:
	rcu_read_unlock_bh();
	return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

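/* setkey() and setauthsize() are forwarded unchanged to the child AEAD. */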
static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

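/* Serialization callback: complete the original request in submission order. */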
static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->base.data, padata->info);
}

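/*
 * Completion callback of the child request: record the result and hand the
 * job back to padata for serialization.
 */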
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	padata->info = err;

	padata_do_serial(padata);
}

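/*
 * Parallel worker for encryption: run the child encrypt and, unless it is
 * still in progress, queue the result for serialization.
 */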
static void pcrypt_aead_enc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);
	int ret;

	ret = crypto_aead_encrypt(req);

	if (ret == -EINPROGRESS)
		return;

	padata->info = ret;
	padata_do_serial(padata);
}

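/*
 * Set up the child request and dispatch it to the "pencrypt" padata
 * instance. Returns -EINPROGRESS on successful submission.
 */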
static int pcrypt_aead_encrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_enc;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

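/* Parallel worker for decryption, the counterpart of pcrypt_aead_enc(). */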
static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);
	int ret;

	ret = crypto_aead_decrypt(req);

	if (ret == -EINPROGRESS)
		return;

	padata->info = ret;
	padata_do_serial(padata);
}

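/* As pcrypt_aead_encrypt(), but dispatching to the "pdecrypt" instance. */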
static int pcrypt_aead_decrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_dec;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

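/*
 * Spread transforms over the online CPUs round-robin for their callback CPU
 * and allocate the underlying (child) AEAD transform.
 */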
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
	int cpu, cpu_index;
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
		    cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

	cipher = crypto_spawn_aead(&ictx->spawn);

	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
				     sizeof(struct aead_request) +
				     crypto_aead_reqsize(cipher));

	return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void pcrypt_free(struct aead_instance *inst)
{
	struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->spawn);
	kfree(inst);
}

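/*
 * Copy the basic algorithm properties from the wrapped algorithm and raise
 * the priority so the pcrypt instance takes precedence on name lookup.
 */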
static int pcrypt_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 100;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

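/*
 * Build a pcrypt AEAD instance around the algorithm named in the template
 * parameters and register it.
 */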
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
			      u32 type, u32 mask)
{
	struct pcrypt_instance_ctx *ctx;
	struct crypto_attr_type *algt;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));

	err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->spawn);
	err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.init = pcrypt_aead_init_tfm;
	inst->alg.exit = pcrypt_aead_exit_tfm;

	inst->alg.setkey = pcrypt_aead_setkey;
	inst->alg.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.encrypt = pcrypt_aead_encrypt;
	inst->alg.decrypt = pcrypt_aead_decrypt;

	inst->free = pcrypt_free;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto out_drop_aead;

out:
	return err;

out_drop_aead:
	crypto_drop_aead(&ctx->spawn);
out_free_inst:
	kfree(inst);
	goto out;
}

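/* Template ->create entry point; only AEAD algorithms are wrapped. */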
static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
	}

	return -EINVAL;
}

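/*
 * padata notifier: when the serial cpumask of the padata instance changes,
 * replace cb_cpumask with a fresh copy under RCU and free the old one.
 */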
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct padata_pcrypt *pcrypt;
	struct pcrypt_cpumask *new_mask, *old_mask;
	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;

	if (!(val & PADATA_CPU_SERIAL))
		return 0;

	pcrypt = container_of(self, struct padata_pcrypt, nblock);
	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
	if (!new_mask)
		return -ENOMEM;
	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
		kfree(new_mask);
		return -ENOMEM;
	}

	old_mask = pcrypt->cb_cpumask;

	cpumask_copy(new_mask->mask, cpumask->cbcpu);
	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
	synchronize_rcu_bh();

	free_cpumask_var(old_mask->mask);
	kfree(old_mask);
	return 0;
}

static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
	int ret;

	pinst->kobj.kset = pcrypt_kset;
	ret = kobject_add(&pinst->kobj, NULL, "%s", name);
	if (!ret)
		kobject_uevent(&pinst->kobj, KOBJ_ADD);

	return ret;
}

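/*
 * Allocate the workqueue and padata instance, seed the callback cpumask with
 * the currently online CPUs, and hook up the cpumask notifier and sysfs entry.
 */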
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
			      const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	get_online_cpus();

	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				     1, name);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	put_online_cpus();

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	put_online_cpus();

	return ret;
}

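/* Tear down one padata instance and everything pcrypt_init_padata() set up. */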
static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
	free_cpumask_var(pcrypt->cb_cpumask->mask);
	kfree(pcrypt->cb_cpumask);

	padata_stop(pcrypt->pinst);
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	destroy_workqueue(pcrypt->wq);
	padata_free(pcrypt->pinst);
}

static struct crypto_template pcrypt_tmpl = {
	.name = "pcrypt",
	.create = pcrypt_create,
	.module = THIS_MODULE,
};

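/*
 * Create the "pencrypt" and "pdecrypt" padata instances and register the
 * pcrypt template.
 */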
static int __init pcrypt_init(void)
{
	int err = -ENOMEM;

	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
	if (!pcrypt_kset)
		goto err;

	err = pcrypt_init_padata(&pencrypt, "pencrypt");
	if (err)
		goto err_unreg_kset;

	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
	if (err)
		goto err_deinit_pencrypt;

	padata_start(pencrypt.pinst);
	padata_start(pdecrypt.pinst);

	return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
	pcrypt_fini_padata(&pencrypt);
err_unreg_kset:
	kset_unregister(pcrypt_kset);
err:
	return err;
}

static void __exit pcrypt_exit(void)
{
	crypto_unregister_template(&pcrypt_tmpl);

	pcrypt_fini_padata(&pencrypt);
	pcrypt_fini_padata(&pdecrypt);

	kset_unregister(pcrypt_kset);
}

module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
MODULE_ALIAS_CRYPTO("pcrypt");