/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>

struct padata_pcrypt {
	struct padata_instance *pinst;
	struct workqueue_struct *wq;

	/*
	 * Cpumask for callback CPUs. It should be equal to the serial
	 * cpumask of the corresponding padata instance, so it is updated
	 * when padata notifies us about a serial cpumask change.
	 *
	 * cb_cpumask is protected by RCU. This fact prevents us from
	 * using cpumask_var_t directly because the actual type of
	 * cpumask_var_t depends on the kernel configuration (particularly on
	 * the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration
	 * cpumask_var_t may be either a pointer to the struct cpumask
	 * or a variable allocated on the stack. Thus we cannot safely use
	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
	 * rcu_dereference. So cpumask_var_t is wrapped with struct
	 * pcrypt_cpumask, which makes it possible to use it with RCU.
	 */
	struct pcrypt_cpumask {
		cpumask_var_t mask;
	} *cb_cpumask;
	struct notifier_block nblock;
};

static struct padata_pcrypt pencrypt;
static struct padata_pcrypt pdecrypt;
static struct kset *pcrypt_kset;

struct pcrypt_instance_ctx {
	struct crypto_aead_spawn spawn;
	atomic_t tfm_count;
};

struct pcrypt_aead_ctx {
	struct crypto_aead *child;
	unsigned int cb_cpu;
};

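/*
 * Select the callback CPU for this request and hand the request to padata
 * for parallel processing. If the caller's preferred callback CPU is not in
 * the instance's serial cpumask (and that mask is not empty), a CPU from the
 * mask is chosen by a simple modulo walk and cached in *cb_cpu.
 */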
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct padata_pcrypt *pcrypt)
{
	unsigned int cpu_index, cpu, i;
	struct pcrypt_cpumask *cpumask;

	cpu = *cb_cpu;

	rcu_read_lock_bh();
	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
	if (cpumask_test_cpu(cpu, cpumask->mask))
		goto out;

	if (!cpumask_weight(cpumask->mask))
		goto out;

	cpu_index = cpu % cpumask_weight(cpumask->mask);

	cpu = cpumask_first(cpumask->mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, cpumask->mask);

	*cb_cpu = cpu;

out:
	rcu_read_unlock_bh();
	return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

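/* setkey and setauthsize are forwarded unchanged to the wrapped child tfm. */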
static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

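/*
 * Serialization callback: invoked by padata in the original submission order
 * on the callback CPU; completes the user's request with the result that was
 * stored in padata->info.
 */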
static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->base.data, padata->info);
}

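/*
 * Completion callback of the child transform for requests that finished
 * asynchronously: record the result and feed the request back to padata for
 * in-order completion.
 */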
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	padata->info = err;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	padata_do_serial(padata);
}

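/*
 * Parallel worker for encryption: runs the child encrypt on the parallel
 * CPU. If the child completes synchronously the request goes straight to
 * the serialization stage, otherwise pcrypt_aead_done() takes care of that.
 */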
static void pcrypt_aead_enc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_encrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

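/*
 * Encryption entry point: set up the inner request for the child tfm, attach
 * the parallel/serial callbacks and queue the request on the pencrypt padata
 * instance. On success the request completes asynchronously, so -EINPROGRESS
 * is returned to the caller.
 */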
static int pcrypt_aead_encrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_enc;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

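/* The decryption path below mirrors the encryption path, but uses pdecrypt. */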
static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_decrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_dec;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

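/*
 * Transform init: pick an initial callback CPU by round-robin over the online
 * CPUs, instantiate the child aead and size the request context to hold the
 * pcrypt bookkeeping plus the child's own request.
 */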
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
	int cpu, cpu_index;
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
		    cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

	cipher = crypto_spawn_aead(&ictx->spawn);

	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
				     sizeof(struct aead_request) +
				     crypto_aead_reqsize(cipher));

	return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void pcrypt_free(struct aead_instance *inst)
{
	struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->spawn);
	kfree(inst);
}

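/*
 * Fill the generic algorithm fields of a new instance from the wrapped
 * algorithm: the driver name becomes "pcrypt(<child driver name>)", the
 * algorithm name is inherited and the priority is raised by 100 so the
 * parallel wrapper is preferred over the plain implementation.
 */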
static int pcrypt_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 100;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

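/*
 * Build a pcrypt aead instance around the algorithm named in the template
 * parameters: grab a spawn on the child aead, inherit its ivsize and
 * maxauthsize, wire up the pcrypt operations and register the instance as an
 * asynchronous algorithm.
 */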
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
			      u32 type, u32 mask)
{
	struct pcrypt_instance_ctx *ctx;
	struct crypto_attr_type *algt;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));

	err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->spawn);
	err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.init = pcrypt_aead_init_tfm;
	inst->alg.exit = pcrypt_aead_exit_tfm;

	inst->alg.setkey = pcrypt_aead_setkey;
	inst->alg.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.encrypt = pcrypt_aead_encrypt;
	inst->alg.decrypt = pcrypt_aead_decrypt;

	inst->free = pcrypt_free;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto out_drop_aead;

out:
	return err;

out_drop_aead:
	crypto_drop_aead(&ctx->spawn);
out_free_inst:
	kfree(inst);
	goto out;
}

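/*
 * Template entry point: pcrypt only wraps AEAD algorithms, any other
 * algorithm type is rejected.
 */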
static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
	}

	return -EINVAL;
}

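/*
 * padata cpumask notifier: when the serial cpumask of the padata instance
 * changes, publish a fresh copy via RCU and free the old one after a grace
 * period, so pcrypt_do_parallel() always sees a consistent mask.
 */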
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct padata_pcrypt *pcrypt;
	struct pcrypt_cpumask *new_mask, *old_mask;
	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;

	if (!(val & PADATA_CPU_SERIAL))
		return 0;

	pcrypt = container_of(self, struct padata_pcrypt, nblock);
	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
	if (!new_mask)
		return -ENOMEM;
	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
		kfree(new_mask);
		return -ENOMEM;
	}

	old_mask = pcrypt->cb_cpumask;

	cpumask_copy(new_mask->mask, cpumask->cbcpu);
	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
	synchronize_rcu_bh();

	free_cpumask_var(old_mask->mask);
	kfree(old_mask);
	return 0;
}

static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
	int ret;

	pinst->kobj.kset = pcrypt_kset;
	ret = kobject_add(&pinst->kobj, NULL, name);
	if (!ret)
		kobject_uevent(&pinst->kobj, KOBJ_ADD);

	return ret;
}

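/*
 * Bring up one padata instance ("pencrypt" or "pdecrypt"): allocate its
 * workqueue and padata instance, seed the callback cpumask from the online
 * CPUs, register the cpumask notifier and expose the instance under the
 * pcrypt kset in sysfs. Errors unwind in reverse order.
 */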
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
			      const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	get_online_cpus();

	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				     1, name);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	put_online_cpus();

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	put_online_cpus();

	return ret;
}

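/*
 * Tear down a padata instance: drop the callback cpumask, stop padata,
 * unregister the cpumask notifier and release the workqueue and the padata
 * instance itself.
 */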
static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
	free_cpumask_var(pcrypt->cb_cpumask->mask);
	kfree(pcrypt->cb_cpumask);

	padata_stop(pcrypt->pinst);
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	destroy_workqueue(pcrypt->wq);
	padata_free(pcrypt->pinst);
}

static struct crypto_template pcrypt_tmpl = {
	.name = "pcrypt",
	.create = pcrypt_create,
	.module = THIS_MODULE,
};

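/*
 * Module init: create the pcrypt sysfs kset, bring up the pencrypt and
 * pdecrypt padata instances, start them and finally register the "pcrypt"
 * template so it can be wrapped around existing AEAD algorithms
 * (for example gcm(aes), giving "pcrypt(gcm(aes))").
 */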
static int __init pcrypt_init(void)
{
	int err = -ENOMEM;

	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
	if (!pcrypt_kset)
		goto err;

	err = pcrypt_init_padata(&pencrypt, "pencrypt");
	if (err)
		goto err_unreg_kset;

	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
	if (err)
		goto err_deinit_pencrypt;

	padata_start(pencrypt.pinst);
	padata_start(pdecrypt.pinst);

	return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
	pcrypt_fini_padata(&pencrypt);
err_unreg_kset:
	kset_unregister(pcrypt_kset);
err:
	return err;
}

static void __exit pcrypt_exit(void)
{
	pcrypt_fini_padata(&pencrypt);
	pcrypt_fini_padata(&pdecrypt);

	kset_unregister(pcrypt_kset);
	crypto_unregister_template(&pcrypt_tmpl);
}

module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
MODULE_ALIAS_CRYPTO("pcrypt");