crypto: aead - Add common IV generation code
crypto/cryptd.c
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

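/*
 * Request queueing is strictly per CPU: each possible CPU owns its own
 * crypto_queue plus a work item, and cryptd_enqueue_request() below queues
 * on the submitting CPU, so requests are dequeued and completed on the CPU
 * that issued them, with no cross-CPU locking on the fast path.
 */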
struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
        struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
        put_cpu();

        return err;
}

/*
 * Called in workqueue context: performs one crypto request (via
 * req->complete) and reschedules itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging the crypto
         * workqueue.  preempt_disable/enable is used to prevent being
         * preempted by cryptd_enqueue_request().  local_bh_disable/enable
         * is used to prevent cryptd_enqueue_request() from being called
         * from software interrupts.
         */
        local_bh_disable();
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

        return ictx->queue;
}

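/*
 * If the caller asked for an algorithm marked CRYPTO_ALG_INTERNAL (one not
 * meant for general use, e.g. a bare SIMD implementation), carry the marker
 * over into the type/mask used to look up the child algorithm and to flag
 * the cryptd instance itself.
 */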
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
                                         u32 *mask)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return;
        if (algt->type & CRYPTO_ALG_INTERNAL)
                *type |= CRYPTO_ALG_INTERNAL;
        if (algt->mask & CRYPTO_ALG_INTERNAL)
                *mask |= CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
        struct crypto_blkcipher *child = ctx->child;
        int err;

        crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
                                          CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(child, key, keylen);
        crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
                                            CRYPTO_TFM_RES_MASK);
        return err;
}

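/*
 * Runs in workqueue (process) context, so the synchronous child may use
 * CRYPTO_TFM_REQ_MAY_SLEEP.  A completion with err == -EINPROGRESS is only
 * the backlog notification from cryptd_queue_worker(); in that case no
 * crypto work is done and the notification is relayed to the original
 * completion callback unchanged.
 */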
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
                                   struct crypto_blkcipher *child,
                                   int err,
                                   int (*crypt)(struct blkcipher_desc *desc,
                                                struct scatterlist *dst,
                                                struct scatterlist *src,
                                                unsigned int len))
{
        struct cryptd_blkcipher_request_ctx *rctx;
        struct blkcipher_desc desc;

        rctx = ablkcipher_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.info = req->info;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypt(&desc, req->dst, req->src, req->nbytes);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
                                    crypto_completion_t compl)
{
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_spawn *spawn = &ictx->spawn;
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *cipher;

        cipher = crypto_spawn_blkcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_ablkcipher.reqsize =
                sizeof(struct cryptd_blkcipher_request_ctx);
        return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(ctx->child);
}

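/*
 * Instances are registered as "cryptd(<driver-name>)" while keeping the
 * wrapped algorithm's cra_name, and the priority is raised by 50 so that
 * the asynchronous cryptd version is preferred over the plain synchronous
 * implementation it wraps.
 */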
static int cryptd_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
{
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)",
                     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

        return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                   unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = cryptd_init_instance(inst, alg);
        if (err)
                goto out_free_inst;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
                                   struct rtattr **tb,
                                   struct cryptd_queue *queue)
{
        struct cryptd_instance_ctx *ctx;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
        u32 mask = CRYPTO_ALG_TYPE_MASK;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        alg = crypto_get_attr_alg(tb, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = crypto_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.cra_flags = type;
        inst->alg.cra_type = &crypto_ablkcipher_type;

        inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

        inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

        inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

        inst->alg.cra_init = cryptd_blkcipher_init_tfm;
        inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

        inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
        inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
        inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_spawn(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;
        int err;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        err = crypto_shash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
                                       CRYPTO_TFM_RES_MASK);
        return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_finup(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *salg;
        struct crypto_alg *alg;
        u32 type = 0;
        u32 mask = 0;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        salg = shash_attr_alg(tb[1], type, mask);
        if (IS_ERR(salg))
                return PTR_ERR(salg);

        alg = &salg->base;
        inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
                                     sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_shash_spawn(&ctx->spawn, salg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.halg.base.cra_flags = type;

        inst->alg.halg.digestsize = salg->digestsize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final = cryptd_hash_final_enqueue;
        inst->alg.finup = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_shash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static void cryptd_aead_crypt(struct aead_request *req,
                              struct crypto_aead *child,
                              int err,
                              int (*crypt)(struct aead_request *req))
{
        struct cryptd_aead_request_ctx *rctx;

        rctx = aead_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt(req);
        req->base.complete = rctx->complete;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;
        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
        ctx->child = cipher;
        crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
                                sizeof(struct cryptd_aead_request_ctx));
        return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        const char *name;
        u32 type = 0;
        u32 mask = 0;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = crypto_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_aead_spawn(&ctx->aead_spawn, inst);
        err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_aead_spawn_alg(&ctx->aead_spawn);
        err = cryptd_init_instance(inst, alg);
        if (err)
                goto out_drop_aead;

        type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.cra_flags = type;
        inst->alg.cra_type = alg->cra_type;
        inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
        inst->alg.cra_init = cryptd_aead_init_tfm;
        inst->alg.cra_exit = cryptd_aead_exit_tfm;
        inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
        inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
        inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
        inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
        inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
        inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue;
        inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt;
        inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
out_drop_aead:
                crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

static struct cryptd_queue queue;

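/*
 * Template entry point, invoked when an instance such as "cryptd(cbc(aes))"
 * is first requested: dispatch on the type of the algorithm being wrapped.
 * Only blkcipher, digest and AEAD algorithms are supported.
 */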
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                return cryptd_create_blkcipher(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_DIGEST:
                return cryptd_create_hash(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, &queue);
        }

        return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
        struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_shash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        case CRYPTO_ALG_TYPE_AEAD:
                crypto_drop_spawn(&aead_ctx->aead_spawn.base);
                kfree(inst);
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .free = cryptd_free,
        .module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_tfm *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
        type |= CRYPTO_ALG_TYPE_BLKCIPHER;
        mask &= ~CRYPTO_ALG_TYPE_MASK;
        mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
        tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_tfm(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
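/*
 * Usage sketch (illustrative, not part of this file): a driver wraps its
 * internal synchronous cipher in cryptd roughly like this.  The algorithm
 * name below is an assumption modelled on the x86 ablk_helper code.
 *
 *	struct cryptd_ablkcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni",
 *				       CRYPTO_ALG_INTERNAL,
 *				       CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...
 *	cryptd_free_ablkcipher(ctfm);
 */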

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
        crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
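/*
 * Usage sketch (illustrative): an async hash wrapper along the lines of
 * the clmulni GHASH driver; the algorithm name is an assumption.
 *
 *	struct cryptd_ahash *chash;
 *
 *	chash = cryptd_alloc_ahash("__ghash-pclmulqdqni",
 *				   CRYPTO_ALG_INTERNAL,
 *				   CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(chash))
 *		return PTR_ERR(chash);
 *	...
 *	cryptd_free_ahash(chash);
 */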

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }
        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
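/*
 * Usage sketch (illustrative): an AEAD hand-off in the style of the
 * aesni-intel RFC4106 code; the algorithm name is an assumption.
 *
 *	struct cryptd_aead *caead;
 *
 *	caead = cryptd_alloc_aead("__driver-gcm-aes-aesni",
 *				  CRYPTO_ALG_INTERNAL,
 *				  CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(caead))
 *		return PTR_ERR(caead);
 *	...
 *	cryptd_free_aead(caead);
 */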

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx;

        ctx = crypto_aead_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
        int err;

        err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
        if (err)
                return err;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                cryptd_fini_queue(&queue);

        return err;
}

static void __exit cryptd_exit(void)
{
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");