/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

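/*
 * Each CPU owns a request queue and a work item; requests are queued
 * and processed on the CPU that submitted them.
 */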
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

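/*
 * Queue a request on the current CPU's queue and kick that CPU's
 * worker.  get_cpu()/put_cpu() keep us on one CPU so the request lands
 * on the same queue that the scheduled work item will drain.
 */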
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/* Called in workqueue context: do one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to
 * do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/* Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent
	 * being preempted by cryptd_enqueue_request(). */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

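/*
 * Key the underlying synchronous blkcipher, propagating request flags
 * from the async parent down to the child and result flags back up.
 */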
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

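/*
 * Runs in workqueue context: perform the real encryption or decryption
 * synchronously on the child blkcipher.  An err of -EINPROGRESS means
 * we are only delivering a backlog notification, so the operation is
 * skipped.  The saved completion is invoked with bottom halves
 * disabled.
 */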
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

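/*
 * Swap the caller's completion for our worker callback and queue the
 * request; the original completion is restored by the worker before it
 * is invoked.
 */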
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

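/*
 * Allocate a template instance with @tail bytes of context, name it
 * "cryptd(<driver>)" and give it a priority boost of 50 over the
 * wrapped algorithm so the cryptd version wins the lookup.
 */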
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
						     unsigned int tail)
{
	struct crypto_instance *inst;
	int err;

	inst = kzalloc(sizeof(*inst) + tail, GFP_KERNEL);
	if (!inst) {
		inst = ERR_PTR(-ENOMEM);
		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

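/*
 * The async hash side wraps a synchronous shash.  The request context
 * carries the shash_desc, so the request size must also cover the
 * descriptor of the child algorithm.
 */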
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

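/*
 * The workers below all follow the same pattern as the blkcipher path:
 * skip the real work on a backlog notification (-EINPROGRESS), run the
 * shash operation, restore the caller's completion and invoke it with
 * bottom halves disabled.
 */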
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg, inst);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ahash_type;

	inst->alg.cra_ahash.digestsize = salg->digestsize;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.cra_init = cryptd_hash_init_tfm;
	inst->alg.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
	inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
	inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
	inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
	inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

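/* A single queue, shared by every cryptd instance. */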
static struct cryptd_queue queue;

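/*
 * Template entry point: dispatch on the requested algorithm type so
 * that "cryptd(x)" works for both block ciphers and digests.
 */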
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

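/*
 * Allocate the cryptd version of a blkcipher directly, for callers
 * that want a handle on the async wrapper itself.  A minimal usage
 * sketch (the algorithm name is only an example; error handling and
 * request setup elided):
 *
 *	struct cryptd_ablkcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	crypto_ablkcipher_setkey(&ctfm->base, key, keylen);
 *	...
 *	cryptd_free_ablkcipher(ctfm);
 */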
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");