/*
 * crypto/ahash.c - Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

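/*
 * Private state used when this file has to substitute its own result
 * buffer and completion callback for the caller's: the ORIGINAL values
 * are parked here so they can be restored once the operation finishes.
 * ubuf provides the storage for the aligned bounce buffer.
 */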
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

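/*
 * Map the current scatterlist page and return how many bytes the caller
 * may process in one go: never past the end of the page, and, when the
 * offset is misaligned, only up to the next alignmask boundary.
 */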
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	kunmap_atomic(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
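
/*
 * Typical consumer of the walk API (illustrative sketch, not part of the
 * original file; shash_ahash_update() in crypto/shash.c follows the same
 * pattern).  Each iteration yields a mapped chunk in walk.data; feeding
 * the handler's return value back into crypto_hash_walk_done() ends the
 * walk early on error:
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = crypto_shash_update(desc, walk.data, nbytes);
 */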

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags;

	return hash_walk_new_entry(walk);
}

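/*
 * Copy the key into a freshly allocated buffer whose start is rounded up
 * to the transform's alignment mask, so ->setkey() only ever sees an
 * aligned key.  The copy is zeroed before it is freed.
 */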
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

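/*
 * kmalloc() already guarantees crypto_tfm_ctx_alignment(), so only the
 * part of the alignment mask beyond that guarantee needs extra padding
 * when sizing an aligned result buffer.
 */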
static inline unsigned int ahash_align_buffer_size(unsigned int len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the priv allocated for the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct crypto_async_request *req" here is in fact the
	 * "req.base" embedded in the ADJUSTED request from
	 * ahash_op_unaligned(), and its ->data was set up to point back at
	 * that same ADJUSTED "req".
	 */

	/* First copy areq->result into areq->priv->result. */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;
	int err;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *	.result		= ADJUSTED[new aligned buffer]
	 *	.base.complete	= ADJUSTED[pointer to completion function]
	 *	.base.data	= ADJUSTED[*req (pointer to self)]
	 *	.priv		= ADJUSTED[new priv] {
	 *		.result   = ORIGINAL(result)
	 *		.complete = ORIGINAL(base.complete)
	 *		.data     = ORIGINAL(base.data)
	 *	}
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 * is for internal use of the Crypto API and the
	 * user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_op_unaligned_done;
	req->base.data = req;
	req->priv = priv;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

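/*
 * The three public entry points below all go through crypto_ahash_op(),
 * which transparently substitutes an aligned bounce buffer when the
 * caller's result pointer does not satisfy the algorithm's alignmask.
 */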
int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

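/*
 * Default ->finup() for algorithms that only provide ->update() and
 * ->final(): run update, then final, restoring the caller's result
 * buffer and completion callback afterwards.  The done1/done2 callbacks
 * replay the same two steps when either operation completes
 * asynchronously.
 */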
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	kzfree(priv);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	ahash_def_finup_finish2(areq, err);

	complete(data, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	err = ahash_def_finup_finish1(areq, err);

	complete(data, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_def_finup_done1;
	req->base.data = req;
	req->priv = priv;

	return ahash_def_finup_finish1(req, tfm->update(req));
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

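/*
 * Set up the per-transform operation table.  Algorithms registered as
 * shash rather than ahash are detected via cra_type and wired up through
 * the synchronous-to-asynchronous wrapper instead.
 */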
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey)
		hash->setkey = alg->setkey;
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

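/*
 * Caller-side sketch (illustrative only, not part of the original file;
 * error handling is trimmed, and "sha1", my_complete, my_data, sg, digest
 * and nbytes are placeholders):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_complete, my_data);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *
 *	err = crypto_ahash_digest(req);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 *
 * A return value of -EINPROGRESS means the operation was queued and
 * my_complete() will run once the digest is ready.
 */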
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");