arch/x86/crypto/sha256-mb/sha256_mb.c
/*
 * Multi buffer SHA256 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Megha Dey <megha.dey@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha256_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha256_mb_alg_state;

struct sha256_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};

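/*
 * Request context layering: a struct mcryptd_hash_request_ctx embeds an
 * ahash_request (areq) whose __ctx area holds the struct sha256_hash_ctx
 * used by the job manager.  The two casts below walk that nesting in both
 * directions, so a hash context handed back by the manager can be mapped
 * to the mcryptd request that owns it.
 */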
static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx)
{
	struct ahash_request *areq;

	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
}

static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct ahash_request *areq)
{
	rctx->flag = HASH_UPDATE;
}

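/*
 * Hooks into the assembler job manager.  They are bound to the AVX2
 * implementations (sha256_mb_mgr_*_avx2) in sha256_mb_mod_init().
 */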
static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state);
static asmlinkage struct job_sha256* (*sha256_job_mgr_submit)
			(struct sha256_mb_mgr *state, struct job_sha256 *job);
static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
			(struct sha256_mb_mgr *state);
static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
			(struct sha256_mb_mgr *state);

inline void sha256_init_digest(uint32_t *digest)
{
	static const uint32_t initial_digest[SHA256_DIGEST_LENGTH] = {
		SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
		SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7};
	memcpy(digest, initial_digest, sizeof(initial_digest));
}

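/*
 * Append the SHA-256 padding to the extra-block buffer: a 0x80 byte, zero
 * fill and the total message length in bits as a 64-bit big-endian value.
 * Returns the number of extra blocks that still have to be hashed to
 * finish the message.
 */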
inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
			uint32_t total_len)
{
	uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA256_BLOCK_SIZE);
	padblock[i] = 0x80;

	i += ((SHA256_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA256_PADLENGTHFIELD_SIZE;

#if SHA256_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA256_LOG2_BLOCK_SIZE;
}

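/*
 * Drive a context returned by the job manager as far as possible: hash any
 * whole blocks still pending in the caller's buffer, stash the tail in the
 * partial block buffer, and submit the padded final block(s) once
 * HASH_CTX_STS_LAST is set.  Returns the context when it is COMPLETE or
 * back to IDLE (waiting for more input), or NULL while it is still in
 * flight inside the job manager.
 */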
static struct sha256_hash_ctx
		*sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr,
					struct sha256_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA256_BLOCK_SIZE-1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA256_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA256_LOG2_BLOCK_SIZE;

			if (len) {

				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha256_hash_ctx *)
				sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {

			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
				sha256_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha256_hash_ctx *)
				sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}

static struct sha256_hash_ctx
		*sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return to
	 * the user. If it is not ready, resubmit the job to finish processing.
	 * If sha256_ctx_mgr_resubmit returned a job, it is ready to be
	 * returned. Otherwise, all jobs currently being managed by the
	 * hash_ctx_mgr still need processing.
	 */
	struct sha256_hash_ctx *ctx;

	ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr);
	return sha256_ctx_mgr_resubmit(mgr, ctx);
}

static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr)
{
	sha256_job_mgr_init(&mgr->mgr);
}

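/*
 * Queue one buffer on a hash context.  "flags" may only contain HASH_FIRST,
 * HASH_UPDATE and HASH_LAST (the bits covered by HASH_ENTIRE); anything
 * else is rejected with HASH_CTX_ERROR_INVALID_FLAGS.  Whole blocks are fed
 * to the job manager directly, while any tail shorter than a block is
 * parked in the partial block buffer until a full block accumulates or
 * HASH_LAST pads it out.
 */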
static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
					struct sha256_hash_ctx *ctx,
					const void *buffer,
					uint32_t len,
					int flags)
{
	if (flags & (~HASH_ENTIRE)) {
		/* User should not pass anything other than FIRST, UPDATE
		 * or LAST
		 */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	if (flags & HASH_FIRST) {
		/* Init digest */
		sha256_init_digest(ctx->job.result_digest);

		/* Reset byte counter */
		ctx->total_length = 0;

		/* Clear extra blocks */
		ctx->partial_block_buffer_length = 0;
	}

	/* If we made it here, there was no error during this call to submit */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/* Store the user's request flags and mark this ctx as currently
	 * being processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if ((ctx->partial_block_buffer_length) | (len < SHA256_BLOCK_SIZE)) {
		/* Compute how many bytes to copy from user buffer into
		 * extra block
		 */
		uint32_t copy_len = SHA256_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(
		&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
				buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/* The extra block should never contain more than 1 block */
		assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);

		/* If the extra block buffer contains exactly 1 block,
		 * it can be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha256_hash_ctx *)
				sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha256_ctx_mgr_resubmit(mgr, ctx);
}

static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr)
{
	struct sha256_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha256_hash_ctx *)
					sha256_job_mgr_flush(&mgr->mgr);

		/* If flush returned 0, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha256_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha256_ctx_mgr_resubmit returned a job, it is ready to
		 * be returned. Otherwise, all jobs currently being managed by
		 * the sha256_ctx_mgr still need processing. Loop.
		 */
		if (ctx)
			return ctx;
	}
}

static int sha256_mb_init(struct ahash_request *areq)
{
	struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA256_H0;
	sctx->job.result_digest[1] = SHA256_H1;
	sctx->job.result_digest[2] = SHA256_H2;
	sctx->job.result_digest[3] = SHA256_H3;
	sctx->job.result_digest[4] = SHA256_H4;
	sctx->job.result_digest[5] = SHA256_H5;
	sctx->job.result_digest[6] = SHA256_H6;
	sctx->job.result_digest[7] = SHA256_H7;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}

static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
	__be32 *dst = (__be32 *) rctx->out;

	for (i = 0; i < 8; ++i)
		dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

	return 0;
}

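/*
 * Walk the remaining scatterlist data of a request and feed it to the
 * context manager until the walk reports its last segment (HASH_DONE).
 * For a request flagged HASH_FINAL the digest is copied out via
 * sha256_mb_set_results().  On return, *ret_rctx points to the request the
 * manager handed back, or is NULL if it is still in flight.
 */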
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha256_hash_ctx *sha_ctx;

	/* more work ? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;
		}
		sha_ctx = (struct sha256_hash_ctx *)
						ahash_request_ctx(&rctx->areq);
		kernel_fpu_begin();
		sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx,
						rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha256_ctx_mgr_flush(cstate->mgr);
		}
		kernel_fpu_end();
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha256_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}

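/*
 * Complete one request: remove it from the per-cpu work list and invoke its
 * completion callback, then drain any other jobs the context manager has
 * already finished and complete those with their own callbacks as well.
 */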
static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha256_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			/* complete the drained request with its own callback */
			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}

static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
				struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	/* initialize tag */
	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}

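/*
 * ahash operations of the internal "__sha256-mb" algorithm.  mcryptd
 * dispatches each request on a specific CPU (recorded in rctx->tag.cpu),
 * so every operation starts with a sanity check against the CPU it is
 * actually running on.
 */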
static int sha256_mb_update(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha256_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
	sha256_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
							nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);

	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha256_mb_finup(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(areq, struct mcryptd_hash_request_ctx, areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha256_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
	sha256_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
								nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha256_mb_final(struct ahash_request *areq)
{
	struct mcryptd_hash_request_ctx *rctx =
			container_of(areq, struct mcryptd_hash_request_ctx,
			areq);
	struct mcryptd_alg_cstate *cstate =
				this_cpu_ptr(sha256_mb_alg_state.alg_cstate);

	struct sha256_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, areq);

	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
	/* flag HASH_FINAL and 0 data size */
	sha256_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
								HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha256_mb_export(struct ahash_request *areq, void *out)
{
	struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha256_mb_import(struct ahash_request *areq, const void *in)
{
	struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb",
						CRYPTO_ALG_INTERNAL,
						CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha256_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				sizeof(struct ahash_request) +
				crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}

static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				sizeof(struct ahash_request) +
				sizeof(struct sha256_hash_ctx));

	return 0;
}

static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha256_mb_areq_alg = {
	.init		= sha256_mb_init,
	.update		= sha256_mb_update,
	.final		= sha256_mb_final,
	.finup		= sha256_mb_finup,
	.export		= sha256_mb_export,
	.import		= sha256_mb_import,
	.halg		= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct sha256_hash_ctx),
		.base		= {
			.cra_name	 = "__sha256-mb",
			.cra_driver_name = "__intel_sha256-mb",
			.cra_priority	 = 100,
			/*
			 * use ASYNC flag as some buffers in multi-buffer
			 * algo may not have completed before hashing thread
			 * sleep
			 */
			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_INTERNAL,
			.cra_blocksize	= SHA256_BLOCK_SIZE,
			.cra_module	= THIS_MODULE,
			.cra_list	= LIST_HEAD_INIT
				(sha256_mb_areq_alg.halg.base.cra_list),
			.cra_init	= sha256_mb_areq_init_tfm,
			.cra_exit	= sha256_mb_areq_exit_tfm,
			.cra_ctxsize	= sizeof(struct sha256_hash_ctx),
		}
	}
};

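/*
 * The exported "sha256" algorithm is a thin asynchronous shell: every
 * operation copies the caller's request into the per-request context,
 * retargets it at the mcryptd transform allocated in
 * sha256_mb_async_init_tfm() and forwards the call to the crypto API.
 */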
static int sha256_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}

static int sha256_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}

static int sha256_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}

static int sha256_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}

static int sha256_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}

static int sha256_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}

static int sha256_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct ahash_request *areq;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);
	areq = &rctx->areq;

	ahash_request_set_tfm(areq, child);
	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
			rctx->complete, req);

	return crypto_ahash_import(mcryptd_req, in);
}

static struct ahash_alg sha256_mb_async_alg = {
	.init		= sha256_mb_async_init,
	.update		= sha256_mb_async_update,
	.final		= sha256_mb_async_final,
	.finup		= sha256_mb_async_finup,
	.export		= sha256_mb_async_export,
	.import		= sha256_mb_async_import,
	.digest		= sha256_mb_async_digest,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct sha256_hash_ctx),
		.base = {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256_mb",
			.cra_priority		= 200,
			.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						  CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_type		= &crypto_ahash_type,
			.cra_module		= THIS_MODULE,
			.cra_list		= LIST_HEAD_INIT
				(sha256_mb_async_alg.halg.base.cra_list),
			.cra_init		= sha256_mb_async_init_tfm,
			.cra_exit		= sha256_mb_async_exit_tfm,
			.cra_ctxsize		= sizeof(struct sha256_mb_ctx),
			.cra_alignmask		= 0,
		},
	},
};

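/*
 * Periodic flusher, run via mcryptd's delayed work.  Any request that has
 * waited on the per-cpu work list past its expiry tag is forced through the
 * multi-buffer lanes so a lone job does not stall waiting for lane-mates.
 * Returns the next expiry time and re-arms the flusher if work remains.
 */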
static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha256_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha256_hash_ctx *)
					sha256_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha256_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}

static int __init sha256_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha256_mb_alg_state.alg_cstate = alloc_percpu
						(struct mcryptd_alg_cstate);

	sha256_job_mgr_init = sha256_mb_mgr_init_avx2;
	sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2;
	sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2;
	sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2;

	if (!sha256_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha256_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr),
					GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha256_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha256_mb_alg_state.flusher = &sha256_mb_flusher;

	err = crypto_register_ahash(&sha256_mb_areq_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha256_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_ahash(&sha256_mb_areq_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha256_mb_alg_state.alg_cstate);
	return -ENODEV;
}

static void __exit sha256_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha256_mb_async_alg);
	crypto_unregister_ahash(&sha256_mb_areq_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha256_mb_alg_state.alg_cstate);
}

module_init(sha256_mb_mod_init);
module_exit(sha256_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS_CRYPTO("sha256");