/*
 * Multi buffer SHA1 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 *  Copyright(c) 2014 Intel Corporation.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *      Tim Chen <tim.c.chen@linux.intel.com>
 *
 *  BSD LICENSE
 *
 *  Copyright(c) 2014 Intel Corporation.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
53 | ||
54 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
55 | ||
56 | #include <crypto/internal/hash.h> | |
57 | #include <linux/init.h> | |
58 | #include <linux/module.h> | |
59 | #include <linux/mm.h> | |
60 | #include <linux/cryptohash.h> | |
61 | #include <linux/types.h> | |
62 | #include <linux/list.h> | |
63 | #include <crypto/scatterwalk.h> | |
64 | #include <crypto/sha.h> | |
65 | #include <crypto/mcryptd.h> | |
66 | #include <crypto/crypto_wq.h> | |
67 | #include <asm/byteorder.h> | |
68 | #include <asm/i387.h> | |
69 | #include <asm/xcr.h> | |
70 | #include <asm/xsave.h> | |
71 | #include <linux/hardirq.h> | |
72 | #include <asm/fpu-internal.h> | |
73 | #include "sha_mb_ctx.h" | |
74 | ||
#define FLUSH_INTERVAL 1000 /* in usec */

struct mcryptd_alg_state sha1_mb_alg_state;

struct sha1_mb_ctx {
        struct mcryptd_ahash *mcryptd_tfm;
};

static inline struct mcryptd_hash_request_ctx *
cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
        struct shash_desc *desc;

        desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
        return container_of(desc, struct mcryptd_hash_request_ctx, desc);
}

static inline struct ahash_request *
cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
        return container_of((void *) ctx, struct ahash_request, __ctx);
}
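
/*
 * The casts above rely on how mcryptd lays out a request: the
 * ahash_request's __ctx area holds the mcryptd_hash_request_ctx, whose
 * embedded shash_desc in turn carries the sha1_hash_ctx in its own
 * __ctx area.  Walking container_of() twice therefore recovers the
 * outer request from a bare hash context handed back by the job
 * manager.
 */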
95 | ||
96 | static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, | |
97 | struct shash_desc *desc) | |
98 | { | |
99 | rctx->flag = HASH_UPDATE; | |
100 | } | |
101 | ||
102 | asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state); | |
103 | asmlinkage struct job_sha1* (*sha1_job_mgr_submit)(struct sha1_mb_mgr *state, | |
104 | struct job_sha1 *job); | |
105 | asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr *state); | |
106 | asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state); | |
107 | ||
108 | inline void sha1_init_digest(uint32_t *digest) | |
109 | { | |
110 | static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0, | |
111 | SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }; | |
112 | memcpy(digest, initial_digest, sizeof(initial_digest)); | |
113 | } | |
114 | ||
115 | inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2], | |
116 | uint32_t total_len) | |
117 | { | |
118 | uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1); | |
119 | ||
120 | memset(&padblock[i], 0, SHA1_BLOCK_SIZE); | |
121 | padblock[i] = 0x80; | |
122 | ||
123 | i += ((SHA1_BLOCK_SIZE - 1) & | |
124 | (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1))) | |
125 | + 1 + SHA1_PADLENGTHFIELD_SIZE; | |
126 | ||
127 | #if SHA1_PADLENGTHFIELD_SIZE == 16 | |
128 | *((uint64_t *) &padblock[i - 16]) = 0; | |
129 | #endif | |
130 | ||
131 | *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3); | |
132 | ||
133 | /* Number of extra blocks to hash */ | |
134 | return i >> SHA1_LOG2_BLOCK_SIZE; | |
135 | } | |
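
/*
 * Worked example (SHA1_BLOCK_SIZE = 64, SHA1_PADLENGTHFIELD_SIZE = 8):
 * for total_len = 70, the data ends at offset i = 70 & 63 = 6 of the
 * last block.  padblock[6] becomes 0x80, the 64-bit big-endian bit
 * count 70 * 8 = 560 lands in bytes 56..63, and i advances to 64, so
 * one extra block is hashed.  For total_len = 60, the 0x80 byte and
 * the length field no longer fit in the same block: i advances to 128
 * and two extra blocks are hashed.
 */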
136 | ||
137 | static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx) | |
138 | { | |
139 | while (ctx) { | |
140 | if (ctx->status & HASH_CTX_STS_COMPLETE) { | |
141 | /* Clear PROCESSING bit */ | |
142 | ctx->status = HASH_CTX_STS_COMPLETE; | |
143 | return ctx; | |
144 | } | |
145 | ||
146 | /* | |
147 | * If the extra blocks are empty, begin hashing what remains | |
148 | * in the user's buffer. | |
149 | */ | |
150 | if (ctx->partial_block_buffer_length == 0 && | |
151 | ctx->incoming_buffer_length) { | |
152 | ||
153 | const void *buffer = ctx->incoming_buffer; | |
154 | uint32_t len = ctx->incoming_buffer_length; | |
155 | uint32_t copy_len; | |
156 | ||
157 | /* | |
158 | * Only entire blocks can be hashed. | |
159 | * Copy remainder to extra blocks buffer. | |
160 | */ | |
161 | copy_len = len & (SHA1_BLOCK_SIZE-1); | |
162 | ||
163 | if (copy_len) { | |
164 | len -= copy_len; | |
165 | memcpy(ctx->partial_block_buffer, | |
166 | ((const char *) buffer + len), | |
167 | copy_len); | |
168 | ctx->partial_block_buffer_length = copy_len; | |
169 | } | |
170 | ||
171 | ctx->incoming_buffer_length = 0; | |
172 | ||
173 | /* len should be a multiple of the block size now */ | |
174 | assert((len % SHA1_BLOCK_SIZE) == 0); | |
175 | ||
176 | /* Set len to the number of blocks to be hashed */ | |
177 | len >>= SHA1_LOG2_BLOCK_SIZE; | |
178 | ||
179 | if (len) { | |
180 | ||
181 | ctx->job.buffer = (uint8_t *) buffer; | |
182 | ctx->job.len = len; | |
183 | ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, | |
184 | &ctx->job); | |
185 | continue; | |
186 | } | |
187 | } | |
188 | ||
189 | /* | |
190 | * If the extra blocks are not empty, then we are | |
191 | * either on the last block(s) or we need more | |
192 | * user input before continuing. | |
193 | */ | |
194 | if (ctx->status & HASH_CTX_STS_LAST) { | |
195 | ||
196 | uint8_t *buf = ctx->partial_block_buffer; | |
197 | uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length); | |
198 | ||
199 | ctx->status = (HASH_CTX_STS_PROCESSING | | |
200 | HASH_CTX_STS_COMPLETE); | |
201 | ctx->job.buffer = buf; | |
202 | ctx->job.len = (uint32_t) n_extra_blocks; | |
203 | ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job); | |
204 | continue; | |
205 | } | |
206 | ||
207 | if (ctx) | |
208 | ctx->status = HASH_CTX_STS_IDLE; | |
209 | return ctx; | |
210 | } | |
211 | ||
212 | return NULL; | |
213 | } | |
214 | ||
215 | struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr) | |
216 | { | |
217 | /* | |
218 | * If get_comp_job returns NULL, there are no jobs complete. | |
219 | * If get_comp_job returns a job, verify that it is safe to return to the user. | |
220 | * If it is not ready, resubmit the job to finish processing. | |
221 | * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned. | |
222 | * Otherwise, all jobs currently being managed by the hash_ctx_mgr still need processing. | |
223 | */ | |
224 | struct sha1_hash_ctx *ctx; | |
225 | ||
226 | ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr); | |
227 | return sha1_ctx_mgr_resubmit(mgr, ctx); | |
228 | } | |
229 | ||
230 | void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr) | |
231 | { | |
232 | sha1_job_mgr_init(&mgr->mgr); | |
233 | } | |
234 | ||
235 | struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr, | |
236 | struct sha1_hash_ctx *ctx, | |
237 | const void *buffer, | |
238 | uint32_t len, | |
239 | int flags) | |
240 | { | |
241 | if (flags & (~HASH_ENTIRE)) { | |
242 | /* User should not pass anything other than FIRST, UPDATE, or LAST */ | |
243 | ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; | |
244 | return ctx; | |
245 | } | |
246 | ||
247 | if (ctx->status & HASH_CTX_STS_PROCESSING) { | |
248 | /* Cannot submit to a currently processing job. */ | |
249 | ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; | |
250 | return ctx; | |
251 | } | |
252 | ||
253 | if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) { | |
254 | /* Cannot update a finished job. */ | |
255 | ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; | |
256 | return ctx; | |
257 | } | |
258 | ||
259 | ||
260 | if (flags & HASH_FIRST) { | |
261 | /* Init digest */ | |
262 | sha1_init_digest(ctx->job.result_digest); | |
263 | ||
264 | /* Reset byte counter */ | |
265 | ctx->total_length = 0; | |
266 | ||
267 | /* Clear extra blocks */ | |
268 | ctx->partial_block_buffer_length = 0; | |
269 | } | |
270 | ||
271 | /* If we made it here, there were no errors during this call to submit */ | |
272 | ctx->error = HASH_CTX_ERROR_NONE; | |
273 | ||
274 | /* Store buffer ptr info from user */ | |
275 | ctx->incoming_buffer = buffer; | |
276 | ctx->incoming_buffer_length = len; | |
277 | ||
278 | /* Store the user's request flags and mark this ctx as currently being processed. */ | |
279 | ctx->status = (flags & HASH_LAST) ? | |
280 | (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) : | |
281 | HASH_CTX_STS_PROCESSING; | |
282 | ||
283 | /* Advance byte counter */ | |
284 | ctx->total_length += len; | |
285 | ||
286 | /* | |
287 | * If there is anything currently buffered in the extra blocks, | |
288 | * append to it until it contains a whole block. | |
289 | * Or if the user's buffer contains less than a whole block, | |
290 | * append as much as possible to the extra block. | |
291 | */ | |
292 | if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) { | |
293 | /* Compute how many bytes to copy from user buffer into extra block */ | |
294 | uint32_t copy_len = SHA1_BLOCK_SIZE - ctx->partial_block_buffer_length; | |
295 | if (len < copy_len) | |
296 | copy_len = len; | |
297 | ||
298 | if (copy_len) { | |
299 | /* Copy and update relevant pointers and counters */ | |
300 | memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length], | |
301 | buffer, copy_len); | |
302 | ||
303 | ctx->partial_block_buffer_length += copy_len; | |
304 | ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len); | |
305 | ctx->incoming_buffer_length = len - copy_len; | |
306 | } | |
307 | ||
308 | /* The extra block should never contain more than 1 block here */ | |
309 | assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE); | |
310 | ||
311 | /* If the extra block buffer contains exactly 1 block, it can be hashed. */ | |
312 | if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) { | |
313 | ctx->partial_block_buffer_length = 0; | |
314 | ||
315 | ctx->job.buffer = ctx->partial_block_buffer; | |
316 | ctx->job.len = 1; | |
317 | ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job); | |
318 | } | |
319 | } | |
320 | ||
321 | return sha1_ctx_mgr_resubmit(mgr, ctx); | |
322 | } | |
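
/*
 * Illustrative driver for the context manager above (a sketch, not
 * code from this file): given an initialized ctx, hash one message
 * split across two submissions.
 *
 *      kernel_fpu_begin();
 *      sha1_ctx_mgr_submit(mgr, ctx, buf, len1, HASH_FIRST);
 *      sha1_ctx_mgr_submit(mgr, ctx, buf + len1, len2, HASH_LAST);
 *      while (sha1_ctx_mgr_flush(mgr))
 *              ;       /* drain lanes until every job completes */
 *      kernel_fpu_end();
 *
 * A NULL return from submit means the job is parked in a SIMD lane
 * waiting for enough peers to fill a multi-buffer batch; it is not an
 * error.
 */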
323 | ||
324 | struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr) | |
325 | { | |
326 | struct sha1_hash_ctx *ctx; | |
327 | ||
328 | while (1) { | |
329 | ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr); | |
330 | ||
331 | /* If flush returned 0, there are no more jobs in flight. */ | |
332 | if (!ctx) | |
333 | return NULL; | |
334 | ||
335 | /* | |
336 | * If flush returned a job, resubmit the job to finish processing. | |
337 | */ | |
338 | ctx = sha1_ctx_mgr_resubmit(mgr, ctx); | |
339 | ||
340 | /* | |
341 | * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned. | |
342 | * Otherwise, all jobs currently being managed by the sha1_ctx_mgr | |
343 | * still need processing. Loop. | |
344 | */ | |
345 | if (ctx) | |
346 | return ctx; | |
347 | } | |
348 | } | |
349 | ||
350 | static int sha1_mb_init(struct shash_desc *desc) | |
351 | { | |
352 | struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); | |
353 | ||
354 | hash_ctx_init(sctx); | |
355 | sctx->job.result_digest[0] = SHA1_H0; | |
356 | sctx->job.result_digest[1] = SHA1_H1; | |
357 | sctx->job.result_digest[2] = SHA1_H2; | |
358 | sctx->job.result_digest[3] = SHA1_H3; | |
359 | sctx->job.result_digest[4] = SHA1_H4; | |
360 | sctx->total_length = 0; | |
361 | sctx->partial_block_buffer_length = 0; | |
362 | sctx->status = HASH_CTX_STS_IDLE; | |
363 | ||
364 | return 0; | |
365 | } | |
366 | ||
367 | static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx) | |
368 | { | |
369 | int i; | |
370 | struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc); | |
371 | __be32 *dst = (__be32 *) rctx->out; | |
372 | ||
373 | for (i = 0; i < 5; ++i) | |
374 | dst[i] = cpu_to_be32(sctx->job.result_digest[i]); | |
375 | ||
376 | return 0; | |
377 | } | |
378 | ||
379 | static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx, | |
380 | struct mcryptd_alg_cstate *cstate, bool flush) | |
381 | { | |
382 | int flag = HASH_UPDATE; | |
383 | int nbytes, err = 0; | |
384 | struct mcryptd_hash_request_ctx *rctx = *ret_rctx; | |
385 | struct sha1_hash_ctx *sha_ctx; | |
386 | ||
387 | /* more work ? */ | |
388 | while (!(rctx->flag & HASH_DONE)) { | |
389 | nbytes = crypto_ahash_walk_done(&rctx->walk, 0); | |
390 | if (nbytes < 0) { | |
391 | err = nbytes; | |
392 | goto out; | |
393 | } | |
394 | /* check if the walk is done */ | |
395 | if (crypto_ahash_walk_last(&rctx->walk)) { | |
396 | rctx->flag |= HASH_DONE; | |
397 | if (rctx->flag & HASH_FINAL) | |
398 | flag |= HASH_LAST; | |
399 | ||
400 | } | |
401 | sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc); | |
402 | kernel_fpu_begin(); | |
403 | sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); | |
404 | if (!sha_ctx) { | |
405 | if (flush) | |
406 | sha_ctx = sha1_ctx_mgr_flush(cstate->mgr); | |
407 | } | |
408 | kernel_fpu_end(); | |
409 | if (sha_ctx) | |
410 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
411 | else { | |
412 | rctx = NULL; | |
413 | goto out; | |
414 | } | |
415 | } | |
416 | ||
417 | /* copy the results */ | |
418 | if (rctx->flag & HASH_FINAL) | |
419 | sha1_mb_set_results(rctx); | |
420 | ||
421 | out: | |
422 | *ret_rctx = rctx; | |
423 | return err; | |
424 | } | |
425 | ||
426 | static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, | |
427 | struct mcryptd_alg_cstate *cstate, | |
428 | int err) | |
429 | { | |
430 | struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); | |
431 | struct sha1_hash_ctx *sha_ctx; | |
432 | struct mcryptd_hash_request_ctx *req_ctx; | |
433 | int ret; | |
434 | ||
435 | /* remove from work list */ | |
436 | spin_lock(&cstate->work_lock); | |
437 | list_del(&rctx->waiter); | |
438 | spin_unlock(&cstate->work_lock); | |
439 | ||
440 | if (irqs_disabled()) | |
441 | rctx->complete(&req->base, err); | |
442 | else { | |
443 | local_bh_disable(); | |
444 | rctx->complete(&req->base, err); | |
445 | local_bh_enable(); | |
446 | } | |
447 | ||
448 | /* check to see if there are other jobs that are done */ | |
449 | sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr); | |
450 | while (sha_ctx) { | |
451 | req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
452 | ret = sha_finish_walk(&req_ctx, cstate, false); | |
453 | if (req_ctx) { | |
454 | spin_lock(&cstate->work_lock); | |
455 | list_del(&req_ctx->waiter); | |
456 | spin_unlock(&cstate->work_lock); | |
457 | ||
458 | req = cast_mcryptd_ctx_to_req(req_ctx); | |
459 | if (irqs_disabled()) | |
460 | rctx->complete(&req->base, ret); | |
461 | else { | |
462 | local_bh_disable(); | |
463 | rctx->complete(&req->base, ret); | |
464 | local_bh_enable(); | |
465 | } | |
466 | } | |
467 | sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr); | |
468 | } | |
469 | ||
470 | return 0; | |
471 | } | |
472 | ||
473 | static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx, | |
474 | struct mcryptd_alg_cstate *cstate) | |
475 | { | |
476 | unsigned long next_flush; | |
477 | unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL); | |
478 | ||
479 | /* initialize tag */ | |
480 | rctx->tag.arrival = jiffies; /* tag the arrival time */ | |
481 | rctx->tag.seq_num = cstate->next_seq_num++; | |
482 | next_flush = rctx->tag.arrival + delay; | |
483 | rctx->tag.expire = next_flush; | |
484 | ||
485 | spin_lock(&cstate->work_lock); | |
486 | list_add_tail(&rctx->waiter, &cstate->work_list); | |
487 | spin_unlock(&cstate->work_lock); | |
488 | ||
489 | mcryptd_arm_flusher(cstate, delay); | |
490 | } | |
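
/*
 * Each queued request is stamped with an expiry FLUSH_INTERVAL (1 ms)
 * after arrival.  If too few peer requests show up to fill the SIMD
 * lanes, the per-cpu flusher (sha1_mb_flusher below) fires at that
 * expiry and forces the partially filled batch through, bounding the
 * extra latency a lone request can pay for multi-buffering.
 */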
491 | ||
492 | static int sha1_mb_update(struct shash_desc *desc, const u8 *data, | |
493 | unsigned int len) | |
494 | { | |
495 | struct mcryptd_hash_request_ctx *rctx = | |
496 | container_of(desc, struct mcryptd_hash_request_ctx, desc); | |
497 | struct mcryptd_alg_cstate *cstate = | |
498 | this_cpu_ptr(sha1_mb_alg_state.alg_cstate); | |
499 | ||
500 | struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); | |
501 | struct sha1_hash_ctx *sha_ctx; | |
502 | int ret = 0, nbytes; | |
503 | ||
504 | ||
505 | /* sanity check */ | |
506 | if (rctx->tag.cpu != smp_processor_id()) { | |
507 | pr_err("mcryptd error: cpu clash\n"); | |
508 | goto done; | |
509 | } | |
510 | ||
511 | /* need to init context */ | |
512 | req_ctx_init(rctx, desc); | |
513 | ||
514 | nbytes = crypto_ahash_walk_first(req, &rctx->walk); | |
515 | ||
516 | if (nbytes < 0) { | |
517 | ret = nbytes; | |
518 | goto done; | |
519 | } | |
520 | ||
521 | if (crypto_ahash_walk_last(&rctx->walk)) | |
522 | rctx->flag |= HASH_DONE; | |
523 | ||
524 | /* submit */ | |
525 | sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); | |
526 | sha1_mb_add_list(rctx, cstate); | |
527 | kernel_fpu_begin(); | |
528 | sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE); | |
529 | kernel_fpu_end(); | |
530 | ||
531 | /* check if anything is returned */ | |
532 | if (!sha_ctx) | |
533 | return -EINPROGRESS; | |
534 | ||
535 | if (sha_ctx->error) { | |
536 | ret = sha_ctx->error; | |
537 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
538 | goto done; | |
539 | } | |
540 | ||
541 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
542 | ret = sha_finish_walk(&rctx, cstate, false); | |
543 | ||
544 | if (!rctx) | |
545 | return -EINPROGRESS; | |
546 | done: | |
547 | sha_complete_job(rctx, cstate, ret); | |
548 | return ret; | |
549 | } | |
550 | ||
551 | static int sha1_mb_finup(struct shash_desc *desc, const u8 *data, | |
552 | unsigned int len, u8 *out) | |
553 | { | |
554 | struct mcryptd_hash_request_ctx *rctx = | |
555 | container_of(desc, struct mcryptd_hash_request_ctx, desc); | |
556 | struct mcryptd_alg_cstate *cstate = | |
557 | this_cpu_ptr(sha1_mb_alg_state.alg_cstate); | |
558 | ||
559 | struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); | |
560 | struct sha1_hash_ctx *sha_ctx; | |
561 | int ret = 0, flag = HASH_UPDATE, nbytes; | |
562 | ||
563 | /* sanity check */ | |
564 | if (rctx->tag.cpu != smp_processor_id()) { | |
565 | pr_err("mcryptd error: cpu clash\n"); | |
566 | goto done; | |
567 | } | |
568 | ||
569 | /* need to init context */ | |
570 | req_ctx_init(rctx, desc); | |
571 | ||
572 | nbytes = crypto_ahash_walk_first(req, &rctx->walk); | |
573 | ||
574 | if (nbytes < 0) { | |
575 | ret = nbytes; | |
576 | goto done; | |
577 | } | |
578 | ||
579 | if (crypto_ahash_walk_last(&rctx->walk)) { | |
580 | rctx->flag |= HASH_DONE; | |
581 | flag = HASH_LAST; | |
582 | } | |
583 | rctx->out = out; | |
584 | ||
585 | /* submit */ | |
586 | rctx->flag |= HASH_FINAL; | |
587 | sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); | |
588 | sha1_mb_add_list(rctx, cstate); | |
589 | ||
590 | kernel_fpu_begin(); | |
591 | sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); | |
592 | kernel_fpu_end(); | |
593 | ||
594 | /* check if anything is returned */ | |
595 | if (!sha_ctx) | |
596 | return -EINPROGRESS; | |
597 | ||
598 | if (sha_ctx->error) { | |
599 | ret = sha_ctx->error; | |
600 | goto done; | |
601 | } | |
602 | ||
603 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
604 | ret = sha_finish_walk(&rctx, cstate, false); | |
605 | if (!rctx) | |
606 | return -EINPROGRESS; | |
607 | done: | |
608 | sha_complete_job(rctx, cstate, ret); | |
609 | return ret; | |
610 | } | |
611 | ||
612 | static int sha1_mb_final(struct shash_desc *desc, u8 *out) | |
613 | { | |
614 | struct mcryptd_hash_request_ctx *rctx = | |
615 | container_of(desc, struct mcryptd_hash_request_ctx, desc); | |
616 | struct mcryptd_alg_cstate *cstate = | |
617 | this_cpu_ptr(sha1_mb_alg_state.alg_cstate); | |
618 | ||
619 | struct sha1_hash_ctx *sha_ctx; | |
620 | int ret = 0; | |
621 | u8 data; | |
622 | ||
623 | /* sanity check */ | |
624 | if (rctx->tag.cpu != smp_processor_id()) { | |
625 | pr_err("mcryptd error: cpu clash\n"); | |
626 | goto done; | |
627 | } | |
628 | ||
629 | /* need to init context */ | |
630 | req_ctx_init(rctx, desc); | |
631 | ||
632 | rctx->out = out; | |
633 | rctx->flag |= HASH_DONE | HASH_FINAL; | |
634 | ||
635 | sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); | |
636 | /* flag HASH_FINAL and 0 data size */ | |
637 | sha1_mb_add_list(rctx, cstate); | |
638 | kernel_fpu_begin(); | |
639 | sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, HASH_LAST); | |
640 | kernel_fpu_end(); | |
641 | ||
642 | /* check if anything is returned */ | |
643 | if (!sha_ctx) | |
644 | return -EINPROGRESS; | |
645 | ||
646 | if (sha_ctx->error) { | |
647 | ret = sha_ctx->error; | |
648 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
649 | goto done; | |
650 | } | |
651 | ||
652 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
653 | ret = sha_finish_walk(&rctx, cstate, false); | |
654 | if (!rctx) | |
655 | return -EINPROGRESS; | |
656 | done: | |
657 | sha_complete_job(rctx, cstate, ret); | |
658 | return ret; | |
659 | } | |
660 | ||
661 | static int sha1_mb_export(struct shash_desc *desc, void *out) | |
662 | { | |
663 | struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); | |
664 | ||
665 | memcpy(out, sctx, sizeof(*sctx)); | |
666 | ||
667 | return 0; | |
668 | } | |
669 | ||
670 | static int sha1_mb_import(struct shash_desc *desc, const void *in) | |
671 | { | |
672 | struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); | |
673 | ||
674 | memcpy(sctx, in, sizeof(*sctx)); | |
675 | ||
676 | return 0; | |
677 | } | |
678 | ||
679 | ||
680 | static struct shash_alg sha1_mb_shash_alg = { | |
681 | .digestsize = SHA1_DIGEST_SIZE, | |
682 | .init = sha1_mb_init, | |
683 | .update = sha1_mb_update, | |
684 | .final = sha1_mb_final, | |
685 | .finup = sha1_mb_finup, | |
686 | .export = sha1_mb_export, | |
687 | .import = sha1_mb_import, | |
688 | .descsize = sizeof(struct sha1_hash_ctx), | |
689 | .statesize = sizeof(struct sha1_hash_ctx), | |
690 | .base = { | |
691 | .cra_name = "__sha1-mb", | |
692 | .cra_driver_name = "__intel_sha1-mb", | |
693 | .cra_priority = 100, | |
694 | /* | |
695 | * use ASYNC flag as some buffers in multi-buffer | |
696 | * algo may not have completed before hashing thread sleep | |
697 | */ | |
698 | .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC, | |
699 | .cra_blocksize = SHA1_BLOCK_SIZE, | |
700 | .cra_module = THIS_MODULE, | |
701 | .cra_list = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list), | |
702 | } | |
703 | }; | |
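
/*
 * The "__sha1-mb" shash above is an internal algorithm: mcryptd
 * instantiates it per request and routes completions back through the
 * multi-buffer job manager.  Callers are expected to use the outer
 * "sha1" ahash registered below, never this inner transform directly
 * (the double-underscore prefix marks it as internal by convention).
 */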
704 | ||
705 | static int sha1_mb_async_init(struct ahash_request *req) | |
706 | { | |
707 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
708 | struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
709 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
710 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
711 | ||
712 | memcpy(mcryptd_req, req, sizeof(*req)); | |
713 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
714 | return crypto_ahash_init(mcryptd_req); | |
715 | } | |
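
/*
 * Each async entry point below follows the same shadow-request pattern
 * as sha1_mb_async_init(): the caller's request is copied into the
 * request context reserved by sha1_mb_async_init_tfm(), retargeted at
 * the mcryptd transform, and forwarded, so mcryptd sees an ordinary
 * ahash request of its own.
 */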
716 | ||
717 | static int sha1_mb_async_update(struct ahash_request *req) | |
718 | { | |
719 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
720 | ||
721 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
722 | struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
723 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
724 | ||
725 | memcpy(mcryptd_req, req, sizeof(*req)); | |
726 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
727 | return crypto_ahash_update(mcryptd_req); | |
728 | } | |
729 | ||
730 | static int sha1_mb_async_finup(struct ahash_request *req) | |
731 | { | |
732 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
733 | ||
734 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
735 | struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
736 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
737 | ||
738 | memcpy(mcryptd_req, req, sizeof(*req)); | |
739 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
740 | return crypto_ahash_finup(mcryptd_req); | |
741 | } | |
742 | ||
743 | static int sha1_mb_async_final(struct ahash_request *req) | |
744 | { | |
745 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
746 | ||
747 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
748 | struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
749 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
750 | ||
751 | memcpy(mcryptd_req, req, sizeof(*req)); | |
752 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
753 | return crypto_ahash_final(mcryptd_req); | |
754 | } | |
755 | ||
756 | int sha1_mb_async_digest(struct ahash_request *req) | |
757 | { | |
758 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
759 | struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); | |
760 | struct ahash_request *mcryptd_req = ahash_request_ctx(req); | |
761 | struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; | |
762 | ||
763 | memcpy(mcryptd_req, req, sizeof(*req)); | |
764 | ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); | |
765 | return crypto_ahash_digest(mcryptd_req); | |
766 | } | |
767 | ||
768 | static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm) | |
769 | { | |
770 | struct mcryptd_ahash *mcryptd_tfm; | |
771 | struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); | |
772 | struct mcryptd_hash_ctx *mctx; | |
773 | ||
774 | mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", 0, 0); | |
775 | if (IS_ERR(mcryptd_tfm)) | |
776 | return PTR_ERR(mcryptd_tfm); | |
777 | mctx = crypto_ahash_ctx(&mcryptd_tfm->base); | |
778 | mctx->alg_state = &sha1_mb_alg_state; | |
779 | ctx->mcryptd_tfm = mcryptd_tfm; | |
780 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
781 | sizeof(struct ahash_request) + | |
782 | crypto_ahash_reqsize(&mcryptd_tfm->base)); | |
783 | ||
784 | return 0; | |
785 | } | |
786 | ||
787 | static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm) | |
788 | { | |
789 | struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); | |
790 | ||
791 | mcryptd_free_ahash(ctx->mcryptd_tfm); | |
792 | } | |
793 | ||
794 | static struct ahash_alg sha1_mb_async_alg = { | |
795 | .init = sha1_mb_async_init, | |
796 | .update = sha1_mb_async_update, | |
797 | .final = sha1_mb_async_final, | |
798 | .finup = sha1_mb_async_finup, | |
799 | .digest = sha1_mb_async_digest, | |
800 | .halg = { | |
801 | .digestsize = SHA1_DIGEST_SIZE, | |
802 | .base = { | |
803 | .cra_name = "sha1", | |
804 | .cra_driver_name = "sha1_mb", | |
805 | .cra_priority = 200, | |
806 | .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC, | |
807 | .cra_blocksize = SHA1_BLOCK_SIZE, | |
808 | .cra_type = &crypto_ahash_type, | |
809 | .cra_module = THIS_MODULE, | |
810 | .cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list), | |
811 | .cra_init = sha1_mb_async_init_tfm, | |
812 | .cra_exit = sha1_mb_async_exit_tfm, | |
813 | .cra_ctxsize = sizeof(struct sha1_mb_ctx), | |
814 | .cra_alignmask = 0, | |
815 | }, | |
816 | }, | |
817 | }; | |
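
/*
 * The cra_priority of 200 lets this "sha1" implementation outrank
 * lower-priority sha1 drivers such as the generic one, so callers
 * asking for "sha1" are steered to the multi-buffer path by default
 * once the module loads.
 */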
818 | ||
819 | unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate) | |
820 | { | |
821 | struct mcryptd_hash_request_ctx *rctx; | |
822 | unsigned long cur_time; | |
823 | unsigned long next_flush = 0; | |
824 | struct sha1_hash_ctx *sha_ctx; | |
825 | ||
826 | ||
827 | cur_time = jiffies; | |
828 | ||
829 | while (!list_empty(&cstate->work_list)) { | |
830 | rctx = list_entry(cstate->work_list.next, | |
831 | struct mcryptd_hash_request_ctx, waiter); | |
832 | if time_before(cur_time, rctx->tag.expire) | |
833 | break; | |
834 | kernel_fpu_begin(); | |
835 | sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr); | |
836 | kernel_fpu_end(); | |
837 | if (!sha_ctx) { | |
838 | pr_err("sha1_mb error: nothing got flushed for non-empty list\n"); | |
839 | break; | |
840 | } | |
841 | rctx = cast_hash_to_mcryptd_ctx(sha_ctx); | |
842 | sha_finish_walk(&rctx, cstate, true); | |
843 | sha_complete_job(rctx, cstate, 0); | |
844 | } | |
845 | ||
846 | if (!list_empty(&cstate->work_list)) { | |
847 | rctx = list_entry(cstate->work_list.next, | |
848 | struct mcryptd_hash_request_ctx, waiter); | |
849 | /* get the hash context and then flush time */ | |
850 | next_flush = rctx->tag.expire; | |
851 | mcryptd_arm_flusher(cstate, get_delay(next_flush)); | |
852 | } | |
853 | return next_flush; | |
854 | } | |
855 | ||
856 | static int __init sha1_mb_mod_init(void) | |
857 | { | |
858 | ||
859 | int cpu; | |
860 | int err; | |
861 | struct mcryptd_alg_cstate *cpu_state; | |
862 | ||
863 | /* check for dependent cpu features */ | |
864 | if (!boot_cpu_has(X86_FEATURE_AVX2) || | |
865 | !boot_cpu_has(X86_FEATURE_BMI2)) | |
866 | return -ENODEV; | |
867 | ||
868 | /* initialize multibuffer structures */ | |
869 | sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate); | |
870 | ||
871 | sha1_job_mgr_init = sha1_mb_mgr_init_avx2; | |
872 | sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2; | |
873 | sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2; | |
874 | sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2; | |
875 | ||
876 | if (!sha1_mb_alg_state.alg_cstate) | |
877 | return -ENOMEM; | |
878 | for_each_possible_cpu(cpu) { | |
879 | cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); | |
880 | cpu_state->next_flush = 0; | |
881 | cpu_state->next_seq_num = 0; | |
882 | cpu_state->flusher_engaged = false; | |
883 | INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); | |
884 | cpu_state->cpu = cpu; | |
885 | cpu_state->alg_state = &sha1_mb_alg_state; | |
886 | cpu_state->mgr = (struct sha1_ctx_mgr *) kzalloc(sizeof(struct sha1_ctx_mgr), GFP_KERNEL); | |
887 | if (!cpu_state->mgr) | |
888 | goto err2; | |
889 | sha1_ctx_mgr_init(cpu_state->mgr); | |
890 | INIT_LIST_HEAD(&cpu_state->work_list); | |
891 | spin_lock_init(&cpu_state->work_lock); | |
892 | } | |
893 | sha1_mb_alg_state.flusher = &sha1_mb_flusher; | |
894 | ||
895 | err = crypto_register_shash(&sha1_mb_shash_alg); | |
896 | if (err) | |
897 | goto err2; | |
898 | err = crypto_register_ahash(&sha1_mb_async_alg); | |
899 | if (err) | |
900 | goto err1; | |
901 | ||
902 | ||
903 | return 0; | |
904 | err1: | |
905 | crypto_unregister_shash(&sha1_mb_shash_alg); | |
906 | err2: | |
907 | for_each_possible_cpu(cpu) { | |
908 | cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); | |
909 | kfree(cpu_state->mgr); | |
910 | } | |
911 | free_percpu(sha1_mb_alg_state.alg_cstate); | |
912 | return -ENODEV; | |
913 | } | |
914 | ||
915 | static void __exit sha1_mb_mod_fini(void) | |
916 | { | |
917 | int cpu; | |
918 | struct mcryptd_alg_cstate *cpu_state; | |
919 | ||
920 | crypto_unregister_ahash(&sha1_mb_async_alg); | |
921 | crypto_unregister_shash(&sha1_mb_shash_alg); | |
922 | for_each_possible_cpu(cpu) { | |
923 | cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); | |
924 | kfree(cpu_state->mgr); | |
925 | } | |
926 | free_percpu(sha1_mb_alg_state.alg_cstate); | |
927 | } | |
928 | ||
929 | module_init(sha1_mb_mod_init); | |
930 | module_exit(sha1_mb_mod_fini); | |
931 | ||
932 | MODULE_LICENSE("GPL"); | |
933 | MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated"); | |
934 | ||
935 | MODULE_ALIAS("sha1"); |