staging: delete non-required instances of include <linux/init.h>
drivers/staging/sep/sep_crypto.c
1 /*
2 *
3 * sep_crypto.c - Crypto interface structures
4 *
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2010 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * CONTACTS:
22 *
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.02.22 Enable Kernel Crypto
31 *
32 */
33
34 /* #define DEBUG */
35 #include <linux/module.h>
36 #include <linux/miscdevice.h>
37 #include <linux/fs.h>
38 #include <linux/cdev.h>
39 #include <linux/kdev_t.h>
40 #include <linux/mutex.h>
41 #include <linux/sched.h>
42 #include <linux/mm.h>
43 #include <linux/poll.h>
44 #include <linux/wait.h>
45 #include <linux/pci.h>
46 #include <linux/pm_runtime.h>
47 #include <linux/err.h>
48 #include <linux/device.h>
49 #include <linux/errno.h>
50 #include <linux/interrupt.h>
51 #include <linux/kernel.h>
52 #include <linux/clk.h>
53 #include <linux/irq.h>
54 #include <linux/io.h>
55 #include <linux/platform_device.h>
56 #include <linux/list.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/delay.h>
59 #include <linux/jiffies.h>
60 #include <linux/workqueue.h>
61 #include <linux/crypto.h>
62 #include <crypto/internal/hash.h>
63 #include <crypto/scatterwalk.h>
64 #include <crypto/sha.h>
65 #include <crypto/md5.h>
66 #include <crypto/aes.h>
67 #include <crypto/des.h>
68 #include <crypto/hash.h>
69 #include "sep_driver_hw_defs.h"
70 #include "sep_driver_config.h"
71 #include "sep_driver_api.h"
72 #include "sep_dev.h"
73 #include "sep_crypto.h"
74
75 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
76
77 /* Globals for queuing */
78 static spinlock_t queue_lock;
79 static struct crypto_queue sep_queue;
80
81 /* Declaration of the dequeuer */
82 static void sep_dequeuer(void *data);
83
84
85 /**
86 * sep_do_callback
87 * @work: pointer to work_struct
88 * This is what is called by the queue; it is generic so that it
89 * can be used by any type of operation as each different callback
90 * function can use the data parameter in its own way
91 */
92 static void sep_do_callback(struct work_struct *work)
93 {
94 struct sep_work_struct *sep_work = container_of(work,
95 struct sep_work_struct, work);
96 if (sep_work != NULL) {
97 (sep_work->callback)(sep_work->data);
98 kfree(sep_work);
99 } else {
100 pr_debug("sep crypto: do callback - NULL container\n");
101 }
102 }
103
104 /**
105 * sep_submit_work
106 * @work_queue: pointer to struct workqueue_struct
107 * @funct: pointer to function to execute
108 * @data: pointer to data; function will know
109 * how to use it
110 * This is a generic API to submit something to
111 * the queue. The callback function will depend
112 * on what operation is to be done
113 */
114 static int sep_submit_work(struct workqueue_struct *work_queue,
115 void(*funct)(void *),
116 void *data)
117 {
118 struct sep_work_struct *sep_work;
119 int result;
120
121 sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);
122
123 if (sep_work == NULL) {
124 pr_debug("sep crypto: can't allocate work structure\n");
125 return -ENOMEM;
126 }
127
128 sep_work->callback = funct;
129 sep_work->data = data;
130 INIT_WORK(&sep_work->work, sep_do_callback);
131 result = queue_work(work_queue, &sep_work->work);
132 if (!result) {
133 pr_debug("sep_crypto: queue_work failed\n");
134 return -EINVAL;
135 }
136 return 0;
137 }
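/*
 * Usage sketch: later in this file the hash-update path hands the
 * queue dequeuer to the driver workqueue, roughly as
 *
 *	u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
 *				    sep_dequeuer, (void *)&sep_queue);
 *
 * Note that sep_do_callback() kfrees the sep_work_struct once the
 * callback has run, so the caller does not own it after queuing; the
 * GFP_ATOMIC allocation lets this be called from non-sleeping context.
 */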
138
139 /**
140 * sep_alloc_sg_buf -
141 * @sep: pointer to struct sep_device
142 * @size: total size of area
143 * @block_size: minimum size of chunks;
144 * each page is a whole multiple of this size
145 * @returns: pointer to struct scatterlist for new
146 * buffer
147 **/
148 static struct scatterlist *sep_alloc_sg_buf(
149 struct sep_device *sep,
150 size_t size,
151 size_t block_size)
152 {
153 u32 nbr_pages;
154 u32 ct1;
155 void *buf;
156 size_t current_size;
157 size_t real_page_size;
158
159 struct scatterlist *sg, *sg_temp;
160
161 if (size == 0)
162 return NULL;
163
164 dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");
165
166 current_size = 0;
167 nbr_pages = 0;
168 real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
169 /**
170 * The size of each page must be a multiple of the operation
171 * block size; increment by the modified page size until
172 * the total size is reached, then you have the number of
173 * pages
174 */
175 while (current_size < size) {
176 current_size += real_page_size;
177 nbr_pages += 1;
178 }
179
180 sg = kmalloc_array(nbr_pages, sizeof(struct scatterlist), GFP_ATOMIC);
181 if (!sg)
182 return NULL;
183
184 sg_init_table(sg, nbr_pages);
185
186 current_size = 0;
187 sg_temp = sg;
188 for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
189 buf = (void *)get_zeroed_page(GFP_ATOMIC);
190 if (!buf) {
191 dev_warn(&sep->pdev->dev,
192 "Cannot allocate page for new buffer\n");
193 kfree(sg);
194 return NULL;
195 }
196
197 sg_set_buf(sg_temp, buf, real_page_size);
198 if ((size - current_size) > real_page_size) {
199 sg_temp->length = real_page_size;
200 current_size += real_page_size;
201 } else {
202 sg_temp->length = (size - current_size);
203 current_size = size;
204 }
205 sg_temp = sg_next(sg);
206 }
207 return sg;
208 }
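/*
 * Worked example of the sizing above: with PAGE_SIZE = 4096 and an
 * AES block_size of 16, real_page_size = 4096 - (4096 % 16) = 4096,
 * so a 10000 byte request rounds up to nbr_pages = 3 and the last
 * entry is trimmed to 10000 - 2 * 4096 = 1808 bytes. Every entry
 * except possibly the last therefore stays a whole multiple of
 * block_size.
 */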
209
210 /**
211 * sep_free_sg_buf -
212 * @sg: pointer to struct scatterlist; points to area to free
213 */
214 static void sep_free_sg_buf(struct scatterlist *sg)
215 {
216 struct scatterlist *sg_temp = sg;
217 while (sg_temp) {
218 free_page((unsigned long)sg_virt(sg_temp));
219 sg_temp = sg_next(sg_temp);
220 }
221 kfree(sg);
222 }
223
224 /**
225 * sep_copy_sg -
226 * @sep: pointer to struct sep_device
227 * @sg_src: pointer to struct scatterlist for source
228 * @sg_dst: pointer to struct scatterlist for destination
229 * @size: size (in bytes) of data to copy
230 *
231 * Copy data from one scatterlist to another; both must
232 * be the same size
233 */
234 static void sep_copy_sg(
235 struct sep_device *sep,
236 struct scatterlist *sg_src,
237 struct scatterlist *sg_dst,
238 size_t size)
239 {
240 u32 seg_size;
241 u32 in_offset, out_offset;
242
243 u32 count = 0;
244 struct scatterlist *sg_src_tmp = sg_src;
245 struct scatterlist *sg_dst_tmp = sg_dst;
246 in_offset = 0;
247 out_offset = 0;
248
249 dev_dbg(&sep->pdev->dev, "sep copy sg\n");
250
251 if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
252 return;
253
254 dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
255
256 while (count < size) {
257 if ((sg_src_tmp->length - in_offset) >
258 (sg_dst_tmp->length - out_offset))
259 seg_size = sg_dst_tmp->length - out_offset;
260 else
261 seg_size = sg_src_tmp->length - in_offset;
262
263 if (seg_size > (size - count))
264 seg_size = size - count;
265
266 memcpy(sg_virt(sg_dst_tmp) + out_offset,
267 sg_virt(sg_src_tmp) + in_offset,
268 seg_size);
269
270 in_offset += seg_size;
271 out_offset += seg_size;
272 count += seg_size;
273
274 if (in_offset >= sg_src_tmp->length) {
275 sg_src_tmp = sg_next(sg_src_tmp);
276 in_offset = 0;
277 }
278
279 if (out_offset >= sg_dst_tmp->length) {
280 sg_dst_tmp = sg_next(sg_dst_tmp);
281 out_offset = 0;
282 }
283 }
284 }
285
286 /**
287 * sep_oddball_pages -
288 * @sep: pointer to struct sep_device
289 * @sg: pointer to struct scatterlist - buffer to check
290 * @data_size: total data size
291 * @block_size: minimum block size; pages must be multiples of this size
292 * @do_copy: 1 means do copy, 0 means do not copy
293 * @new_sg: pointer to location to put pointer to new sg area
294 * @returns: 1 if new scatterlist is needed; 0 if not needed;
295 * error value if operation failed
296 *
297 * The SEP device requires all pages to be multiples of the
298 * minimum block size appropriate for the operation
299 * This function checks all pages; if any are oddball sizes
300 * (not a multiple of the block size), it creates a new scatterlist.
301 * If the do_copy parameter is set to 1, then a scatterlist
302 * copy is performed. The pointer to the new scatterlist is
303 * put into the address supplied by the new_sg parameter; if
304 * no new scatterlist is needed, then a NULL is put into
305 * the location at new_sg.
306 *
307 */
308 static int sep_oddball_pages(
309 struct sep_device *sep,
310 struct scatterlist *sg,
311 size_t data_size,
312 u32 block_size,
313 struct scatterlist **new_sg,
314 u32 do_copy)
315 {
316 struct scatterlist *sg_temp;
317 u32 flag;
318 u32 nbr_pages, page_count;
319
320 dev_dbg(&sep->pdev->dev, "sep oddball\n");
321 if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
322 return 0;
323
324 dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
325 flag = 0;
326 nbr_pages = 0;
327 page_count = 0;
328 sg_temp = sg;
329
330 while (sg_temp) {
331 nbr_pages += 1;
332 sg_temp = sg_next(sg_temp);
333 }
334
335 sg_temp = sg;
336 while ((sg_temp) && (flag == 0)) {
337 page_count += 1;
338 if (sg_temp->length % block_size)
339 flag = 1;
340 else
341 sg_temp = sg_next(sg_temp);
342 }
343
344 /* Do not process if last (or only) page is oddball */
345 if (nbr_pages == page_count)
346 flag = 0;
347
348 if (flag) {
349 dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
350 *new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
351 if (*new_sg == NULL) {
352 dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
353 return -ENOMEM;
354 }
355
356 if (do_copy)
357 sep_copy_sg(sep, sg, *new_sg, data_size);
358
359 return 1;
360 } else {
361 return 0;
362 }
363 }
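/*
 * Return-value sketch: for a 512 byte request with 16 byte blocks, an
 * sg chain of lengths { 496, 16 } returns 0 (each entry is a block
 * multiple), while { 250, 262 } returns 1 and leaves a freshly
 * allocated, block-aligned chain in *new_sg. A chain whose only odd
 * entry is the final one (e.g. { 496, 18 }) also returns 0, per the
 * "last (or only) page" exception above.
 */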
364
365 /**
366 * sep_copy_offset_sg -
367 * @sep: pointer to struct sep_device;
368 * @sg: pointer to struct scatterlist
369 * @offset: offset into scatterlist memory
370 * @dst: place to put data
371 * @len: length of data
372 * @returns: number of bytes copied
373 *
374 * This copies data from a scatterlist buffer,
375 * offset from the beginning - it is needed for
376 * handling tail data in hash
377 */
378 static size_t sep_copy_offset_sg(
379 struct sep_device *sep,
380 struct scatterlist *sg,
381 u32 offset,
382 void *dst,
383 u32 len)
384 {
385 size_t page_start;
386 size_t page_end;
387 size_t offset_within_page;
388 size_t length_within_page;
389 size_t length_remaining;
390 size_t current_offset;
391
392 /* Find which page is beginning of segment */
393 page_start = 0;
394 page_end = sg->length;
395 while ((sg) && (offset > page_end)) {
396 page_start += sg->length;
397 sg = sg_next(sg);
398 if (sg)
399 page_end += sg->length;
400 }
401
402 if (sg == NULL)
403 return -ENOMEM;
404
405 offset_within_page = offset - page_start;
406 if ((sg->length - offset_within_page) >= len) {
407 /* All within this page */
408 memcpy(dst, sg_virt(sg) + offset_within_page, len);
409 return len;
410 } else {
411 /* Scattered multiple pages */
412 current_offset = 0;
413 length_remaining = len;
414 while ((sg) && (current_offset < len)) {
415 length_within_page = sg->length - offset_within_page;
416 if (length_within_page >= length_remaining) {
417 memcpy(dst+current_offset,
418 sg_virt(sg) + offset_within_page,
419 length_remaining);
420 length_remaining = 0;
421 current_offset = len;
422 } else {
423 memcpy(dst+current_offset,
424 sg_virt(sg) + offset_within_page,
425 length_within_page);
426 length_remaining -= length_within_page;
427 current_offset += length_within_page;
428 offset_within_page = 0;
429 sg = sg_next(sg);
430 }
431 }
432
433 if (sg == NULL)
434 return -ENOMEM;
435 }
436 return len;
437 }
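/*
 * Example: for an sg chain of lengths { 4096, 4096 }, offset 4000 and
 * len 200, the first 96 bytes come from the tail of the first entry
 * and the remaining 104 from the start of the second; the function
 * returns 200.
 */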
438
439 /**
440 * partial_overlap -
441 * @src_ptr: source pointer
442 * @dst_ptr: destination pointer
443 * @nbytes: number of bytes
444 * @returns: 0 for success; -EINVAL for partial overlap
445 * We cannot have any partial overlap. Total overlap
446 * where src is the same as dst is okay
447 */
448 static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
449 {
450 /* Check for partial overlap */
451 if (src_ptr != dst_ptr) {
452 if (src_ptr < dst_ptr) {
453 if ((src_ptr + nbytes) > dst_ptr)
454 return -EINVAL;
455 } else {
456 if ((dst_ptr + nbytes) > src_ptr)
457 return -EINVAL;
458 }
459 }
460
461 return 0;
462 }
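/*
 * Example: src at 0x1000 and dst at 0x1800 with nbytes 0x1000 overlap
 * partially (0x1000 + 0x1000 > 0x1800), so -EINVAL is returned, while
 * src == dst (a fully in-place operation) passes. The arithmetic on
 * void pointers relies on the GCC extension the kernel is built with.
 */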
463
464 /* Debug - prints only if DEBUG is defined */
465 static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
466
467 {
468 unsigned char *cptr;
469 struct sep_aes_internal_context *aes_internal;
470 struct sep_des_internal_context *des_internal;
471 int ct1;
472
473 struct this_task_ctx *ta_ctx;
474 struct crypto_ablkcipher *tfm;
475 struct sep_system_ctx *sctx;
476
477 ta_ctx = ablkcipher_request_ctx(req);
478 tfm = crypto_ablkcipher_reqtfm(req);
479 sctx = crypto_ablkcipher_ctx(tfm);
480
481 dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
482 if ((ta_ctx->current_request == DES_CBC) &&
483 (ta_ctx->des_opmode == SEP_DES_CBC)) {
484
485 des_internal = (struct sep_des_internal_context *)
486 sctx->des_private_ctx.ctx_buf;
487 /* print vendor */
488 dev_dbg(&ta_ctx->sep_used->pdev->dev,
489 "sep - vendor iv for DES\n");
490 cptr = (unsigned char *)des_internal->iv_context;
491 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
492 dev_dbg(&ta_ctx->sep_used->pdev->dev,
493 "%02x\n", *(cptr + ct1));
494
495 /* print walk */
496 dev_dbg(&ta_ctx->sep_used->pdev->dev,
497 "sep - walk from kernel crypto iv for DES\n");
498 cptr = (unsigned char *)ta_ctx->walk.iv;
499 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
500 dev_dbg(&ta_ctx->sep_used->pdev->dev,
501 "%02x\n", *(cptr + ct1));
502 } else if ((ta_ctx->current_request == AES_CBC) &&
503 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
504
505 aes_internal = (struct sep_aes_internal_context *)
506 sctx->aes_private_ctx.cbuff;
507 /* print vendor */
508 dev_dbg(&ta_ctx->sep_used->pdev->dev,
509 "sep - vendor iv for AES\n");
510 cptr = (unsigned char *)aes_internal->aes_ctx_iv;
511 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
512 dev_dbg(&ta_ctx->sep_used->pdev->dev,
513 "%02x\n", *(cptr + ct1));
514
515 /* print walk */
516 dev_dbg(&ta_ctx->sep_used->pdev->dev,
517 "sep - walk from kernel crypto iv for AES\n");
518 cptr = (unsigned char *)ta_ctx->walk.iv;
519 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
520 dev_dbg(&ta_ctx->sep_used->pdev->dev,
521 "%02x\n", *(cptr + ct1));
522 }
523 }
524
525 /**
526 * RFC2451: Weak key check
527 * Returns: 1 (weak), 0 (not weak)
528 */
529 static int sep_weak_key(const u8 *key, unsigned int keylen)
530 {
531 static const u8 parity[] = {
532 8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
533 0, 8, 8, 0, 8, 0, 0, 8, 8,
534 0, 0, 8, 0, 8, 8, 3,
535 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
536 8, 0, 0, 8, 0, 8, 8, 0, 0,
537 8, 8, 0, 8, 0, 0, 8,
538 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
539 8, 0, 0, 8, 0, 8, 8, 0, 0,
540 8, 8, 0, 8, 0, 0, 8,
541 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
542 0, 8, 8, 0, 8, 0, 0, 8, 8,
543 0, 0, 8, 0, 8, 8, 0,
544 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
545 8, 0, 0, 8, 0, 8, 8, 0, 0,
546 8, 8, 0, 8, 0, 0, 8,
547 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
548 0, 8, 8, 0, 8, 0, 0, 8, 8,
549 0, 0, 8, 0, 8, 8, 0,
550 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
551 0, 8, 8, 0, 8, 0, 0, 8, 8,
552 0, 0, 8, 0, 8, 8, 0,
553 4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
554 8, 5, 0, 8, 0, 8, 8, 0, 0,
555 8, 8, 0, 8, 0, 6, 8,
556 };
557
558 u32 n, w;
559
560 n = parity[key[0]]; n <<= 4;
561 n |= parity[key[1]]; n <<= 4;
562 n |= parity[key[2]]; n <<= 4;
563 n |= parity[key[3]]; n <<= 4;
564 n |= parity[key[4]]; n <<= 4;
565 n |= parity[key[5]]; n <<= 4;
566 n |= parity[key[6]]; n <<= 4;
567 n |= parity[key[7]];
568 w = 0x88888888L;
569
570 /* 1 in 10^10 keys passes this test */
571 if (!((n - (w >> 3)) & w)) {
572 if (n < 0x41415151) {
573 if (n < 0x31312121) {
574 if (n < 0x14141515) {
575 /* 01 01 01 01 01 01 01 01 */
576 if (n == 0x11111111)
577 goto weak;
578 /* 01 1F 01 1F 01 0E 01 0E */
579 if (n == 0x13131212)
580 goto weak;
581 } else {
582 /* 01 E0 01 E0 01 F1 01 F1 */
583 if (n == 0x14141515)
584 goto weak;
585 /* 01 FE 01 FE 01 FE 01 FE */
586 if (n == 0x16161616)
587 goto weak;
588 }
589 } else {
590 if (n < 0x34342525) {
591 /* 1F 01 1F 01 0E 01 0E 01 */
592 if (n == 0x31312121)
593 goto weak;
594 /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
595 if (n == 0x33332222)
596 goto weak;
597 } else {
598 /* 1F E0 1F E0 0E F1 0E F1 */
599 if (n == 0x34342525)
600 goto weak;
601 /* 1F FE 1F FE 0E FE 0E FE */
602 if (n == 0x36362626)
603 goto weak;
604 }
605 }
606 } else {
607 if (n < 0x61616161) {
608 if (n < 0x44445555) {
609 /* E0 01 E0 01 F1 01 F1 01 */
610 if (n == 0x41415151)
611 goto weak;
612 /* E0 1F E0 1F F1 0E F1 0E */
613 if (n == 0x43435252)
614 goto weak;
615 } else {
616 /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
617 if (n == 0x44445555)
618 goto weak;
619 /* E0 FE E0 FE F1 FE F1 FE */
620 if (n == 0x46465656)
621 goto weak;
622 }
623 } else {
624 if (n < 0x64646565) {
625 /* FE 01 FE 01 FE 01 FE 01 */
626 if (n == 0x61616161)
627 goto weak;
628 /* FE 1F FE 1F FE 0E FE 0E */
629 if (n == 0x63636262)
630 goto weak;
631 } else {
632 /* FE E0 FE E0 FE F1 FE F1 */
633 if (n == 0x64646565)
634 goto weak;
635 /* FE FE FE FE FE FE FE FE */
636 if (n == 0x66666666)
637 goto weak;
638 }
639 }
640 }
641 }
642 return 0;
643 weak:
644 return 1;
645 }
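/*
 * Example: the all-0x01 DES key folds to n == 0x11111111, since
 * parity[0x01] == 1 contributes one nibble per key byte; that matches
 * the first pattern above ("01 01 01 01 01 01 01 01") and the key is
 * reported weak.
 */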
646 /**
647 * sep_sg_nents
648 */
649 static u32 sep_sg_nents(struct scatterlist *sg)
650 {
651 u32 ct1 = 0;
652 while (sg) {
653 ct1 += 1;
654 sg = sg_next(sg);
655 }
656
657 return ct1;
658 }
659
660 /**
661 * sep_start_msg -
662 * @ta_ctx: pointer to struct this_task_ctx
663 * @returns: offset to place for the next word in the message
664 * Set up pointer in message pool for new message
665 */
666 static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
667 {
668 u32 *word_ptr;
669 ta_ctx->msg_len_words = 2;
670 ta_ctx->msgptr = ta_ctx->msg;
671 memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
672 ta_ctx->msgptr += sizeof(u32) * 2;
673 word_ptr = (u32 *)ta_ctx->msgptr;
674 *word_ptr = SEP_START_MSG_TOKEN;
675 return sizeof(u32) * 2;
676 }
677
678 /**
679 * sep_end_msg -
680 * @ta_ctx: pointer to struct this_task_ctx
681 * @msg_offset: current message offset
682 *
683 * End message; set length and CRC; and
684 * send interrupt to the SEP
685 */
686 static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
687 {
688 u32 *word_ptr;
689 /* Msg size goes into msg after token */
690 ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
691 word_ptr = (u32 *)ta_ctx->msgptr;
692 word_ptr += 1;
693 *word_ptr = ta_ctx->msg_len_words;
694
695 /* CRC (currently 0) goes at end of msg */
696 word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
697 *word_ptr = 0;
698 }
699
700 /**
701 * sep_start_inbound_msg -
702 * @ta_ctx: pointer to struct this_task_ctx
703 * @msg_offset: offset to place for the next word in the message
704 * @returns: 0 for success; error value for failure
705 * Set up pointer in message pool for inbound message
706 */
707 static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
708 {
709 u32 *word_ptr;
710 u32 token;
711 u32 error = SEP_OK;
712
713 *msg_offset = sizeof(u32) * 2;
714 word_ptr = (u32 *)ta_ctx->msgptr;
715 token = *word_ptr;
716 ta_ctx->msg_len_words = *(word_ptr + 1);
717
718 if (token != SEP_START_MSG_TOKEN) {
719 error = SEP_INVALID_START;
720 goto end_function;
721 }
722
723 end_function:
724
725 return error;
726 }
727
728 /**
729 * sep_write_msg -
730 * @ta_ctx: pointer to struct this_task_ctx
731 * @in_addr: pointer to start of parameter
732 * @size: size of parameter to copy (in bytes)
733 * @max_size: size to move up offset; SEP msg is in word sizes
734 * @msg_offset: pointer to current offset (is updated)
735 * @byte_array: flag to indicate whether endianness must be changed
736 * Copies data into the message area from caller
737 */
738 static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
739 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
740 {
741 u32 *word_ptr;
742 void *void_ptr;
743 void_ptr = ta_ctx->msgptr + *msg_offset;
744 word_ptr = (u32 *)void_ptr;
745 memcpy(void_ptr, in_addr, size);
746 *msg_offset += max_size;
747
748 /* Do we need to manipulate endian? */
749 if (byte_array) {
750 u32 i;
751 for (i = 0; i < ((size + 3) / 4); i += 1)
752 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
753 }
754 }
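/*
 * byte_array example: a 16 byte IV written with byte_array == 1 has
 * each of its four u32 words run through CHG_ENDIAN(), presumably
 * because the SEP firmware consumes byte arrays in the opposite byte
 * order from the host's native word layout; plain integers such as
 * sizes and modes are written with byte_array == 0 and left as-is.
 */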
755
756 /**
757 * sep_make_header
758 * @ta_ctx: pointer to struct this_task_ctx
759 * @msg_offset: pointer to current offset (is updated)
760 * @op_code: op code to put into message
761 * Puts op code into message and updates offset
762 */
763 static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
764 u32 op_code)
765 {
766 u32 *word_ptr;
767
768 *msg_offset = sep_start_msg(ta_ctx);
769 word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
770 *word_ptr = op_code;
771 *msg_offset += sizeof(u32);
772 }
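/*
 * Message layout after sep_make_header(), relative to ta_ctx->msgptr
 * (which itself sits two words past the start of ta_ctx->msg):
 *
 *	word 0: SEP_START_MSG_TOKEN
 *	word 1: total length in words (filled in by sep_end_msg())
 *	word 2: op code
 *	word 3+: parameters added via sep_write_msg()
 *	last word: CRC, currently always written as 0 by sep_end_msg()
 */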
773
774
775
776 /**
777 * sep_read_msg -
778 * @ta_ctx: pointer to struct this_task_ctx
779 * @in_addr: pointer to start of parameter
780 * @size: size of parameter to copy (in bytes)
781 * @max_size: size to move up offset; SEP msg is in word sizes
782 * @msg_offset: pointer to current offset (is updated)
783 * @byte_array: flag to indicate whether endianness must be changed
784 * Copies data out of the message area to caller
785 */
786 static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
787 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
788 {
789 u32 *word_ptr;
790 void *void_ptr;
791 void_ptr = ta_ctx->msgptr + *msg_offset;
792 word_ptr = (u32 *)void_ptr;
793
794 /* Do we need to manipulate endian? */
795 if (byte_array) {
796 u32 i;
797 for (i = 0; i < ((size + 3) / 4); i += 1)
798 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
799 }
800
801 memcpy(in_addr, void_ptr, size);
802 *msg_offset += max_size;
803 }
804
805 /**
806 * sep_verify_op -
807 * @ta_ctx: pointer to struct this_task_ctx
808 * @op_code: expected op_code
809 * @msg_offset: pointer to current offset (is updated)
810 * @returns: 0 for success; error for failure
811 */
812 static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
813 u32 *msg_offset)
814 {
815 u32 error;
816 u32 in_ary[2];
817
818 struct sep_device *sep = ta_ctx->sep_used;
819
820 dev_dbg(&sep->pdev->dev, "dumping return message\n");
821 error = sep_start_inbound_msg(ta_ctx, msg_offset);
822 if (error) {
823 dev_warn(&sep->pdev->dev,
824 "sep_start_inbound_msg error\n");
825 return error;
826 }
827
828 sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
829 msg_offset, 0);
830
831 if (in_ary[0] != op_code) {
832 dev_warn(&sep->pdev->dev,
833 "sep got back wrong opcode\n");
834 dev_warn(&sep->pdev->dev,
835 "got back %x; expected %x\n",
836 in_ary[0], op_code);
837 return SEP_WRONG_OPCODE;
838 }
839
840 if (in_ary[1] != SEP_OK) {
841 dev_warn(&sep->pdev->dev,
842 "sep execution error\n");
843 dev_warn(&sep->pdev->dev,
844 "got back %x; expected %x\n",
845 in_ary[1], SEP_OK);
846 return in_ary[0];
847 }
848
849 return 0;
850 }
851
852 /**
853 * sep_read_context -
854 * @ta_ctx: pointer to struct this_task_ctx
855 * @msg_offset: point to current place in SEP msg; is updated
856 * @dst: pointer to place to put the context
857 * @len: size of the context structure (differs for crypto/hash)
858 * This function reads the context from the msg area
859 * There is a special way the vendor needs to have the maximum
860 * length calculated so that the msg_offset is updated properly;
861 * it skips over some words in the msg area depending on the size
862 * of the context
863 */
864 static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
865 void *dst, u32 len)
866 {
867 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
868 sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
869 }
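/*
 * The max_length computation rounds len up to a whole number of u32
 * words: a 70 byte context, for instance, advances *msg_offset by 72
 * bytes, keeping the next message field word aligned.
 */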
870
871 /**
872 * sep_write_context -
873 * @ta_ctx: pointer to struct this_task_ctx
874 * @msg_offset: point to current place in SEP msg; is updated
875 * @src: pointer to the current context
876 * @len: size of the context structure (differs for crypto/hash)
877 * This function writes the context to the msg area
878 * There is a special way the vendor needs to have the maximum
879 * length calculated so that the msg_offset is updated properly;
880 * it skips over some words in the msg area depending on the size
881 * of the context
882 */
883 static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
884 void *src, u32 len)
885 {
886 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
887 sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
888 }
889
890 /**
891 * sep_clear_out -
892 * @ta_ctx: pointer to struct this_task_ctx
893 * Clear out crypto related values in sep device structure
894 * to enable device to be used by anyone; either kernel
895 * crypto or userspace app via middleware
896 */
897 static void sep_clear_out(struct this_task_ctx *ta_ctx)
898 {
899 if (ta_ctx->src_sg_hold) {
900 sep_free_sg_buf(ta_ctx->src_sg_hold);
901 ta_ctx->src_sg_hold = NULL;
902 }
903
904 if (ta_ctx->dst_sg_hold) {
905 sep_free_sg_buf(ta_ctx->dst_sg_hold);
906 ta_ctx->dst_sg_hold = NULL;
907 }
908
909 ta_ctx->src_sg = NULL;
910 ta_ctx->dst_sg = NULL;
911
912 sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);
913
914 if (ta_ctx->i_own_sep) {
915 /**
916 * The following unlocks the sep and makes it available
917 * to any other application
918 * First, null out crypto entries in sep before releasing it
919 */
920 ta_ctx->sep_used->current_hash_req = NULL;
921 ta_ctx->sep_used->current_cypher_req = NULL;
922 ta_ctx->sep_used->current_request = 0;
923 ta_ctx->sep_used->current_hash_stage = 0;
924 ta_ctx->sep_used->ta_ctx = NULL;
925 ta_ctx->sep_used->in_kernel = 0;
926
927 ta_ctx->call_status.status = 0;
928
929 /* Remove anything confidential */
930 memset(ta_ctx->sep_used->shared_addr, 0,
931 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
932
933 sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
934
935 #ifdef SEP_ENABLE_RUNTIME_PM
936 ta_ctx->sep_used->in_use = 0;
937 pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
938 pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
939 #endif
940
941 clear_bit(SEP_WORKING_LOCK_BIT,
942 &ta_ctx->sep_used->in_use_flags);
943 ta_ctx->sep_used->pid_doing_transaction = 0;
944
945 dev_dbg(&ta_ctx->sep_used->pdev->dev,
946 "[PID%d] waking up next transaction\n",
947 current->pid);
948
949 clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
950 &ta_ctx->sep_used->in_use_flags);
951 wake_up(&ta_ctx->sep_used->event_transactions);
952
953 ta_ctx->i_own_sep = 0;
954 }
955 }
956
957 /**
958 * Release crypto infrastructure from EINPROGRESS and
959 * clear sep_dev so that SEP is available to anyone
960 */
961 static void sep_crypto_release(struct sep_system_ctx *sctx,
962 struct this_task_ctx *ta_ctx, u32 error)
963 {
964 struct ahash_request *hash_req = ta_ctx->current_hash_req;
965 struct ablkcipher_request *cypher_req =
966 ta_ctx->current_cypher_req;
967 struct sep_device *sep = ta_ctx->sep_used;
968
969 sep_clear_out(ta_ctx);
970
971 /**
972 * This may not yet exist depending when we
973 * chose to bail out. If it does exist, set
974 * it to 1
975 */
976 if (ta_ctx->are_we_done_yet != NULL)
977 *ta_ctx->are_we_done_yet = 1;
978
979 if (cypher_req != NULL) {
980 if ((sctx->key_sent == 1) ||
981 ((error != 0) && (error != -EINPROGRESS))) {
982 if (cypher_req->base.complete == NULL) {
983 dev_dbg(&sep->pdev->dev,
984 "release is null for cypher!");
985 } else {
986 cypher_req->base.complete(
987 &cypher_req->base, error);
988 }
989 }
990 }
991
992 if (hash_req != NULL) {
993 if (hash_req->base.complete == NULL) {
994 dev_dbg(&sep->pdev->dev,
995 "release is null for hash!");
996 } else {
997 hash_req->base.complete(
998 &hash_req->base, error);
999 }
1000 }
1001 }
1002
1003 /**
1004 * This is where we grab the sep itself and tell it to do something.
1005 * It will sleep if the sep is currently busy
1006 * and it will return 0 if sep is now ours; error value if there
1007 * were problems
1008 */
1009 static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
1010 {
1011 struct sep_device *sep = ta_ctx->sep_used;
1012 int result;
1013 struct sep_msgarea_hdr *my_msg_header;
1014
1015 my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
1016
1017 /* add to status queue */
1018 ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
1019 ta_ctx->nbytes, current->pid,
1020 current->comm, sizeof(current->comm));
1021
1022 if (!ta_ctx->queue_elem) {
1023 dev_dbg(&sep->pdev->dev,
1024 "[PID%d] updating queue status error\n", current->pid);
1025 return -EINVAL;
1026 }
1027
1028 /* get the device; this can sleep */
1029 result = sep_wait_transaction(sep);
1030 if (result)
1031 return result;
1032
1033 if (sep_dev->power_save_setup == 1)
1034 pm_runtime_get_sync(&sep_dev->pdev->dev);
1035
1036 /* Copy in the message */
1037 memcpy(sep->shared_addr, ta_ctx->msg,
1038 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1039
1040 /* Copy in the dcb information if there is any */
1041 if (ta_ctx->dcb_region) {
1042 result = sep_activate_dcb_dmatables_context(sep,
1043 &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
1044 ta_ctx->dma_ctx);
1045 if (result)
1046 return result;
1047 }
1048
1049 /* Mark the device so we know how to finish the job in the tasklet */
1050 if (ta_ctx->current_hash_req)
1051 sep->current_hash_req = ta_ctx->current_hash_req;
1052 else
1053 sep->current_cypher_req = ta_ctx->current_cypher_req;
1054
1055 sep->current_request = ta_ctx->current_request;
1056 sep->current_hash_stage = ta_ctx->current_hash_stage;
1057 sep->ta_ctx = ta_ctx;
1058 sep->in_kernel = 1;
1059 ta_ctx->i_own_sep = 1;
1060
1061 /* need to set bit first to avoid race condition with interrupt */
1062 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
1063
1064 result = sep_send_command_handler(sep);
1065
1066 dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
1067 current->pid);
1068
1069 if (!result)
1070 dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
1071 current->pid);
1072 else {
1073 dev_dbg(&sep->pdev->dev, "[PID%d]: can't send command\n",
1074 current->pid);
1075 clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1076 &ta_ctx->call_status.status);
1077 }
1078
1079 return result;
1080 }
1081
1082 /**
1083 * This function sets things up for a crypto data block process
1084 * This does all preparation, but does not try to grab the
1085 * sep
1086 * @req: pointer to struct ablkcipher_request
1087 * returns: 0 if all went well, non-zero if error
1088 */
1089 static int sep_crypto_block_data(struct ablkcipher_request *req)
1090 {
1091
1092 int int_error;
1093 u32 msg_offset;
1094 static u32 msg[10];
1095 void *src_ptr;
1096 void *dst_ptr;
1097
1098 static char small_buf[100];
1099 ssize_t copy_result;
1100 int result;
1101
1102 struct scatterlist *new_sg;
1103 struct this_task_ctx *ta_ctx;
1104 struct crypto_ablkcipher *tfm;
1105 struct sep_system_ctx *sctx;
1106
1107 struct sep_des_internal_context *des_internal;
1108 struct sep_aes_internal_context *aes_internal;
1109
1110 ta_ctx = ablkcipher_request_ctx(req);
1111 tfm = crypto_ablkcipher_reqtfm(req);
1112 sctx = crypto_ablkcipher_ctx(tfm);
1113
1114 /* start the walk on scatterlists */
1115 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1116 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
1117 req->nbytes);
1118
1119 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1120 if (int_error) {
1121 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1122 int_error);
1123 return -ENOMEM;
1124 }
1125
1126 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1127 "crypto block: src is %lx dst is %lx\n",
1128 (unsigned long)req->src, (unsigned long)req->dst);
1129
1130 /* Make sure all pages are an even multiple of the block size */
1131 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
1132 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
1133
1134 if (int_error < 0) {
1135 dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
1136 return int_error;
1137 } else if (int_error == 1) {
1138 ta_ctx->src_sg = new_sg;
1139 ta_ctx->src_sg_hold = new_sg;
1140 } else {
1141 ta_ctx->src_sg = req->src;
1142 ta_ctx->src_sg_hold = NULL;
1143 }
1144
1145 int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
1146 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
1147
1148 if (int_error < 0) {
1149 dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error %x\n",
1150 int_error);
1151 return int_error;
1152 } else if (int_error == 1) {
1153 ta_ctx->dst_sg = new_sg;
1154 ta_ctx->dst_sg_hold = new_sg;
1155 } else {
1156 ta_ctx->dst_sg = req->dst;
1157 ta_ctx->dst_sg_hold = NULL;
1158 }
1159
1160 /* set nbytes for queue status */
1161 ta_ctx->nbytes = req->nbytes;
1162
1163 /* Key already done; this is for data */
1164 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
1165
1166 /* check for valid data and proper spacing */
1167 src_ptr = sg_virt(ta_ctx->src_sg);
1168 dst_ptr = sg_virt(ta_ctx->dst_sg);
1169
1170 if (!src_ptr || !dst_ptr ||
1171 (ta_ctx->current_cypher_req->nbytes %
1172 crypto_ablkcipher_blocksize(tfm))) {
1173
1174 dev_warn(&ta_ctx->sep_used->pdev->dev,
1175 "cipher block size odd\n");
1176 dev_warn(&ta_ctx->sep_used->pdev->dev,
1177 "cipher block size is %x\n",
1178 crypto_ablkcipher_blocksize(tfm));
1179 dev_warn(&ta_ctx->sep_used->pdev->dev,
1180 "cipher data size is %x\n",
1181 ta_ctx->current_cypher_req->nbytes);
1182 return -EINVAL;
1183 }
1184
1185 if (partial_overlap(src_ptr, dst_ptr,
1186 ta_ctx->current_cypher_req->nbytes)) {
1187 dev_warn(&ta_ctx->sep_used->pdev->dev,
1188 "block partial overlap\n");
1189 return -EINVAL;
1190 }
1191
1192 /* Put together the message */
1193 sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
1194
1195 /* If des, and size is 1 block, put directly in msg */
1196 if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
1197 (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
1198
1199 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1200 "writing out one block des\n");
1201
1202 copy_result = sg_copy_to_buffer(
1203 ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
1204 small_buf, crypto_ablkcipher_blocksize(tfm));
1205
1206 if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
1207 dev_warn(&ta_ctx->sep_used->pdev->dev,
1208 "des block copy failed\n");
1209 return -ENOMEM;
1210 }
1211
1212 /* Put data into message */
1213 sep_write_msg(ta_ctx, small_buf,
1214 crypto_ablkcipher_blocksize(tfm),
1215 crypto_ablkcipher_blocksize(tfm) * 2,
1216 &msg_offset, 1);
1217
1218 /* Put size into message */
1219 sep_write_msg(ta_ctx, &req->nbytes,
1220 sizeof(u32), sizeof(u32), &msg_offset, 0);
1221 } else {
1222 /* Otherwise, fill out dma tables */
1223 ta_ctx->dcb_input_data.app_in_address = src_ptr;
1224 ta_ctx->dcb_input_data.data_in_size = req->nbytes;
1225 ta_ctx->dcb_input_data.app_out_address = dst_ptr;
1226 ta_ctx->dcb_input_data.block_size =
1227 crypto_ablkcipher_blocksize(tfm);
1228 ta_ctx->dcb_input_data.tail_block_size = 0;
1229 ta_ctx->dcb_input_data.is_applet = 0;
1230 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
1231 ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
1232
1233 result = sep_create_dcb_dmatables_context_kernel(
1234 ta_ctx->sep_used,
1235 &ta_ctx->dcb_region,
1236 &ta_ctx->dmatables_region,
1237 &ta_ctx->dma_ctx,
1238 &ta_ctx->dcb_input_data,
1239 1);
1240 if (result) {
1241 dev_warn(&ta_ctx->sep_used->pdev->dev,
1242 "crypto dma table create failed\n");
1243 return -EINVAL;
1244 }
1245
1246 /* Portion of msg is nulled (no data) */
1247 msg[0] = (u32)0;
1248 msg[1] = (u32)0;
1249 msg[2] = (u32)0;
1250 msg[3] = (u32)0;
1251 msg[4] = (u32)0;
1252 sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
1253 sizeof(u32) * 5, &msg_offset, 0);
1254 }
1255
1256 /**
1257 * Before we write the message, we need to overwrite the
1258 * vendor's IV with the one from our own ablkcipher walk
1259 * iv because this is needed for dm-crypt
1260 */
1261 sep_dump_ivs(req, "sending data block to sep\n");
1262 if ((ta_ctx->current_request == DES_CBC) &&
1263 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1264
1265 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1266 "overwrite vendor iv on DES\n");
1267 des_internal = (struct sep_des_internal_context *)
1268 sctx->des_private_ctx.ctx_buf;
1269 memcpy((void *)des_internal->iv_context,
1270 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1271 } else if ((ta_ctx->current_request == AES_CBC) &&
1272 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1273
1274 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1275 "overwrite vendor iv on AES\n");
1276 aes_internal = (struct sep_aes_internal_context *)
1277 sctx->aes_private_ctx.cbuff;
1278 memcpy((void *)aes_internal->aes_ctx_iv,
1279 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1280 }
1281
1282 /* Write context into message */
1283 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1284 sep_write_context(ta_ctx, &msg_offset,
1285 &sctx->des_private_ctx,
1286 sizeof(struct sep_des_private_context));
1287 } else {
1288 sep_write_context(ta_ctx, &msg_offset,
1289 &sctx->aes_private_ctx,
1290 sizeof(struct sep_aes_private_context));
1291 }
1292
1293 /* conclude message */
1294 sep_end_msg(ta_ctx, msg_offset);
1295
1296 /* Parent (caller) is now ready to tell the sep to go ahead */
1297 return 0;
1298 }
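/*
 * Flow sketch for the message built above: sep_crypto_block() next
 * calls sep_crypto_take_sep(), which copies it to the shared area and
 * signals the SEP; completion comes back through the interrupt
 * tasklet (sep_finish()) into crypto_post_op(), which reads the
 * context, and for the one-block DES case the data, back out.
 */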
1299
1300
1301 /**
1302 * This function sets things up for a crypto key submit process
1303 * This does all preparation, but does not try to grab the
1304 * sep
1305 * @req: pointer to struct ablkcipher_request
1306 * returns: 0 if all went well, non-zero if error
1307 */
1308 static int sep_crypto_send_key(struct ablkcipher_request *req)
1309 {
1310
1311 int int_error;
1312 u32 msg_offset;
1313 static u32 msg[10];
1314
1315 u32 max_length;
1316 struct this_task_ctx *ta_ctx;
1317 struct crypto_ablkcipher *tfm;
1318 struct sep_system_ctx *sctx;
1319
1320 ta_ctx = ablkcipher_request_ctx(req);
1321 tfm = crypto_ablkcipher_reqtfm(req);
1322 sctx = crypto_ablkcipher_ctx(tfm);
1323
1324 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
1325
1326 /* start the walk on scatterlists */
1327 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1328 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1329 "sep crypto block data size of %x\n", req->nbytes);
1330
1331 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1332 if (int_error) {
1333 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1334 int_error);
1335 return -ENOMEM;
1336 }
1337
1338 /* check iv */
1339 if ((ta_ctx->current_request == DES_CBC) &&
1340 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1341 if (!ta_ctx->walk.iv) {
1342 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1343 return -EINVAL;
1344 }
1345
1346 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
1347 }
1348
1349 if ((ta_ctx->current_request == AES_CBC) &&
1350 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1351 if (!ta_ctx->walk.iv) {
1352 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1353 return -EINVAL;
1354 }
1355
1356 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1357 }
1358
1359 /* put together message to SEP */
1360 /* Start with op code */
1361 sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
1362
1363 /* now deal with IV */
1364 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1365 if (ta_ctx->des_opmode == SEP_DES_CBC) {
1366 sep_write_msg(ta_ctx, ta_ctx->iv,
1367 SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
1368 &msg_offset, 1);
1369 } else {
1370 /* Skip if ECB */
1371 msg_offset += 4 * sizeof(u32);
1372 }
1373 } else {
1374 max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
1375 sizeof(u32)) * sizeof(u32);
1376 if (ta_ctx->aes_opmode == SEP_AES_CBC) {
1377 sep_write_msg(ta_ctx, ta_ctx->iv,
1378 SEP_AES_IV_SIZE_BYTES, max_length,
1379 &msg_offset, 1);
1380 } else {
1381 /* Skip if ECB */
1382 msg_offset += max_length;
1383 }
1384 }
1385
1386 /* load the key */
1387 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1388 sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
1389 sizeof(u32) * 8, sizeof(u32) * 8,
1390 &msg_offset, 1);
1391
1392 msg[0] = (u32)sctx->des_nbr_keys;
1393 msg[1] = (u32)ta_ctx->des_encmode;
1394 msg[2] = (u32)ta_ctx->des_opmode;
1395
1396 sep_write_msg(ta_ctx, (void *)msg,
1397 sizeof(u32) * 3, sizeof(u32) * 3,
1398 &msg_offset, 0);
1399 } else {
1400 sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
1401 sctx->keylen,
1402 SEP_AES_MAX_KEY_SIZE_BYTES,
1403 &msg_offset, 1);
1404
1405 msg[0] = (u32)sctx->aes_key_size;
1406 msg[1] = (u32)ta_ctx->aes_encmode;
1407 msg[2] = (u32)ta_ctx->aes_opmode;
1408 msg[3] = (u32)0; /* Secret key is not used */
1409 sep_write_msg(ta_ctx, (void *)msg,
1410 sizeof(u32) * 4, sizeof(u32) * 4,
1411 &msg_offset, 0);
1412 }
1413
1414 /* conclude message */
1415 sep_end_msg(ta_ctx, msg_offset);
1416
1417 /* Parent (caller) is now ready to tell the sep to go ahead */
1418 return 0;
1419 }
1420
1421
1422 /* This needs to be run from a work queue as it can sleep */
1423 static void sep_crypto_block(void *data)
1424 {
1425 unsigned long end_time;
1426
1427 int result;
1428
1429 struct ablkcipher_request *req;
1430 struct this_task_ctx *ta_ctx;
1431 struct crypto_ablkcipher *tfm;
1432 struct sep_system_ctx *sctx;
1433 int are_we_done_yet;
1434
1435 req = (struct ablkcipher_request *)data;
1436 ta_ctx = ablkcipher_request_ctx(req);
1437 tfm = crypto_ablkcipher_reqtfm(req);
1438 sctx = crypto_ablkcipher_ctx(tfm);
1439
1440 ta_ctx->are_we_done_yet = &are_we_done_yet;
1441
1442 pr_debug("sep_crypto_block\n");
1443 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
1444 tfm, sctx, ta_ctx);
1445 pr_debug("key_sent is %d\n", sctx->key_sent);
1446
1447 /* do we need to send the key */
1448 if (sctx->key_sent == 0) {
1449 are_we_done_yet = 0;
1450 result = sep_crypto_send_key(req); /* prep to send key */
1451 if (result != 0) {
1452 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1453 "could not prep key %x\n", result);
1454 sep_crypto_release(sctx, ta_ctx, result);
1455 return;
1456 }
1457
1458 result = sep_crypto_take_sep(ta_ctx);
1459 if (result) {
1460 dev_warn(&ta_ctx->sep_used->pdev->dev,
1461 "sep_crypto_take_sep for key send failed\n");
1462 sep_crypto_release(sctx, ta_ctx, result);
1463 return;
1464 }
1465
1466 /* now we sit and wait up to a fixed time for completion */
1467 end_time = jiffies + (WAIT_TIME * HZ);
1468 while ((time_before(jiffies, end_time)) &&
1469 (are_we_done_yet == 0))
1470 schedule();
1471
1472 /* Done waiting; still not done yet? */
1473 if (are_we_done_yet == 0) {
1474 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1475 "Send key job never got done\n");
1476 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1477 return;
1478 }
1479
1480 /* Set the key sent variable so this can be skipped later */
1481 sctx->key_sent = 1;
1482 }
1483
1484 /* Key sent (or maybe not if we did not have to), now send block */
1485 are_we_done_yet = 0;
1486
1487 result = sep_crypto_block_data(req);
1488
1489 if (result != 0) {
1490 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1491 "could prep not send block %x\n", result);
1492 sep_crypto_release(sctx, ta_ctx, result);
1493 return;
1494 }
1495
1496 result = sep_crypto_take_sep(ta_ctx);
1497 if (result) {
1498 dev_warn(&ta_ctx->sep_used->pdev->dev,
1499 "sep_crypto_take_sep for block send failed\n");
1500 sep_crypto_release(sctx, ta_ctx, result);
1501 return;
1502 }
1503
1504 /* now we sit and wait up to a fixed time for completion */
1505 end_time = jiffies + (WAIT_TIME * HZ);
1506 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
1507 schedule();
1508
1509 /* Done waiting; still not done yet? */
1510 if (are_we_done_yet == 0) {
1511 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1512 "Send block job never got done\n");
1513 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1514 return;
1515 }
1516
1517 /* That's it; entire thing done, get out of queue */
1518
1519 pr_debug("crypto_block leaving\n");
1520 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
1521 }
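/*
 * Design note: the waits above are cooperative polling. The work item
 * yields with schedule() until the tasklet path (sep_finish() ->
 * crypto_post_op() -> sep_crypto_release()) sets *are_we_done_yet, or
 * until WAIT_TIME seconds worth of jiffies pass; that need to sleep
 * is why this runs from a workqueue rather than the caller's context.
 */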
1522
1523 /**
1524 * Post operation (after interrupt) for crypto block
1525 */
1526 static u32 crypto_post_op(struct sep_device *sep)
1527 {
1528
1529 u32 u32_error;
1530 u32 msg_offset;
1531
1532 ssize_t copy_result;
1533 static char small_buf[100];
1534
1535 struct ablkcipher_request *req;
1536 struct this_task_ctx *ta_ctx;
1537 struct sep_system_ctx *sctx;
1538 struct crypto_ablkcipher *tfm;
1539
1540 struct sep_des_internal_context *des_internal;
1541 struct sep_aes_internal_context *aes_internal;
1542
1543 if (!sep->current_cypher_req)
1544 return -EINVAL;
1545
1546 /* hold req since we need to submit work after clearing sep */
1547 req = sep->current_cypher_req;
1548
1549 ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
1550 tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
1551 sctx = crypto_ablkcipher_ctx(tfm);
1552
1553 pr_debug("crypto_post op\n");
1554 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1555 sctx->key_sent, tfm, sctx, ta_ctx);
1556
1557 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
1558 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
1559
1560 /* first bring msg from shared area to local area */
1561 memcpy(ta_ctx->msg, sep->shared_addr,
1562 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1563
1564 /* Is this the result of performing init (key to SEP)? */
1565 if (sctx->key_sent == 0) {
1566
1567 /* Did SEP do it okay */
1568 u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
1569 &msg_offset);
1570 if (u32_error) {
1571 dev_warn(&ta_ctx->sep_used->pdev->dev,
1572 "aes init error %x\n", u32_error);
1573 sep_crypto_release(sctx, ta_ctx, u32_error);
1574 return u32_error;
1575 }
1576
1577 /* Read Context */
1578 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1579 sep_read_context(ta_ctx, &msg_offset,
1580 &sctx->des_private_ctx,
1581 sizeof(struct sep_des_private_context));
1582 } else {
1583 sep_read_context(ta_ctx, &msg_offset,
1584 &sctx->aes_private_ctx,
1585 sizeof(struct sep_aes_private_context));
1586 }
1587
1588 sep_dump_ivs(req, "after sending key to sep\n");
1589
1590 /* key sent went okay; release sep, and set are_we_done_yet */
1591 sctx->key_sent = 1;
1592 sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
1593
1594 } else {
1595
1596 /**
1597 * This is the result of a block request
1598 */
1599 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1600 "crypto_post_op block response\n");
1601
1602 u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
1603 &msg_offset);
1604
1605 if (u32_error) {
1606 dev_warn(&ta_ctx->sep_used->pdev->dev,
1607 "sep block error %x\n", u32_error);
1608 sep_crypto_release(sctx, ta_ctx, u32_error);
1609 return -EINVAL;
1610 }
1611
1612 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1613
1614 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1615 "post op for DES\n");
1616
1617 /* special case for 1 block des */
1618 if (sep->current_cypher_req->nbytes ==
1619 crypto_ablkcipher_blocksize(tfm)) {
1620
1621 sep_read_msg(ta_ctx, small_buf,
1622 crypto_ablkcipher_blocksize(tfm),
1623 crypto_ablkcipher_blocksize(tfm) * 2,
1624 &msg_offset, 1);
1625
1626 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1627 "reading in block des\n");
1628
1629 copy_result = sg_copy_from_buffer(
1630 ta_ctx->dst_sg,
1631 sep_sg_nents(ta_ctx->dst_sg),
1632 small_buf,
1633 crypto_ablkcipher_blocksize(tfm));
1634
1635 if (copy_result !=
1636 crypto_ablkcipher_blocksize(tfm)) {
1637
1638 dev_warn(&ta_ctx->sep_used->pdev->dev,
1639 "des block copy failed\n");
1640 sep_crypto_release(sctx, ta_ctx,
1641 -ENOMEM);
1642 return -ENOMEM;
1643 }
1644 }
1645
1646 /* Read Context */
1647 sep_read_context(ta_ctx, &msg_offset,
1648 &sctx->des_private_ctx,
1649 sizeof(struct sep_des_private_context));
1650 } else {
1651
1652 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1653 "post op for AES\n");
1654
1655 /* Skip the MAC Output */
1656 msg_offset += (sizeof(u32) * 4);
1657
1658 /* Read Context */
1659 sep_read_context(ta_ctx, &msg_offset,
1660 &sctx->aes_private_ctx,
1661 sizeof(struct sep_aes_private_context));
1662 }
1663
1664 /* Copy to correct sg if this block had oddball pages */
1665 if (ta_ctx->dst_sg_hold)
1666 sep_copy_sg(ta_ctx->sep_used,
1667 ta_ctx->dst_sg,
1668 ta_ctx->current_cypher_req->dst,
1669 ta_ctx->current_cypher_req->nbytes);
1670
1671 /**
1672 * Copy the iv's back to the walk.iv
1673 * This is required for dm_crypt
1674 */
1675 sep_dump_ivs(req, "got data block from sep\n");
1676 if ((ta_ctx->current_request == DES_CBC) &&
1677 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1678
1679 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1680 "returning result iv to walk on DES\n");
1681 des_internal = (struct sep_des_internal_context *)
1682 sctx->des_private_ctx.ctx_buf;
1683 memcpy(ta_ctx->walk.iv,
1684 (void *)des_internal->iv_context,
1685 crypto_ablkcipher_ivsize(tfm));
1686 } else if ((ta_ctx->current_request == AES_CBC) &&
1687 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1688
1689 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1690 "returning result iv to walk on AES\n");
1691 aes_internal = (struct sep_aes_internal_context *)
1692 sctx->aes_private_ctx.cbuff;
1693 memcpy(ta_ctx->walk.iv,
1694 (void *)aes_internal->aes_ctx_iv,
1695 crypto_ablkcipher_ivsize(tfm));
1696 }
1697
1698 /* finished, release everything */
1699 sep_crypto_release(sctx, ta_ctx, 0);
1700 }
1701 pr_debug("crypto_post_op done\n");
1702 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1703 sctx->key_sent, tfm, sctx, ta_ctx);
1704
1705 return 0;
1706 }
1707
1708 static u32 hash_init_post_op(struct sep_device *sep)
1709 {
1710 u32 u32_error;
1711 u32 msg_offset;
1712 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1713 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1714 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1715 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1716 "hash init post op\n");
1717
1718 /* first bring msg from shared area to local area */
1719 memcpy(ta_ctx->msg, sep->shared_addr,
1720 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1721
1722 u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
1723 &msg_offset);
1724
1725 if (u32_error) {
1726 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1727 u32_error);
1728 sep_crypto_release(sctx, ta_ctx, u32_error);
1729 return u32_error;
1730 }
1731
1732 /* Read Context */
1733 sep_read_context(ta_ctx, &msg_offset,
1734 &sctx->hash_private_ctx,
1735 sizeof(struct sep_hash_private_context));
1736
1737 /* Signal to crypto infrastructure and clear out */
1738 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
1739 sep_crypto_release(sctx, ta_ctx, 0);
1740 return 0;
1741 }
1742
1743 static u32 hash_update_post_op(struct sep_device *sep)
1744 {
1745 u32 u32_error;
1746 u32 msg_offset;
1747 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1748 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1749 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1750 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1751 "hash update post op\n");
1752
1753 /* first bring msg from shared area to local area */
1754 memcpy(ta_ctx->msg, sep->shared_addr,
1755 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1756
1757 u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
1758 &msg_offset);
1759
1760 if (u32_error) {
1761 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash update error %x\n",
1762 u32_error);
1763 sep_crypto_release(sctx, ta_ctx, u32_error);
1764 return u32_error;
1765 }
1766
1767 /* Read Context */
1768 sep_read_context(ta_ctx, &msg_offset,
1769 &sctx->hash_private_ctx,
1770 sizeof(struct sep_hash_private_context));
1771
1772 /**
1773 * Following is only for finup; if we just completed the
1774 * data portion of finup, we now need to kick off the
1775 * finish portion of finup.
1776 */
1777
1778 if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
1779
1780 /* first reset stage to HASH_FINUP_FINISH */
1781 ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
1782
1783 /* now enqueue the finish operation */
1784 spin_lock_irq(&queue_lock);
1785 u32_error = crypto_enqueue_request(&sep_queue,
1786 &ta_ctx->sep_used->current_hash_req->base);
1787 spin_unlock_irq(&queue_lock);
1788
1789 if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
1790 dev_warn(&ta_ctx->sep_used->pdev->dev,
1791 "spe cypher post op cant queue\n");
1792 sep_crypto_release(sctx, ta_ctx, u32_error);
1793 return u32_error;
1794 }
1795
1796 /* schedule the data send */
1797 u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
1798 sep_dequeuer, (void *)&sep_queue);
1799
1800 if (u32_error) {
1801 dev_warn(&ta_ctx->sep_used->pdev->dev,
1802 "cant submit work sep_crypto_block\n");
1803 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1804 return -EINVAL;
1805 }
1806 }
1807
1808 /* Signal to crypto infrastructure and clear out */
1809 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
1810 sep_crypto_release(sctx, ta_ctx, 0);
1811 return 0;
1812 }
1813
1814 static u32 hash_final_post_op(struct sep_device *sep)
1815 {
1816 int max_length;
1817 u32 u32_error;
1818 u32 msg_offset;
1819 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1820 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1821 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1822 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1823 "hash final post op\n");
1824
1825 /* first bring msg from shared area to local area */
1826 memcpy(ta_ctx->msg, sep->shared_addr,
1827 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1828
1829 u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
1830 &msg_offset);
1831
1832 if (u32_error) {
1833 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
1834 u32_error);
1835 sep_crypto_release(sctx, ta_ctx, u32_error);
1836 return u32_error;
1837 }
1838
1839 /* Grab the result */
1840 if (ta_ctx->current_hash_req->result == NULL) {
1841 /* Oops, null buffer; error out here */
1842 dev_warn(&ta_ctx->sep_used->pdev->dev,
1843 "hash finish null buffer\n");
1844 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1845 return -ENOMEM;
1846 }
1847
1848 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1849 sizeof(u32)) * sizeof(u32);
1850
1851 sep_read_msg(ta_ctx,
1852 ta_ctx->current_hash_req->result,
1853 crypto_ahash_digestsize(tfm), max_length,
1854 &msg_offset, 0);
1855
1856 /* Signal to crypto infrastructure and clear out */
1857 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
1858 sep_crypto_release(sctx, ta_ctx, 0);
1859 return 0;
1860 }
1861
1862 static u32 hash_digest_post_op(struct sep_device *sep)
1863 {
1864 int max_length;
1865 u32 u32_error;
1866 u32 msg_offset;
1867 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1868 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1869 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1870 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1871 "hash digest post op\n");
1872
1873 /* first bring msg from shared area to local area */
1874 memcpy(ta_ctx->msg, sep->shared_addr,
1875 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1876
1877 u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
1878 &msg_offset);
1879
1880 if (u32_error) {
1881 dev_warn(&ta_ctx->sep_used->pdev->dev,
1882 "hash digest finish error %x\n", u32_error);
1883
1884 sep_crypto_release(sctx, ta_ctx, u32_error);
1885 return u32_error;
1886 }
1887
1888 /* Grab the result */
1889 if (ta_ctx->current_hash_req->result == NULL) {
1890 /* Oops, null buffer; error out here */
1891 dev_warn(&ta_ctx->sep_used->pdev->dev,
1892 "hash digest finish null buffer\n");
1893 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1894 return -ENOMEM;
1895 }
1896
1897 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1898 sizeof(u32)) * sizeof(u32);
1899
1900 sep_read_msg(ta_ctx,
1901 ta_ctx->current_hash_req->result,
1902 crypto_ahash_digestsize(tfm), max_length,
1903 &msg_offset, 0);
1904
1905 /* Signal to crypto infrastructure and clear out */
1906 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1907 "hash digest finish post op done\n");
1908
1909 sep_crypto_release(sctx, ta_ctx, 0);
1910 return 0;
1911 }
1912
1913 /**
1914 * The sep_finish function is the function that is scheduled (via tasklet)
1915 * by the interrupt service routine when the SEP sends an interrupt.
1916 * This is only called by the interrupt handler as a tasklet.
1917 */
1918 static void sep_finish(unsigned long data)
1919 {
1920 struct sep_device *sep_dev;
1921 int res;
1922
1923 res = 0;
1924
1925 if (data == 0) {
1926 pr_debug("sep_finish called with null data\n");
1927 return;
1928 }
1929
1930 sep_dev = (struct sep_device *)data;
1931 if (sep_dev == NULL) {
1932 pr_debug("sep_finish; sep_dev is NULL\n");
1933 return;
1934 }
1935
1936 if (sep_dev->in_kernel == (u32)0) {
1937 dev_warn(&sep_dev->pdev->dev,
1938 "sep_finish; not in kernel operation\n");
1939 return;
1940 }
1941
1942 /* Did we really do a sep command prior to this? */
1943 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1944 &sep_dev->ta_ctx->call_status.status)) {
1945
1946 dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
1947 current->pid);
1948 return;
1949 }
1950
1951 if (sep_dev->send_ct != sep_dev->reply_ct) {
1952 dev_warn(&sep_dev->pdev->dev,
1953 "[PID%d] poll; no message came back\n",
1954 current->pid);
1955 return;
1956 }
1957
1958 /* Check for error (In case time ran out); GPR3 holds the poll status */
res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
1959 if ((res != 0x0) && (res != 0x8)) {
1960 dev_warn(&sep_dev->pdev->dev,
1961 "[PID%d] poll; poll error GPR3 is %x\n",
1962 current->pid, res);
1963 return;
1964 }
1965
1966 /* What kind of interrupt from sep was this? */
1967 res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
1968
1969 dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
1970 current->pid, res);
1971
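/*
 * GPR2 encodes the interrupt type: bit 31 flags a request for the
 * daemon, bit 30 a print request; anything else is a reply to a
 * previously sent command.
 */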
1972 /* Print request? */
1973 if ((res >> 30) & 0x1) {
1974 dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
1975 current->pid);
1976 dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
1977 current->pid,
1978 (char *)(sep_dev->shared_addr +
1979 SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
1980 return;
1981 }
1982
1983 /* Request for daemon (not currently in POR)? */
1984 if (res >> 31) {
1985 dev_dbg(&sep_dev->pdev->dev,
1986 "[PID%d] sep request; ignoring\n",
1987 current->pid);
1988 return;
1989 }
1990
1991 /* If we got here, then we have a reply to a sep command */
1992
1993 dev_dbg(&sep_dev->pdev->dev,
1994 "[PID%d] sep reply to command; processing request: %x\n",
1995 current->pid, sep_dev->current_request);
1996
1997 switch (sep_dev->current_request) {
1998 case AES_CBC:
1999 case AES_ECB:
2000 case DES_CBC:
2001 case DES_ECB:
2002 res = crypto_post_op(sep_dev);
2003 break;
2004 case SHA1:
2005 case MD5:
2006 case SHA224:
2007 case SHA256:
2008 switch (sep_dev->current_hash_stage) {
2009 case HASH_INIT:
2010 res = hash_init_post_op(sep_dev);
2011 break;
2012 case HASH_UPDATE:
2013 case HASH_FINUP_DATA:
2014 res = hash_update_post_op(sep_dev);
2015 break;
2016 case HASH_FINUP_FINISH:
2017 case HASH_FINISH:
2018 res = hash_final_post_op(sep_dev);
2019 break;
2020 case HASH_DIGEST:
2021 res = hash_digest_post_op(sep_dev);
2022 break;
2023 default:
2024 pr_debug("sep - invalid stage for hash finish\n");
2025 }
2026 break;
2027 default:
2028 pr_debug("sep - invalid request for finish\n");
2029 }
2030
2031 if (res)
2032 pr_debug("sep - finish returned error %x\n", res);
2033 }
2034
2035 static int sep_hash_cra_init(struct crypto_tfm *tfm)
2036 {
2037 const char *alg_name = crypto_tfm_alg_name(tfm);
2038
2039 pr_debug("sep_hash_cra_init name is %s\n", alg_name);
2040
2041 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2042 sizeof(struct this_task_ctx));
2043 return 0;
2044 }
2045
2046 static void sep_hash_cra_exit(struct crypto_tfm *tfm)
2047 {
2048 pr_debug("sep_hash_cra_exit\n");
2049 }
2050
2051 static void sep_hash_init(void *data)
2052 {
2053 u32 msg_offset;
2054 int result;
2055 struct ahash_request *req;
2056 struct crypto_ahash *tfm;
2057 struct this_task_ctx *ta_ctx;
2058 struct sep_system_ctx *sctx;
2059 unsigned long end_time;
2060 int are_we_done_yet;
2061
2062 req = (struct ahash_request *)data;
2063 tfm = crypto_ahash_reqtfm(req);
2064 sctx = crypto_ahash_ctx(tfm);
2065 ta_ctx = ahash_request_ctx(req);
2066 ta_ctx->sep_used = sep_dev;
2067
2068 ta_ctx->are_we_done_yet = &are_we_done_yet;
2069
2070 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2071 "sep_hash_init\n");
2072 ta_ctx->current_hash_stage = HASH_INIT;
2073 /* opcode and mode */
2074 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
2075 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2076 sizeof(u32), sizeof(u32), &msg_offset, 0);
2077 sep_end_msg(ta_ctx, msg_offset);
2078
2079 are_we_done_yet = 0;
2080 result = sep_crypto_take_sep(ta_ctx);
2081 if (result) {
2082 dev_warn(&ta_ctx->sep_used->pdev->dev,
2083 "sep_hash_init take sep failed\n");
2084 sep_crypto_release(sctx, ta_ctx, -EINVAL);
/* already released; do not fall through to the wait loop below */
return;
2085 }
2086
2087 /* now we sit and wait up to a fixed time for completion */
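/* completion is signalled to us through ta_ctx->are_we_done_yet */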
2088 end_time = jiffies + (WAIT_TIME * HZ);
2089 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2090 schedule();
2091
2092 /* Done waiting; still not done yet? */
2093 if (are_we_done_yet == 0) {
2094 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2095 "hash init never got done\n");
2096 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2097 return;
2098 }
2099
2100 }
2101
2102 static void sep_hash_update(void *data)
2103 {
2104 int int_error;
2105 u32 msg_offset;
2106 u32 len;
2107 struct sep_hash_internal_context *int_ctx;
2108 u32 block_size;
2109 u32 head_len;
2110 u32 tail_len;
2111 int are_we_done_yet;
2112
2113 u32 msg[10];
2114 char small_buf[100];	/* per-request scratch; must not be shared */
2115 void *src_ptr;
2116 struct scatterlist *new_sg;
2117 ssize_t copy_result;
2118 struct ahash_request *req;
2119 struct crypto_ahash *tfm;
2120 struct this_task_ctx *ta_ctx;
2121 struct sep_system_ctx *sctx;
2122 unsigned long end_time;
2123
2124 req = (struct ahash_request *)data;
2125 tfm = crypto_ahash_reqtfm(req);
2126 sctx = crypto_ahash_ctx(tfm);
2127 ta_ctx = ahash_request_ctx(req);
2128 ta_ctx->sep_used = sep_dev;
2129
2130 ta_ctx->are_we_done_yet = &are_we_done_yet;
2131
2132 /* length for queue status */
2133 ta_ctx->nbytes = req->nbytes;
2134
2135 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2136 "sep_hash_update\n");
2137 ta_ctx->current_hash_stage = HASH_UPDATE;
2138 len = req->nbytes;
2139
2140 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2141 tail_len = req->nbytes % block_size;
2142 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
2143 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2144 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2145
2146 /* Compute header/tail sizes */
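/*
 * head_len completes any partial block carried over from the
 * previous update; tail_len is the partial block left over after
 * the block-aligned middle section that goes out via the DMA tables.
 */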
2147 int_ctx = (struct sep_hash_internal_context *)&sctx->
2148 hash_private_ctx.internal_context;
2149 head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
2150 tail_len = (req->nbytes - head_len) % block_size;
2151
2152 /* Make sure all pages are an even block */
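/*
 * sep_oddball_pages() returns a negative value on failure, 1 when
 * it had to build a realigned scatterlist in new_sg, and 0 when
 * the original scatterlist is already usable as-is.
 */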
2153 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2154 req->nbytes,
2155 block_size, &new_sg, 1);
2156
2157 if (int_error < 0) {
2158 dev_warn(&ta_ctx->sep_used->pdev->dev,
2159 "oddball pages error in crash update\n");
2160 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2161 return;
2162 } else if (int_error == 1) {
2163 ta_ctx->src_sg = new_sg;
2164 ta_ctx->src_sg_hold = new_sg;
2165 } else {
2166 ta_ctx->src_sg = req->src;
2167 ta_ctx->src_sg_hold = NULL;
2168 }
2169
2170 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
2171 /* null data; do not dereference an empty scatterlist */
2172 src_ptr = NULL;
2173 } else {
2174 src_ptr = sg_virt(ta_ctx->src_sg);
2175 }
2176
2177 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2178 ta_ctx->dcb_input_data.data_in_size =
2179 req->nbytes - (head_len + tail_len);
2180 ta_ctx->dcb_input_data.app_out_address = NULL;
2181 ta_ctx->dcb_input_data.block_size = block_size;
2182 ta_ctx->dcb_input_data.tail_block_size = 0;
2183 ta_ctx->dcb_input_data.is_applet = 0;
2184 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2185 ta_ctx->dcb_input_data.dst_sg = NULL;
2186
2187 int_error = sep_create_dcb_dmatables_context_kernel(
2188 ta_ctx->sep_used,
2189 &ta_ctx->dcb_region,
2190 &ta_ctx->dmatables_region,
2191 &ta_ctx->dma_ctx,
2192 &ta_ctx->dcb_input_data,
2193 1);
2194 if (int_error) {
2195 dev_warn(&ta_ctx->sep_used->pdev->dev,
2196 "hash update dma table create failed\n");
2197 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2198 return;
2199 }
2200
2201 /* Construct message to SEP */
2202 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
2203
2204 msg[0] = (u32)0;
2205 msg[1] = (u32)0;
2206 msg[2] = (u32)0;
2207
2208 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2209 &msg_offset, 0);
2210
2211 /* Handle remainders */
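/*
 * A fixed 32-word slot is reserved in the message for each
 * remainder (head and tail); an empty remainder simply advances
 * msg_offset past its unused slot.
 */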
2212
2213 /* Head */
2214 sep_write_msg(ta_ctx, &head_len, sizeof(u32),
2215 sizeof(u32), &msg_offset, 0);
2216
2217 if (head_len) {
2218 copy_result = sg_copy_to_buffer(
2219 req->src,
2220 sep_sg_nents(ta_ctx->src_sg),
2221 small_buf, head_len);
2222
2223 if (copy_result != head_len) {
2224 dev_warn(&ta_ctx->sep_used->pdev->dev,
2225 "sg head copy failure in hash block\n");
2226 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2227 return;
2228 }
2229
2230 sep_write_msg(ta_ctx, small_buf, head_len,
2231 sizeof(u32) * 32, &msg_offset, 1);
2232 } else {
2233 msg_offset += sizeof(u32) * 32;
2234 }
2235
2236 /* Tail */
2237 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2238 sizeof(u32), &msg_offset, 0);
2239
2240 if (tail_len) {
2241 copy_result = sep_copy_offset_sg(
2242 ta_ctx->sep_used,
2243 ta_ctx->src_sg,
2244 req->nbytes - tail_len,
2245 small_buf, tail_len);
2246
2247 if (copy_result != tail_len) {
2248 dev_warn(&ta_ctx->sep_used->pdev->dev,
2249 "sg tail copy failure in hash block\n");
2250 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2251 return;
2252 }
2253
2254 sep_write_msg(ta_ctx, small_buf, tail_len,
2255 sizeof(u32) * 32, &msg_offset, 1);
2256 } else {
2257 msg_offset += sizeof(u32) * 32;
2258 }
2259
2260 /* Context */
2261 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2262 sizeof(struct sep_hash_private_context));
2263
2264 sep_end_msg(ta_ctx, msg_offset);
2265 are_we_done_yet = 0;
2266 int_error = sep_crypto_take_sep(ta_ctx);
2267 if (int_error) {
2268 dev_warn(&ta_ctx->sep_used->pdev->dev,
2269 "sep_hash_update take sep failed\n");
2270 sep_crypto_release(sctx, ta_ctx, -EINVAL);
return;
2271 }
2272
2273 /* now we sit and wait up to a fixed time for completion */
2274 end_time = jiffies + (WAIT_TIME * HZ);
2275 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2276 schedule();
2277
2278 /* Done waiting; still not done yet? */
2279 if (are_we_done_yet == 0) {
2280 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2281 "hash update never got done\n");
2282 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2283 return;
2284 }
2285
2286 }
2287
2288 static void sep_hash_final(void *data)
2289 {
2290 u32 msg_offset;
2291 struct ahash_request *req;
2292 struct crypto_ahash *tfm;
2293 struct this_task_ctx *ta_ctx;
2294 struct sep_system_ctx *sctx;
2295 int result;
2296 unsigned long end_time;
2297 int are_we_done_yet;
2298
2299 req = (struct ahash_request *)data;
2300 tfm = crypto_ahash_reqtfm(req);
2301 sctx = crypto_ahash_ctx(tfm);
2302 ta_ctx = ahash_request_ctx(req);
2303 ta_ctx->sep_used = sep_dev;
2304
2305 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2306 "sep_hash_final\n");
2307 ta_ctx->current_hash_stage = HASH_FINISH;
2308
2309 ta_ctx->are_we_done_yet = &are_we_done_yet;
2310
2311 /* opcode and mode */
2312 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
2313
2314 /* Context */
2315 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2316 sizeof(struct sep_hash_private_context));
2317
2318 sep_end_msg(ta_ctx, msg_offset);
2319 are_we_done_yet = 0;
2320 result = sep_crypto_take_sep(ta_ctx);
2321 if (result) {
2322 dev_warn(&ta_ctx->sep_used->pdev->dev,
2323 "sep_hash_final take sep failed\n");
2324 sep_crypto_release(sctx, ta_ctx, -EINVAL);
return;
2325 }
2326
2327 /* now we sit and wait up to a fixed time for completion */
2328 end_time = jiffies + (WAIT_TIME * HZ);
2329 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2330 schedule();
2331
2332 /* Done waiting; still not done yet? */
2333 if (are_we_done_yet == 0) {
2334 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2335 "hash final job never got done\n");
2336 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2337 return;
2338 }
2339
2340 }
2341
2342 static void sep_hash_digest(void *data)
2343 {
2344 int int_error;
2345 u32 msg_offset;
2346 u32 block_size;
2347 u32 msg[10];
2348 size_t copy_result;
2349 int result;
2350 int are_we_done_yet;
2351 u32 tail_len;
2352 char small_buf[100];	/* per-request scratch; must not be shared */
2353 struct scatterlist *new_sg;
2354 void *src_ptr;
2355
2356 struct ahash_request *req;
2357 struct crypto_ahash *tfm;
2358 struct this_task_ctx *ta_ctx;
2359 struct sep_system_ctx *sctx;
2360 unsigned long end_time;
2361
2362 req = (struct ahash_request *)data;
2363 tfm = crypto_ahash_reqtfm(req);
2364 sctx = crypto_ahash_ctx(tfm);
2365 ta_ctx = ahash_request_ctx(req);
2366 ta_ctx->sep_used = sep_dev;
2367
2368 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2369 "sep_hash_digest\n");
2370 ta_ctx->current_hash_stage = HASH_DIGEST;
2371
2372 ta_ctx->are_we_done_yet = &are_we_done_yet;
2373
2374 /* length for queue status */
2375 ta_ctx->nbytes = req->nbytes;
2376
2377 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2378 tail_len = req->nbytes % block_size;
2379 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
2380 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2381 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2382
2383 /* Make sure all pages are an even block */
2384 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2385 req->nbytes,
2386 block_size, &new_sg, 1);
2387
2388 if (int_error < 0) {
2389 dev_warn(&ta_ctx->sep_used->pdev->dev,
2390 "oddball pages error in crash update\n");
2391 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2392 return;
2393 } else if (int_error == 1) {
2394 ta_ctx->src_sg = new_sg;
2395 ta_ctx->src_sg_hold = new_sg;
2396 } else {
2397 ta_ctx->src_sg = req->src;
2398 ta_ctx->src_sg_hold = NULL;
2399 }
2400
2401 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
2402 /* null data; do not dereference an empty scatterlist */
2403 src_ptr = NULL;
2404 } else {
2405 src_ptr = sg_virt(ta_ctx->src_sg);
2406 }
2407
2408 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2409 ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
2410 ta_ctx->dcb_input_data.app_out_address = NULL;
2411 ta_ctx->dcb_input_data.block_size = block_size;
2412 ta_ctx->dcb_input_data.tail_block_size = 0;
2413 ta_ctx->dcb_input_data.is_applet = 0;
2414 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2415 ta_ctx->dcb_input_data.dst_sg = NULL;
2416
2417 int_error = sep_create_dcb_dmatables_context_kernel(
2418 ta_ctx->sep_used,
2419 &ta_ctx->dcb_region,
2420 &ta_ctx->dmatables_region,
2421 &ta_ctx->dma_ctx,
2422 &ta_ctx->dcb_input_data,
2423 1);
2424 if (int_error) {
2425 dev_warn(&ta_ctx->sep_used->pdev->dev,
2426 "hash update dma table create failed\n");
2427 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2428 return;
2429 }
2430
2431 /* Construct message to SEP */
2432 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
2433 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2434 sizeof(u32), sizeof(u32), &msg_offset, 0);
2435
2436 msg[0] = (u32)0;
2437 msg[1] = (u32)0;
2438 msg[2] = (u32)0;
2439
2440 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2441 &msg_offset, 0);
2442
2443 /* Tail */
2444 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2445 sizeof(u32), &msg_offset, 0);
2446
2447 if (tail_len) {
2448 copy_result = sep_copy_offset_sg(
2449 ta_ctx->sep_used,
2450 ta_ctx->src_sg,
2451 req->nbytes - tail_len,
2452 small_buf, tail_len);
2453
2454 if (copy_result != tail_len) {
2455 dev_warn(&ta_ctx->sep_used->pdev->dev,
2456 "sg tail copy failure in hash block\n");
2457 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2458 return;
2459 }
2460
2461 sep_write_msg(ta_ctx, small_buf, tail_len,
2462 sizeof(u32) * 32, &msg_offset, 1);
2463 } else {
2464 msg_offset += sizeof(u32) * 32;
2465 }
2466
2467 sep_end_msg(ta_ctx, msg_offset);
2468
2469 are_we_done_yet = 0;
2470 result = sep_crypto_take_sep(ta_ctx);
2471 if (result) {
2472 dev_warn(&ta_ctx->sep_used->pdev->dev,
2473 "sep_hash_digest take sep failed\n");
2474 sep_crypto_release(sctx, ta_ctx, -EINVAL);
return;
2475 }
2476
2477 /* now we sit and wait up to a fixed time for completion */
2478 end_time = jiffies + (WAIT_TIME * HZ);
2479 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2480 schedule();
2481
2482 /* Done waiting; still not done yet? */
2483 if (are_we_done_yet == 0) {
2484 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2485 "hash digest job never got done\n");
2486 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2487 return;
2488 }
2489
2490 }
2491
2492 /**
2493 * This is what is called by each of the APIs provided
2494 * in the kernel crypto descriptors. It is run in a process
2495 * context using the kernel workqueues. Therefore it can
2496 * be put to sleep.
2497 */
2498 static void sep_dequeuer(void *data)
2499 {
2500 struct crypto_queue *this_queue;
2501 struct crypto_async_request *async_req;
2502 struct crypto_async_request *backlog;
2503 struct ablkcipher_request *cypher_req;
2504 struct ahash_request *hash_req;
2505 struct sep_system_ctx *sctx;
2506 struct crypto_ahash *hash_tfm;
2507 struct this_task_ctx *ta_ctx;
2508
2509
2510 this_queue = (struct crypto_queue *)data;
2511
2512 spin_lock_irq(&queue_lock);
2513 backlog = crypto_get_backlog(this_queue);
2514 async_req = crypto_dequeue_request(this_queue);
2515 spin_unlock_irq(&queue_lock);
2516
2517 if (!async_req) {
2518 pr_debug("sep crypto queue is empty\n");
2519 return;
2520 }
2521
2522 if (backlog) {
2523 pr_debug("sep crypto backlog set\n");
2524 if (backlog->complete)
2525 backlog->complete(backlog, -EINPROGRESS);
2526 backlog = NULL;
2527 }
2528
2529 if (!async_req->tfm) {
2530 pr_debug("sep crypto queue null tfm\n");
2531 return;
2532 }
2533
2534 if (!async_req->tfm->__crt_alg) {
2535 pr_debug("sep crypto queue null __crt_alg\n");
2536 return;
2537 }
2538
2539 if (!async_req->tfm->__crt_alg->cra_type) {
2540 pr_debug("sep crypto queue null cra_type\n");
2541 return;
2542 }
2543
2544 /* we have stuff in the queue */
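/*
 * Only two cra_types are registered by this driver, so anything
 * that is not an ahash must be an ablkcipher request.
 */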
2545 if (async_req->tfm->__crt_alg->cra_type !=
2546 &crypto_ahash_type) {
2547 /* This is for a cypher */
2548 pr_debug("sep crypto queue doing cipher\n");
2549 cypher_req = container_of(async_req,
2550 struct ablkcipher_request,
2551 base);
2552 if (!cypher_req) {
2553 pr_debug("sep crypto queue null cypher_req\n");
2554 return;
2555 }
2556
2557 sep_crypto_block((void *)cypher_req);
2558 return;
2559 } else {
2560 /* This is a hash */
2561 pr_debug("sep crypto queue doing hash\n");
2562 /**
2563 * This is a bit more complex than cipher; we
2564 * need to figure out what type of operation
2565 */
2566 hash_req = ahash_request_cast(async_req);
2567 if (!hash_req) {
2568 pr_debug("sep crypto queue null hash_req\n");
2569 return;
2570 }
2571
2572 hash_tfm = crypto_ahash_reqtfm(hash_req);
2573 if (!hash_tfm) {
2574 pr_debug("sep crypto queue null hash_tfm\n");
2575 return;
2576 }
2577
2578
2579 sctx = crypto_ahash_ctx(hash_tfm);
2580 if (!sctx) {
2581 pr_debug("sep crypto queue null sctx\n");
2582 return;
2583 }
2584
2585 ta_ctx = ahash_request_ctx(hash_req);
2586
2587 if (ta_ctx->current_hash_stage == HASH_INIT) {
2588 pr_debug("sep crypto queue hash init\n");
2589 sep_hash_init((void *)hash_req);
2590 return;
2591 } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
2592 pr_debug("sep crypto queue hash update\n");
2593 sep_hash_update((void *)hash_req);
2594 return;
2595 } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
2596 pr_debug("sep crypto queue hash final\n");
2597 sep_hash_final((void *)hash_req);
2598 return;
2599 } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
2600 pr_debug("sep crypto queue hash digest\n");
2601 sep_hash_digest((void *)hash_req);
2602 return;
2603 } else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
2604 pr_debug("sep crypto queue hash finup data\n");
2605 sep_hash_update((void *)hash_req);
2606 return;
2607 } else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
2608 pr_debug("sep crypto queue hash finup finish\n");
2609 sep_hash_final((void *)hash_req);
2610 return;
2611 } else {
2612 pr_debug("sep crypto queue hash oops nothing\n");
2613 return;
2614 }
2615 }
2616 }
2617
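/*
 * Each of the ahash and ablkcipher entry points below follows the
 * same pattern: fill in the per-request context, enqueue the request
 * on sep_queue under queue_lock, then kick sep_dequeuer on the
 * driver workqueue. The value handed back to the crypto API is the
 * result of the enqueue.
 */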
2618 static int sep_sha1_init(struct ahash_request *req)
2619 {
2620 int error;
2621 int error1;
2622 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2623
2624 pr_debug("sep - doing sha1 init\n");
2625
2626 /* Clear out task context */
2627 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2628
2629 ta_ctx->sep_used = sep_dev;
2630 ta_ctx->current_request = SHA1;
2631 ta_ctx->current_hash_req = req;
2632 ta_ctx->current_cypher_req = NULL;
2633 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2634 ta_ctx->current_hash_stage = HASH_INIT;
2635
2636 /* lock necessary so that only one entity touches the queues */
2637 spin_lock_irq(&queue_lock);
2638 error = crypto_enqueue_request(&sep_queue, &req->base);
2639
2640 if ((error != 0) && (error != -EINPROGRESS))
2641 pr_debug(" sep - crypto enqueue failed: %x\n",
2642 error);
2643 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2644 sep_dequeuer, (void *)&sep_queue);
2645 if (error1)
2646 pr_debug(" sep - workqueue submit failed: %x\n",
2647 error1);
2648 spin_unlock_irq(&queue_lock);
2649 /* We return result of crypto enqueue */
2650 return error;
2651 }
2652
2653 static int sep_sha1_update(struct ahash_request *req)
2654 {
2655 int error;
2656 int error1;
2657 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2658
2659 pr_debug("sep - doing sha1 update\n");
2660
2661 ta_ctx->sep_used = sep_dev;
2662 ta_ctx->current_request = SHA1;
2663 ta_ctx->current_hash_req = req;
2664 ta_ctx->current_cypher_req = NULL;
2665 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2666 ta_ctx->current_hash_stage = HASH_UPDATE;
2667
2668 /* lock necessary so that only one entity touches the queues */
2669 spin_lock_irq(&queue_lock);
2670 error = crypto_enqueue_request(&sep_queue, &req->base);
2671
2672 if ((error != 0) && (error != -EINPROGRESS))
2673 pr_debug(" sep - crypto enqueue failed: %x\n",
2674 error);
2675 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2676 sep_dequeuer, (void *)&sep_queue);
2677 if (error1)
2678 pr_debug(" sep - workqueue submit failed: %x\n",
2679 error1);
2680 spin_unlock_irq(&queue_lock);
2681 /* We return result of crypto enqueue */
2682 return error;
2683 }
2684
2685 static int sep_sha1_final(struct ahash_request *req)
2686 {
2687 int error;
2688 int error1;
2689 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2690 pr_debug("sep - doing sha1 final\n");
2691
2692 ta_ctx->sep_used = sep_dev;
2693 ta_ctx->current_request = SHA1;
2694 ta_ctx->current_hash_req = req;
2695 ta_ctx->current_cypher_req = NULL;
2696 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2697 ta_ctx->current_hash_stage = HASH_FINISH;
2698
2699 /* lock necessary so that only one entity touches the queues */
2700 spin_lock_irq(&queue_lock);
2701 error = crypto_enqueue_request(&sep_queue, &req->base);
2702
2703 if ((error != 0) && (error != -EINPROGRESS))
2704 pr_debug(" sep - crypto enqueue failed: %x\n",
2705 error);
2706 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2707 sep_dequeuer, (void *)&sep_queue);
2708 if (error1)
2709 pr_debug(" sep - workqueue submit failed: %x\n",
2710 error1);
2711 spin_unlock_irq(&queue_lock);
2712 /* We return result of crypto enqueue */
2713 return error;
2714 }
2715
2716 static int sep_sha1_digest(struct ahash_request *req)
2717 {
2718 int error;
2719 int error1;
2720 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2721 pr_debug("sep - doing sha1 digest\n");
2722
2723 /* Clear out task context */
2724 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2725
2726 ta_ctx->sep_used = sep_dev;
2727 ta_ctx->current_request = SHA1;
2728 ta_ctx->current_hash_req = req;
2729 ta_ctx->current_cypher_req = NULL;
2730 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2731 ta_ctx->current_hash_stage = HASH_DIGEST;
2732
2733 /* lock necessary so that only one entity touches the queues */
2734 spin_lock_irq(&queue_lock);
2735 error = crypto_enqueue_request(&sep_queue, &req->base);
2736
2737 if ((error != 0) && (error != -EINPROGRESS))
2738 pr_debug(" sep - crypto enqueue failed: %x\n",
2739 error);
2740 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2741 sep_dequeuer, (void *)&sep_queue);
2742 if (error1)
2743 pr_debug(" sep - workqueue submit failed: %x\n",
2744 error1);
2745 spin_unlock_irq(&queue_lock);
2746 /* We return result of crypto enqueue */
2747 return error;
2748 }
2749
2750 static int sep_sha1_finup(struct ahash_request *req)
2751 {
2752 int error;
2753 int error1;
2754 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2755 pr_debug("sep - doing sha1 finup\n");
2756
2757 ta_ctx->sep_used = sep_dev;
2758 ta_ctx->current_request = SHA1;
2759 ta_ctx->current_hash_req = req;
2760 ta_ctx->current_cypher_req = NULL;
2761 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2762 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2763
2764 /* lock necessary so that only one entity touches the queues */
2765 spin_lock_irq(&queue_lock);
2766 error = crypto_enqueue_request(&sep_queue, &req->base);
2767
2768 if ((error != 0) && (error != -EINPROGRESS))
2769 pr_debug(" sep - crypto enqueue failed: %x\n",
2770 error);
2771 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2772 sep_dequeuer, (void *)&sep_queue);
2773 if (error1)
2774 pr_debug(" sep - workqueue submit failed: %x\n",
2775 error1);
2776 spin_unlock_irq(&queue_lock);
2777 /* We return result of crypto enqueue */
2778 return error;
2779 }
2780
2781 static int sep_md5_init(struct ahash_request *req)
2782 {
2783 int error;
2784 int error1;
2785 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2786 pr_debug("sep - doing md5 init\n");
2787
2788 /* Clear out task context */
2789 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2790
2791 ta_ctx->sep_used = sep_dev;
2792 ta_ctx->current_request = MD5;
2793 ta_ctx->current_hash_req = req;
2794 ta_ctx->current_cypher_req = NULL;
2795 ta_ctx->hash_opmode = SEP_HASH_MD5;
2796 ta_ctx->current_hash_stage = HASH_INIT;
2797
2798 /* lock necessary so that only one entity touches the queues */
2799 spin_lock_irq(&queue_lock);
2800 error = crypto_enqueue_request(&sep_queue, &req->base);
2801
2802 if ((error != 0) && (error != -EINPROGRESS))
2803 pr_debug(" sep - crypto enqueue failed: %x\n",
2804 error);
2805 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2806 sep_dequeuer, (void *)&sep_queue);
2807 if (error1)
2808 pr_debug(" sep - workqueue submit failed: %x\n",
2809 error1);
2810 spin_unlock_irq(&queue_lock);
2811 /* We return result of crypto enqueue */
2812 return error;
2813 }
2814
2815 static int sep_md5_update(struct ahash_request *req)
2816 {
2817 int error;
2818 int error1;
2819 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2820 pr_debug("sep - doing md5 update\n");
2821
2822 ta_ctx->sep_used = sep_dev;
2823 ta_ctx->current_request = MD5;
2824 ta_ctx->current_hash_req = req;
2825 ta_ctx->current_cypher_req = NULL;
2826 ta_ctx->hash_opmode = SEP_HASH_MD5;
2827 ta_ctx->current_hash_stage = HASH_UPDATE;
2828
2829 /* lock necessary so that only one entity touches the queues */
2830 spin_lock_irq(&queue_lock);
2831 error = crypto_enqueue_request(&sep_queue, &req->base);
2832
2833 if ((error != 0) && (error != -EINPROGRESS))
2834 pr_debug(" sep - crypto enqueue failed: %x\n",
2835 error);
2836 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2837 sep_dequeuer, (void *)&sep_queue);
2838 if (error1)
2839 pr_debug(" sep - workqueue submit failed: %x\n",
2840 error1);
2841 spin_unlock_irq(&queue_lock);
2842 /* We return result of crypto enqueue */
2843 return error;
2844 }
2845
2846 static int sep_md5_final(struct ahash_request *req)
2847 {
2848 int error;
2849 int error1;
2850 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2851 pr_debug("sep - doing md5 final\n");
2852
2853 ta_ctx->sep_used = sep_dev;
2854 ta_ctx->current_request = MD5;
2855 ta_ctx->current_hash_req = req;
2856 ta_ctx->current_cypher_req = NULL;
2857 ta_ctx->hash_opmode = SEP_HASH_MD5;
2858 ta_ctx->current_hash_stage = HASH_FINISH;
2859
2860 /* lock necessary so that only one entity touches the queues */
2861 spin_lock_irq(&queue_lock);
2862 error = crypto_enqueue_request(&sep_queue, &req->base);
2863
2864 if ((error != 0) && (error != -EINPROGRESS))
2865 pr_debug(" sep - crypto enqueue failed: %x\n",
2866 error);
2867 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2868 sep_dequeuer, (void *)&sep_queue);
2869 if (error1)
2870 pr_debug(" sep - workqueue submit failed: %x\n",
2871 error1);
2872 spin_unlock_irq(&queue_lock);
2873 /* We return result of crypto enqueue */
2874 return error;
2875 }
2876
2877 static int sep_md5_digest(struct ahash_request *req)
2878 {
2879 int error;
2880 int error1;
2881 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2882
2883 pr_debug("sep - doing md5 digest\n");
2884
2885 /* Clear out task context */
2886 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2887
2888 ta_ctx->sep_used = sep_dev;
2889 ta_ctx->current_request = MD5;
2890 ta_ctx->current_hash_req = req;
2891 ta_ctx->current_cypher_req = NULL;
2892 ta_ctx->hash_opmode = SEP_HASH_MD5;
2893 ta_ctx->current_hash_stage = HASH_DIGEST;
2894
2895 /* lock necessary so that only one entity touches the queues */
2896 spin_lock_irq(&queue_lock);
2897 error = crypto_enqueue_request(&sep_queue, &req->base);
2898
2899 if ((error != 0) && (error != -EINPROGRESS))
2900 pr_debug(" sep - crypto enqueue failed: %x\n",
2901 error);
2902 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2903 sep_dequeuer, (void *)&sep_queue);
2904 if (error1)
2905 pr_debug(" sep - workqueue submit failed: %x\n",
2906 error1);
2907 spin_unlock_irq(&queue_lock);
2908 /* We return result of crypto enqueue */
2909 return error;
2910 }
2911
2912 static int sep_md5_finup(struct ahash_request *req)
2913 {
2914 int error;
2915 int error1;
2916 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2917
2918 pr_debug("sep - doing md5 finup\n");
2919
2920 ta_ctx->sep_used = sep_dev;
2921 ta_ctx->current_request = MD5;
2922 ta_ctx->current_hash_req = req;
2923 ta_ctx->current_cypher_req = NULL;
2924 ta_ctx->hash_opmode = SEP_HASH_MD5;
2925 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2926
2927 /* lock necessary so that only one entity touches the queues */
2928 spin_lock_irq(&queue_lock);
2929 error = crypto_enqueue_request(&sep_queue, &req->base);
2930
2931 if ((error != 0) && (error != -EINPROGRESS))
2932 pr_debug(" sep - crypto enqueue failed: %x\n",
2933 error);
2934 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2935 sep_dequeuer, (void *)&sep_queue);
2936 if (error1)
2937 pr_debug(" sep - workqueue submit failed: %x\n",
2938 error1);
2939 spin_unlock_irq(&queue_lock);
2940 /* We return result of crypto enqueue */
2941 return error;
2942 }
2943
2944 static int sep_sha224_init(struct ahash_request *req)
2945 {
2946 int error;
2947 int error1;
2948 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2949 pr_debug("sep - doing sha224 init\n");
2950
2951 /* Clear out task context */
2952 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2953
2954 ta_ctx->sep_used = sep_dev;
2955 ta_ctx->current_request = SHA224;
2956 ta_ctx->current_hash_req = req;
2957 ta_ctx->current_cypher_req = NULL;
2958 ta_ctx->hash_opmode = SEP_HASH_SHA224;
2959 ta_ctx->current_hash_stage = HASH_INIT;
2960
2961 /* lock necessary so that only one entity touches the queues */
2962 spin_lock_irq(&queue_lock);
2963 error = crypto_enqueue_request(&sep_queue, &req->base);
2964
2965 if ((error != 0) && (error != -EINPROGRESS))
2966 pr_debug(" sep - crypto enqueue failed: %x\n",
2967 error);
2968 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2969 sep_dequeuer, (void *)&sep_queue);
2970 if (error1)
2971 pr_debug(" sep - workqueue submit failed: %x\n",
2972 error1);
2973 spin_unlock_irq(&queue_lock);
2974 /* We return result of crypto enqueue */
2975 return error;
2976 }
2977
2978 static int sep_sha224_update(struct ahash_request *req)
2979 {
2980 int error;
2981 int error1;
2982 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2983 pr_debug("sep - doing sha224 update\n");
2984
2985 ta_ctx->sep_used = sep_dev;
2986 ta_ctx->current_request = SHA224;
2987 ta_ctx->current_hash_req = req;
2988 ta_ctx->current_cypher_req = NULL;
2989 ta_ctx->hash_opmode = SEP_HASH_SHA224;
2990 ta_ctx->current_hash_stage = HASH_UPDATE;
2991
2992 /* lock necessary so that only one entity touches the queues */
2993 spin_lock_irq(&queue_lock);
2994 error = crypto_enqueue_request(&sep_queue, &req->base);
2995
2996 if ((error != 0) && (error != -EINPROGRESS))
2997 pr_debug(" sep - crypto enqueue failed: %x\n",
2998 error);
2999 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3000 sep_dequeuer, (void *)&sep_queue);
3001 if (error1)
3002 pr_debug(" sep - workqueue submit failed: %x\n",
3003 error1);
3004 spin_unlock_irq(&queue_lock);
3005 /* We return result of crypto enqueue */
3006 return error;
3007 }
3008
3009 static int sep_sha224_final(struct ahash_request *req)
3010 {
3011 int error;
3012 int error1;
3013 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3014 pr_debug("sep - doing sha224 final\n");
3015
3016 ta_ctx->sep_used = sep_dev;
3017 ta_ctx->current_request = SHA224;
3018 ta_ctx->current_hash_req = req;
3019 ta_ctx->current_cypher_req = NULL;
3020 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3021 ta_ctx->current_hash_stage = HASH_FINISH;
3022
3023 /* lock necessary so that only one entity touches the queues */
3024 spin_lock_irq(&queue_lock);
3025 error = crypto_enqueue_request(&sep_queue, &req->base);
3026
3027 if ((error != 0) && (error != -EINPROGRESS))
3028 pr_debug(" sep - crypto enqueue failed: %x\n",
3029 error);
3030 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3031 sep_dequeuer, (void *)&sep_queue);
3032 if (error1)
3033 pr_debug(" sep - workqueue submit failed: %x\n",
3034 error1);
3035 spin_unlock_irq(&queue_lock);
3036 /* We return result of crypto enqueue */
3037 return error;
3038 }
3039
3040 static int sep_sha224_digest(struct ahash_request *req)
3041 {
3042 int error;
3043 int error1;
3044 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3045
3046 pr_debug("sep - doing sha224 digest\n");
3047
3048 /* Clear out task context */
3049 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3050
3051 ta_ctx->sep_used = sep_dev;
3052 ta_ctx->current_request = SHA224;
3053 ta_ctx->current_hash_req = req;
3054 ta_ctx->current_cypher_req = NULL;
3055 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3056 ta_ctx->current_hash_stage = HASH_DIGEST;
3057
3058 /* lock necessary so that only one entity touches the queues */
3059 spin_lock_irq(&queue_lock);
3060 error = crypto_enqueue_request(&sep_queue, &req->base);
3061
3062 if ((error != 0) && (error != -EINPROGRESS))
3063 pr_debug(" sep - crypto enqueue failed: %x\n",
3064 error);
3065 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3066 sep_dequeuer, (void *)&sep_queue);
3067 if (error1)
3068 pr_debug(" sep - workqueue submit failed: %x\n",
3069 error1);
3070 spin_unlock_irq(&queue_lock);
3071 /* We return result of crypto enqueue */
3072 return error;
3073 }
3074
3075 static int sep_sha224_finup(struct ahash_request *req)
3076 {
3077 int error;
3078 int error1;
3079 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3080
3081 pr_debug("sep - doing sha224 finup\n");
3082
3083 ta_ctx->sep_used = sep_dev;
3084 ta_ctx->current_request = SHA224;
3085 ta_ctx->current_hash_req = req;
3086 ta_ctx->current_cypher_req = NULL;
3087 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3088 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3089
3090 /* lock necessary so that only one entity touches the queues */
3091 spin_lock_irq(&queue_lock);
3092 error = crypto_enqueue_request(&sep_queue, &req->base);
3093
3094 if ((error != 0) && (error != -EINPROGRESS))
3095 pr_debug(" sep - crypto enqueue failed: %x\n",
3096 error);
3097 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3098 sep_dequeuer, (void *)&sep_queue);
3099 if (error1)
3100 pr_debug(" sep - workqueue submit failed: %x\n",
3101 error1);
3102 spin_unlock_irq(&queue_lock);
3103 /* We return result of crypto enqueue */
3104 return error;
3105 }
3106
3107 static int sep_sha256_init(struct ahash_request *req)
3108 {
3109 int error;
3110 int error1;
3111 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3112 pr_debug("sep - doing sha256 init\n");
3113
3114 /* Clear out task context */
3115 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3116
3117 ta_ctx->sep_used = sep_dev;
3118 ta_ctx->current_request = SHA256;
3119 ta_ctx->current_hash_req = req;
3120 ta_ctx->current_cypher_req = NULL;
3121 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3122 ta_ctx->current_hash_stage = HASH_INIT;
3123
3124 /* lock necessary so that only one entity touches the queues */
3125 spin_lock_irq(&queue_lock);
3126 error = crypto_enqueue_request(&sep_queue, &req->base);
3127
3128 if ((error != 0) && (error != -EINPROGRESS))
3129 pr_debug(" sep - crypto enqueue failed: %x\n",
3130 error);
3131 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3132 sep_dequeuer, (void *)&sep_queue);
3133 if (error1)
3134 pr_debug(" sep - workqueue submit failed: %x\n",
3135 error1);
3136 spin_unlock_irq(&queue_lock);
3137 /* We return result of crypto enqueue */
3138 return error;
3139 }
3140
3141 static int sep_sha256_update(struct ahash_request *req)
3142 {
3143 int error;
3144 int error1;
3145 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3146 pr_debug("sep - doing sha256 update\n");
3147
3148 ta_ctx->sep_used = sep_dev;
3149 ta_ctx->current_request = SHA256;
3150 ta_ctx->current_hash_req = req;
3151 ta_ctx->current_cypher_req = NULL;
3152 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3153 ta_ctx->current_hash_stage = HASH_UPDATE;
3154
3155 /* lock necessary so that only one entity touches the queues */
3156 spin_lock_irq(&queue_lock);
3157 error = crypto_enqueue_request(&sep_queue, &req->base);
3158
3159 if ((error != 0) && (error != -EINPROGRESS))
3160 pr_debug(" sep - crypto enqueue failed: %x\n",
3161 error);
3162 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3163 sep_dequeuer, (void *)&sep_queue);
3164 if (error1)
3165 pr_debug(" sep - workqueue submit failed: %x\n",
3166 error1);
3167 spin_unlock_irq(&queue_lock);
3168 /* We return result of crypto enqueue */
3169 return error;
3170 }
3171
3172 static int sep_sha256_final(struct ahash_request *req)
3173 {
3174 int error;
3175 int error1;
3176 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3177 pr_debug("sep - doing sha256 final\n");
3178
3179 ta_ctx->sep_used = sep_dev;
3180 ta_ctx->current_request = SHA256;
3181 ta_ctx->current_hash_req = req;
3182 ta_ctx->current_cypher_req = NULL;
3183 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3184 ta_ctx->current_hash_stage = HASH_FINISH;
3185
3186 /* lock necessary so that only one entity touches the queues */
3187 spin_lock_irq(&queue_lock);
3188 error = crypto_enqueue_request(&sep_queue, &req->base);
3189
3190 if ((error != 0) && (error != -EINPROGRESS))
3191 pr_debug(" sep - crypto enqueue failed: %x\n",
3192 error);
3193 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3194 sep_dequeuer, (void *)&sep_queue);
3195 if (error1)
3196 pr_debug(" sep - workqueue submit failed: %x\n",
3197 error1);
3198 spin_unlock_irq(&queue_lock);
3199 /* We return result of crypto enqueue */
3200 return error;
3201 }
3202
3203 static int sep_sha256_digest(struct ahash_request *req)
3204 {
3205 int error;
3206 int error1;
3207 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3208
3209 pr_debug("sep - doing sha256 digest\n");
3210
3211 /* Clear out task context */
3212 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3213
3214 ta_ctx->sep_used = sep_dev;
3215 ta_ctx->current_request = SHA256;
3216 ta_ctx->current_hash_req = req;
3217 ta_ctx->current_cypher_req = NULL;
3218 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3219 ta_ctx->current_hash_stage = HASH_DIGEST;
3220
3221 /* lock necessary so that only one entity touches the queues */
3222 spin_lock_irq(&queue_lock);
3223 error = crypto_enqueue_request(&sep_queue, &req->base);
3224
3225 if ((error != 0) && (error != -EINPROGRESS))
3226 pr_debug(" sep - crypto enqueue failed: %x\n",
3227 error);
3228 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3229 sep_dequeuer, (void *)&sep_queue);
3230 if (error1)
3231 pr_debug(" sep - workqueue submit failed: %x\n",
3232 error1);
3233 spin_unlock_irq(&queue_lock);
3234 /* We return result of crypto enqueue */
3235 return error;
3236 }
3237
3238 static int sep_sha256_finup(struct ahash_request *req)
3239 {
3240 int error;
3241 int error1;
3242 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3243
3244 pr_debug("sep - doing sha256 finup\n");
3245
3246 ta_ctx->sep_used = sep_dev;
3247 ta_ctx->current_request = SHA256;
3248 ta_ctx->current_hash_req = req;
3249 ta_ctx->current_cypher_req = NULL;
3250 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3251 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3252
3253 /* lock necessary so that only one entity touches the queues */
3254 spin_lock_irq(&queue_lock);
3255 error = crypto_enqueue_request(&sep_queue, &req->base);
3256
3257 if ((error != 0) && (error != -EINPROGRESS))
3258 pr_debug(" sep - crypto enqueue failed: %x\n",
3259 error);
3260 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3261 sep_dequeuer, (void *)&sep_queue);
3262 if (error1)
3263 pr_debug(" sep - workqueue submit failed: %x\n",
3264 error1);
3265 spin_unlock_irq(&queue_lock);
3266 /* We return result of crypto enqueue */
3267 return error;
3268 }
3269
3270 static int sep_crypto_init(struct crypto_tfm *tfm)
3271 {
3272 const char *alg_name = crypto_tfm_alg_name(tfm);
3273
3274 if (alg_name == NULL)
3275 pr_debug("sep_crypto_init alg is NULL\n");
3276 else
3277 pr_debug("sep_crypto_init alg is %s\n", alg_name);
3278
3279 tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
3280 return 0;
3281 }
3282
3283 static void sep_crypto_exit(struct crypto_tfm *tfm)
3284 {
3285 pr_debug("sep_crypto_exit\n");
3286 }
3287
3288 static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3289 unsigned int keylen)
3290 {
3291 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3292
3293 pr_debug("sep aes setkey\n");
3294
3295 pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
3296 switch (keylen) {
3297 case SEP_AES_KEY_128_SIZE:
3298 sctx->aes_key_size = AES_128;
3299 break;
3300 case SEP_AES_KEY_192_SIZE:
3301 sctx->aes_key_size = AES_192;
3302 break;
3303 case SEP_AES_KEY_256_SIZE:
3304 sctx->aes_key_size = AES_256;
3305 break;
3306 case SEP_AES_KEY_512_SIZE:
3307 sctx->aes_key_size = AES_512;
3308 break;
3309 default:
3310 pr_debug("invalid sep aes key size %x\n",
3311 keylen);
3312 return -EINVAL;
3313 }
3314
3315 memset(&sctx->key.aes, 0, sizeof(u32) *
3316 SEP_AES_MAX_KEY_SIZE_WORDS);
3317 memcpy(&sctx->key.aes, key, keylen);
3318 sctx->keylen = keylen;
3319 /* Indicate to encrypt/decrypt function to send key to SEP */
3320 sctx->key_sent = 0;
3321
3322 return 0;
3323 }
3324
3325 static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
3326 {
3327 int error;
3328 int error1;
3329 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3330
3331 pr_debug("sep - doing aes ecb encrypt\n");
3332
3333 /* Clear out task context */
3334 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3335
3336 ta_ctx->sep_used = sep_dev;
3337 ta_ctx->current_request = AES_ECB;
3338 ta_ctx->current_hash_req = NULL;
3339 ta_ctx->current_cypher_req = req;
3340 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3341 ta_ctx->aes_opmode = SEP_AES_ECB;
3342 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3343 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3344
3345 /* lock necessary so that only one entity touches the queues */
3346 spin_lock_irq(&queue_lock);
3347 error = crypto_enqueue_request(&sep_queue, &req->base);
3348
3349 if ((error != 0) && (error != -EINPROGRESS))
3350 pr_debug(" sep - crypto enqueue failed: %x\n",
3351 error);
3352 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3353 sep_dequeuer, (void *)&sep_queue);
3354 if (error1)
3355 pr_debug(" sep - workqueue submit failed: %x\n",
3356 error1);
3357 spin_unlock_irq(&queue_lock);
3358 /* We return result of crypto enqueue */
3359 return error;
3360 }
3361
3362 static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
3363 {
3364 int error;
3365 int error1;
3366 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3367
3368 pr_debug("sep - doing aes ecb decrypt\n");
3369
3370 /* Clear out task context */
3371 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3372
3373 ta_ctx->sep_used = sep_dev;
3374 ta_ctx->current_request = AES_ECB;
3375 ta_ctx->current_hash_req = NULL;
3376 ta_ctx->current_cypher_req = req;
3377 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3378 ta_ctx->aes_opmode = SEP_AES_ECB;
3379 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3380 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3381
3382 /* lock necessary so that only one entity touches the queues */
3383 spin_lock_irq(&queue_lock);
3384 error = crypto_enqueue_request(&sep_queue, &req->base);
3385
3386 if ((error != 0) && (error != -EINPROGRESS))
3387 pr_debug(" sep - crypto enqueue failed: %x\n",
3388 error);
3389 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3390 sep_dequeuer, (void *)&sep_queue);
3391 if (error1)
3392 pr_debug(" sep - workqueue submit failed: %x\n",
3393 error1);
3394 spin_unlock_irq(&queue_lock);
3395 /* We return result of crypto enqueue */
3396 return error;
3397 }
3398
3399 static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
3400 {
3401 int error;
3402 int error1;
3403 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3404 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3405 crypto_ablkcipher_reqtfm(req));
3406
3407 pr_debug("sep - doing aes cbc encrypt\n");
3408
3409 /* Clear out task context */
3410 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3411
3412 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3413 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3414
3415 ta_ctx->sep_used = sep_dev;
3416 ta_ctx->current_request = AES_CBC;
3417 ta_ctx->current_hash_req = NULL;
3418 ta_ctx->current_cypher_req = req;
3419 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3420 ta_ctx->aes_opmode = SEP_AES_CBC;
3421 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3422 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3423
3424 /* lock necessary so that only one entity touches the queues */
3425 spin_lock_irq(&queue_lock);
3426 error = crypto_enqueue_request(&sep_queue, &req->base);
3427
3428 if ((error != 0) && (error != -EINPROGRESS))
3429 pr_debug(" sep - crypto enqueue failed: %x\n",
3430 error);
3431 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3432 sep_dequeuer, (void *)&sep_queue);
3433 if (error1)
3434 pr_debug(" sep - workqueue submit failed: %x\n",
3435 error1);
3436 spin_unlock_irq(&queue_lock);
3437 /* We return result of crypto enqueue */
3438 return error;
3439 }
3440
3441 static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
3442 {
3443 int error;
3444 int error1;
3445 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3446 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3447 crypto_ablkcipher_reqtfm(req));
3448
3449 pr_debug("sep - doing aes cbc decrypt\n");
3450
3451 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3452 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3453
3454 /* Clear out task context */
3455 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3456
3457 ta_ctx->sep_used = sep_dev;
3458 ta_ctx->current_request = AES_CBC;
3459 ta_ctx->current_hash_req = NULL;
3460 ta_ctx->current_cypher_req = req;
3461 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3462 ta_ctx->aes_opmode = SEP_AES_CBC;
3463 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3464 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3465
3466 /* lock necessary so that only one entity touches the queues */
3467 spin_lock_irq(&queue_lock);
3468 error = crypto_enqueue_request(&sep_queue, &req->base);
3469
3470 if ((error != 0) && (error != -EINPROGRESS))
3471 pr_debug(" sep - crypto enqueue failed: %x\n",
3472 error);
3473 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3474 sep_dequeuer, (void *)&sep_queue);
3475 if (error1)
3476 pr_debug(" sep - workqueue submit failed: %x\n",
3477 error1);
3478 spin_unlock_irq(&queue_lock);
3479 /* We return result of crypto enqueue */
3480 return error;
3481 }
3482
3483 static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3484 unsigned int keylen)
3485 {
3486 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3487 struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
3488 u32 *flags = &ctfm->crt_flags;
3489
3490 pr_debug("sep des setkey\n");
3491
3492 switch (keylen) {
3493 case DES_KEY_SIZE:
3494 sctx->des_nbr_keys = DES_KEY_1;
3495 break;
3496 case DES_KEY_SIZE * 2:
3497 sctx->des_nbr_keys = DES_KEY_2;
3498 break;
3499 case DES_KEY_SIZE * 3:
3500 sctx->des_nbr_keys = DES_KEY_3;
3501 break;
3502 default:
3503 pr_debug("invalid key size %x\n",
3504 keylen);
3505 return -EINVAL;
3506 }
3507
3508 if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
3509 (sep_weak_key(key, keylen))) {
3510
3511 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
3512 pr_debug("weak key\n");
3513 return -EINVAL;
3514 }
3515
3516 memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
3517 memcpy(&sctx->key.des.key1, key, keylen);
3518 sctx->keylen = keylen;
3519 /* Indicate to encrypt/decrypt function to send key to SEP */
3520 sctx->key_sent = 0;
3521
3522 return 0;
3523 }
3524
3525 static int sep_des_ecb_encrypt(struct ablkcipher_request *req)
3526 {
3527 int error;
3528 int error1;
3529 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3530
3531 pr_debug("sep - doing des ecb encrypt\n");
3532
3533 /* Clear out task context */
3534 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3535
3536 ta_ctx->sep_used = sep_dev;
3537 ta_ctx->current_request = DES_ECB;
3538 ta_ctx->current_hash_req = NULL;
3539 ta_ctx->current_cypher_req = req;
3540 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3541 ta_ctx->des_opmode = SEP_DES_ECB;
3542 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3543 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3544
3545 /* lock necessary so that only one entity touches the queues */
3546 spin_lock_irq(&queue_lock);
3547 error = crypto_enqueue_request(&sep_queue, &req->base);
3548
3549 if ((error != 0) && (error != -EINPROGRESS))
3550 pr_debug(" sep - crypto enqueue failed: %x\n",
3551 error);
3552 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3553 sep_dequeuer, (void *)&sep_queue);
3554 if (error1)
3555 pr_debug(" sep - workqueue submit failed: %x\n",
3556 error1);
3557 spin_unlock_irq(&queue_lock);
3558 /* We return result of crypto enqueue */
3559 return error;
3560 }
3561
3562 static int sep_des_ecb_decrypt(struct ablkcipher_request *req)
3563 {
3564 int error;
3565 int error1;
3566 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3567
3568 pr_debug("sep - doing des ecb decrypt\n");
3569
3570 /* Clear out task context */
3571 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3572
3573 ta_ctx->sep_used = sep_dev;
3574 ta_ctx->current_request = DES_ECB;
3575 ta_ctx->current_hash_req = NULL;
3576 ta_ctx->current_cypher_req = req;
3577 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3578 ta_ctx->des_opmode = SEP_DES_ECB;
3579 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3580 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3581
3582 /* lock necessary so that only one entity touches the queues */
3583 spin_lock_irq(&queue_lock);
3584 error = crypto_enqueue_request(&sep_queue, &req->base);
3585
3586 if ((error != 0) && (error != -EINPROGRESS))
3587 pr_debug(" sep - crypto enqueue failed: %x\n",
3588 error);
3589 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3590 sep_dequeuer, (void *)&sep_queue);
3591 if (error1)
3592 pr_debug(" sep - workqueue submit failed: %x\n",
3593 error1);
3594 spin_unlock_irq(&queue_lock);
3595 /* We return result of crypto enqueue */
3596 return error;
3597 }
3598
3599 static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
3600 {
3601 int error;
3602 int error1;
3603 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3604
3605 pr_debug("sep - doing des cbc encrypt\n");
3606
3607 /* Clear out task context */
3608 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3609
3610 ta_ctx->sep_used = sep_dev;
3611 ta_ctx->current_request = DES_CBC;
3612 ta_ctx->current_hash_req = NULL;
3613 ta_ctx->current_cypher_req = req;
3614 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3615 ta_ctx->des_opmode = SEP_DES_CBC;
3616 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3617 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3618
3619 /* lock necessary so that only one entity touches the queues */
3620 spin_lock_irq(&queue_lock);
3621 error = crypto_enqueue_request(&sep_queue, &req->base);
3622
3623 if ((error != 0) && (error != -EINPROGRESS))
3624 pr_debug(" sep - crypto enqueue failed: %x\n",
3625 error);
3626 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3627 sep_dequeuer, (void *)&sep_queue);
3628 if (error1)
3629 pr_debug(" sep - workqueue submit failed: %x\n",
3630 error1);
3631 spin_unlock_irq(&queue_lock);
3632 /* We return result of crypto enqueue */
3633 return error;
3634 }
3635
3636 static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
3637 {
3638 int error;
3639 int error1;
3640 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3641
3642 pr_debug("sep - doing des ecb decrypt\n");
3643
3644 /* Clear out task context */
3645 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3646
3647 ta_ctx->sep_used = sep_dev;
3648 ta_ctx->current_request = DES_CBC;
3649 ta_ctx->current_hash_req = NULL;
3650 ta_ctx->current_cypher_req = req;
3651 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3652 ta_ctx->des_opmode = SEP_DES_CBC;
3653 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3654 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3655
3656 /* lock necessary so that only one entity touches the queues */
3657 spin_lock_irq(&queue_lock);
3658 error = crypto_enqueue_request(&sep_queue, &req->base);
3659
3660 if ((error != 0) && (error != -EINPROGRESS))
3661 pr_debug(" sep - crypto enqueue failed: %x\n",
3662 error);
3663 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3664 sep_dequeuer, (void *)&sep_queue);
3665 if (error1)
3666 pr_debug(" sep - workqueue submit failed: %x\n",
3667 error1);
3668 spin_unlock_irq(&queue_lock);
3669 /* We return result of crypto enqueue */
3670 return error;
3671 }
3672
3673 static struct ahash_alg hash_algs[] = {
3674 {
3675 .init = sep_sha1_init,
3676 .update = sep_sha1_update,
3677 .final = sep_sha1_final,
3678 .digest = sep_sha1_digest,
3679 .finup = sep_sha1_finup,
3680 .halg = {
3681 .digestsize = SHA1_DIGEST_SIZE,
3682 .base = {
3683 .cra_name = "sha1",
3684 .cra_driver_name = "sha1-sep",
3685 .cra_priority = 100,
3686 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3687 CRYPTO_ALG_ASYNC,
3688 .cra_blocksize = SHA1_BLOCK_SIZE,
3689 .cra_ctxsize = sizeof(struct sep_system_ctx),
3690 .cra_alignmask = 0,
3691 .cra_module = THIS_MODULE,
3692 .cra_init = sep_hash_cra_init,
3693 .cra_exit = sep_hash_cra_exit,
3694 }
3695 }
3696 },
3697 {
3698 .init = sep_md5_init,
3699 .update = sep_md5_update,
3700 .final = sep_md5_final,
3701 .digest = sep_md5_digest,
3702 .finup = sep_md5_finup,
3703 .halg = {
3704 .digestsize = MD5_DIGEST_SIZE,
3705 .base = {
3706 .cra_name = "md5",
3707 .cra_driver_name = "md5-sep",
3708 .cra_priority = 100,
3709 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3710 CRYPTO_ALG_ASYNC,
3711 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
3712 .cra_ctxsize = sizeof(struct sep_system_ctx),
3713 .cra_alignmask = 0,
3714 .cra_module = THIS_MODULE,
3715 .cra_init = sep_hash_cra_init,
3716 .cra_exit = sep_hash_cra_exit,
3717 }
3718 }
3719 },
{
	.init = sep_sha224_init,
	.update = sep_sha224_update,
	.final = sep_sha224_final,
	.digest = sep_sha224_digest,
	.finup = sep_sha224_finup,
	.halg = {
		.digestsize = SHA224_DIGEST_SIZE,
		.base = {
			.cra_name = "sha224",
			.cra_driver_name = "sha224-sep",
			.cra_priority = 100,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA224_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sep_system_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
			.cra_init = sep_hash_cra_init,
			.cra_exit = sep_hash_cra_exit,
		}
	}
},
{
	.init = sep_sha256_init,
	.update = sep_sha256_update,
	.final = sep_sha256_final,
	.digest = sep_sha256_digest,
	.finup = sep_sha256_finup,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "sha256-sep",
			.cra_priority = 100,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sep_system_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
			.cra_init = sep_hash_cra_init,
			.cra_exit = sep_hash_cra_exit,
		}
	}
}
};

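/*
 * Block cipher algorithms exposed to the kernel crypto API as
 * async block ciphers (ablkcipher). Callers reach these through
 * the normal API, e.g. crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 * the "-sep" driver names let the SEP implementation be requested
 * explicitly.
 */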
static struct crypto_alg crypto_algs[] = {
{
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-sep",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct sep_system_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = sep_crypto_init,
	.cra_exit = sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = sep_aes_setkey,
		.encrypt = sep_aes_ecb_encrypt,
		.decrypt = sep_aes_ecb_decrypt,
	}
},
{
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-sep",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct sep_system_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = sep_crypto_init,
	.cra_exit = sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = sep_aes_setkey,
		.encrypt = sep_aes_cbc_encrypt,
		.ivsize = AES_BLOCK_SIZE,
		.decrypt = sep_aes_cbc_decrypt,
	}
},
{
	/* "ecb", not "ebc": the name must match the crypto API mode */
	.cra_name = "ecb(des)",
	.cra_driver_name = "ecb-des-sep",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct sep_system_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = sep_crypto_init,
	.cra_exit = sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.setkey = sep_des_setkey,
		/* the handlers keep their historical "ebc" spelling */
		.encrypt = sep_des_ebc_encrypt,
		.decrypt = sep_des_ebc_decrypt,
	}
},
{
	.cra_name = "cbc(des)",
	.cra_driver_name = "cbc-des-sep",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct sep_system_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = sep_crypto_init,
	.cra_exit = sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.setkey = sep_des_setkey,
		.encrypt = sep_des_cbc_encrypt,
		.ivsize = DES_BLOCK_SIZE,
		.decrypt = sep_des_cbc_decrypt,
	}
},
{
	/* canonical kernel name is "des3_ede", with an underscore */
	.cra_name = "ecb(des3_ede)",
	.cra_driver_name = "ecb-des3-ede-sep",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct sep_system_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = sep_crypto_init,
	.cra_exit = sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.setkey = sep_des_setkey,
		.encrypt = sep_des_ebc_encrypt,
		.decrypt = sep_des_ebc_decrypt,
	}
},
{
	.cra_name = "cbc(des3_ede)",
	.cra_driver_name = "cbc-des3-ede-sep",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct sep_system_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = sep_crypto_init,
	.cra_exit = sep_crypto_exit,
	.cra_u.ablkcipher = {
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.setkey = sep_des_setkey,
		.encrypt = sep_des_cbc_encrypt,
		/* CBC needs an IV; this was missing */
		.ivsize = DES_BLOCK_SIZE,
		.decrypt = sep_des_cbc_decrypt,
	}
}
};

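/**
 * sep_crypto_setup - register SEP algorithms with the crypto API
 * Initializes the finish tasklet, the shared request queue and its
 * lock, creates the single-threaded workqueue, then registers all
 * hash and block cipher algorithms. On failure, everything that
 * was registered so far is unregistered again.
 * Returns 0 on success or a negative errno.
 */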
int sep_crypto_setup(void)
{
	int err, i, j, k;

	tasklet_init(&sep_dev->finish_tasklet, sep_finish,
		(unsigned long)sep_dev);

	crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);

	sep_dev->workqueue = create_singlethread_workqueue(
		"sep_crypto_workqueue");
	if (!sep_dev->workqueue) {
		dev_warn(&sep_dev->pdev->dev, "can't create workqueue\n");
		return -ENOMEM;
	}

	spin_lock_init(&queue_lock);

	err = 0;
	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
		err = crypto_register_ahash(&hash_algs[i]);
		if (err)
			goto err_algs;
	}

	err = 0;
	for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
		err = crypto_register_alg(&crypto_algs[j]);
		if (err)
			goto err_crypto_algs;
	}

	return err;

err_algs:
	for (k = 0; k < i; k++)
		crypto_unregister_ahash(&hash_algs[k]);
	return err;

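/*
 * Unregister the cipher algorithms that made it in, then fall
 * through to err_algs: at this point i == ARRAY_SIZE(hash_algs),
 * so that loop unregisters every hash algorithm as well.
 */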
err_crypto_algs:
	for (k = 0; k < j; k++)
		crypto_unregister_alg(&crypto_algs[k]);
	goto err_algs;
}

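/**
 * sep_crypto_takedown - unregister SEP algorithms
 * Unregisters every hash and block cipher algorithm registered by
 * sep_crypto_setup() and kills the finish tasklet.
 */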
void sep_crypto_takedown(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i]);
	for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
		crypto_unregister_alg(&crypto_algs[i]);

	tasklet_kill(&sep_dev->finish_tasklet);
}

#endif /* CONFIG_CRYPTO || CONFIG_CRYPTO_MODULE */