Staging: sep: fix coding style issues
drivers/staging/sep/sep_crypto.c
1 /*
2 *
3 * sep_crypto.c - Crypto interface structures
4 *
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2010 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 * CONTACTS:
22 *
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.02.22 Enable Kernel Crypto
31 *
32 */
33
34 /* #define DEBUG */
35 #include <linux/init.h>
36 #include <linux/module.h>
37 #include <linux/miscdevice.h>
38 #include <linux/fs.h>
39 #include <linux/cdev.h>
40 #include <linux/kdev_t.h>
41 #include <linux/mutex.h>
42 #include <linux/sched.h>
43 #include <linux/mm.h>
44 #include <linux/poll.h>
45 #include <linux/wait.h>
46 #include <linux/pci.h>
48 #include <linux/pm_runtime.h>
49 #include <linux/err.h>
50 #include <linux/device.h>
51 #include <linux/errno.h>
52 #include <linux/interrupt.h>
53 #include <linux/kernel.h>
54 #include <linux/clk.h>
55 #include <linux/irq.h>
56 #include <linux/io.h>
57 #include <linux/platform_device.h>
58 #include <linux/list.h>
59 #include <linux/dma-mapping.h>
60 #include <linux/delay.h>
61 #include <linux/jiffies.h>
62 #include <linux/workqueue.h>
63 #include <linux/crypto.h>
64 #include <crypto/internal/hash.h>
65 #include <crypto/scatterwalk.h>
66 #include <crypto/sha.h>
67 #include <crypto/md5.h>
68 #include <crypto/aes.h>
69 #include <crypto/des.h>
70 #include <crypto/hash.h>
71 #include "sep_driver_hw_defs.h"
72 #include "sep_driver_config.h"
73 #include "sep_driver_api.h"
74 #include "sep_dev.h"
75 #include "sep_crypto.h"
76
77 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
78
79 /* Globals for queuing */
80 static spinlock_t queue_lock;
81 static struct crypto_queue sep_queue;
82
83 /* Forward declaration of the dequeuer */
84 static void sep_dequeuer(void *data);
85
86 /* TESTING */
87 /**
88 * crypto_sep_dump_message - dump the message that is pending
89 * @sep: SEP device
90 * @msg: message to dump; this will only print a dump if DEBUG
91 * is set, and it follows kernel debug print enabling
92 */
93 static void crypto_sep_dump_message(struct sep_device *sep, void *msg)
94 {
95 #if 0
96 u32 *p;
97 u32 *i;
98 int count;
99
100 p = sep->shared_addr;
101 i = (u32 *)msg;
102 for (count = 0; count < 10 * 4; count += 4)
103 dev_dbg(&sep->pdev->dev,
104 "[PID%d] Word %d of the message is %x (local)%x\n",
105 current->pid, count/4, *p++, *i++);
106 #endif
107 }
108
109 /**
110 * sep_do_callback
111 * @work: pointer to work_struct
112 * This is what is called by the queue; it is generic so that it
113 * can be used by any type of operation as each different callback
114 * function can use the data parameter in its own way
115 */
116 static void sep_do_callback(struct work_struct *work)
117 {
118 struct sep_work_struct *sep_work = container_of(work,
119 struct sep_work_struct, work);
120 if (sep_work != NULL) {
121 (sep_work->callback)(sep_work->data);
122 kfree(sep_work);
123 } else {
124 pr_debug("sep crypto: do callback - NULL container\n");
125 }
126 }
127
128 /**
129 * sep_submit_work
130 * @work_queue: pointer to struct workqueue_struct
131 * @funct: pointer to function to execute
132 * @data: pointer to data; function will know
133 * how to use it
134 * This is a generic API to submit something to
135 * the queue. The callback function will depend
136 * on what operation is to be done
137 */
138 static int sep_submit_work(struct workqueue_struct *work_queue,
139 void(*funct)(void *),
140 void *data)
141 {
142 struct sep_work_struct *sep_work;
143 int result;
144
145 sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);
146
147 if (sep_work == NULL) {
148 pr_debug("sep crypto: cant allocate work structure\n");
149 return -ENOMEM;
150 }
151
152 sep_work->callback = funct;
153 sep_work->data = data;
154 INIT_WORK(&sep_work->work, sep_do_callback);
155 result = queue_work(work_queue, &sep_work->work);
156 if (!result) {
157 pr_debug("sep_crypto: queue_work failed\n");
158 return -EINVAL;
159 }
160 return 0;
161 }
162
163 /**
164 * sep_alloc_sg_buf -
165 * @sep: pointer to struct sep_device
166 * @size: total size of area
167 * @block_size: minimum size of chunks;
168 * each chunk is a multiple of this size (except possibly the last)
169 * @returns: pointer to struct scatterlist for new
170 * buffer
171 **/
172 static struct scatterlist *sep_alloc_sg_buf(
173 struct sep_device *sep,
174 size_t size,
175 size_t block_size)
176 {
177 u32 nbr_pages;
178 u32 ct1;
179 void *buf;
180 size_t current_size;
181 size_t real_page_size;
182
183 struct scatterlist *sg, *sg_temp;
184
185 if (size == 0)
186 return NULL;
187
188 dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");
189
190 current_size = 0;
191 nbr_pages = 0;
192 real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
193 /**
194 * The size of each page must be a multiple of the operation
195 * block size; increment by the adjusted page size until
196 * the total size is reached, which gives the number of
197 * pages
198 */
199 while (current_size < size) {
200 current_size += real_page_size;
201 nbr_pages += 1;
202 }
203
204 sg = kmalloc((sizeof(struct scatterlist) * nbr_pages), GFP_ATOMIC);
205 if (!sg) {
206 dev_warn(&sep->pdev->dev, "Cannot allocate page for new sg\n");
207 return NULL;
208 }
209
210 sg_init_table(sg, nbr_pages);
211
212 current_size = 0;
213 sg_temp = sg;
214 for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
215 buf = (void *)get_zeroed_page(GFP_ATOMIC);
216 if (!buf) {
217 dev_warn(&sep->pdev->dev,
218 "Cannot allocate page for new buffer\n");
219 kfree(sg);
220 return NULL;
221 }
222
223 sg_set_buf(sg_temp, buf, real_page_size);
224 if ((size - current_size) > real_page_size) {
225 sg_temp->length = real_page_size;
226 current_size += real_page_size;
227 } else {
228 sg_temp->length = (size - current_size);
229 current_size = size;
230 }
231 sg_temp = sg_next(sg);
232 }
233 return sg;
234 }
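
/*
 * Worked example (illustrative values only, assuming PAGE_SIZE == 4096):
 * with block_size == 16 (the AES block size), real_page_size is
 * 4096 - (4096 % 16) == 4096, so a request of size == 10000 bytes
 * needs nbr_pages == 3; the first two sg entries get length 4096 and
 * the last entry gets the remainder, 10000 - 8192 == 1808.
 */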
235
236 /**
237 * sep_free_sg_buf -
238 * @sg: pointer to struct scatterlist; points to area to free
239 */
240 static void sep_free_sg_buf(struct scatterlist *sg)
241 {
242 struct scatterlist *sg_temp = sg;
243 while (sg_temp) {
244 free_page((unsigned long)sg_virt(sg_temp));
245 sg_temp = sg_next(sg_temp);
246 }
247 kfree(sg);
248 }
249
250 /**
251 * sep_copy_sg -
252 * @sep: pointer to struct sep_device
253 * @sg_src: pointer to struct scatterlist for source
254 * @sg_dst: pointer to struct scatterlist for destination
255 * @size: size (in bytes) of data to copy
256 *
257 * Copy data from one scatterlist to another; both must
258 * be the same size
259 */
260 static void sep_copy_sg(
261 struct sep_device *sep,
262 struct scatterlist *sg_src,
263 struct scatterlist *sg_dst,
264 size_t size)
265 {
266 u32 seg_size;
267 u32 in_offset, out_offset;
268
269 u32 count = 0;
270 struct scatterlist *sg_src_tmp = sg_src;
271 struct scatterlist *sg_dst_tmp = sg_dst;
272 in_offset = 0;
273 out_offset = 0;
274
275 dev_dbg(&sep->pdev->dev, "sep copy sg\n");
276
277 if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
278 return;
279
280 dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
281
282 while (count < size) {
283 if ((sg_src_tmp->length - in_offset) >
284 (sg_dst_tmp->length - out_offset))
285 seg_size = sg_dst_tmp->length - out_offset;
286 else
287 seg_size = sg_src_tmp->length - in_offset;
288
289 if (seg_size > (size - count))
290 seg_size = size - count;
291
292 memcpy(sg_virt(sg_dst_tmp) + out_offset,
293 sg_virt(sg_src_tmp) + in_offset,
294 seg_size);
295
296 in_offset += seg_size;
297 out_offset += seg_size;
298 count += seg_size;
299
300 if (in_offset >= sg_src_tmp->length) {
301 sg_src_tmp = sg_next(sg_src_tmp);
302 in_offset = 0;
303 }
304
305 if (out_offset >= sg_dst_tmp->length) {
306 sg_dst_tmp = sg_next(sg_dst_tmp);
307 out_offset = 0;
308 }
309 }
310 }
311
312 /**
313 * sep_oddball_pages -
314 * @sep: pointer to struct sep_device
315 * @sg: pointer to struct scatterlist - buffer to check
316 * @data_size: total data size
317 * @block_size: minimum block size; page sizes must be multiples of this size
318 * @do_copy: 1 means do copy, 0 means do not copy
319 * @new_sg: pointer to location to put pointer to new sg area
320 * @returns: 1 if new scatterlist is needed; 0 if not needed;
321 * error value if operation failed
322 *
323 * The SEP device requires all pages to be multiples of the
324 * minimum block size appropriate for the operation
325 * This function checks all pages; if any are oddball sizes
326 * (not multiple of block sizes), it creates a new scatterlist.
327 * If the to_copy parameter is set to 1, then a scatter list
328 * copy is performed. The pointer to the new scatterlist is
329 * put into the address supplied by the new_sg parameter; if
330 * no new scatterlist is needed, then a NULL is put into
331 * the location at new_sg.
332 *
333 */
334 static int sep_oddball_pages(
335 struct sep_device *sep,
336 struct scatterlist *sg,
337 size_t data_size,
338 u32 block_size,
339 struct scatterlist **new_sg,
340 u32 do_copy)
341 {
342 struct scatterlist *sg_temp;
343 u32 flag;
344 u32 nbr_pages, page_count;
345
346 dev_dbg(&sep->pdev->dev, "sep oddball\n");
347 if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
348 return 0;
349
350 dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
351 flag = 0;
352 nbr_pages = 0;
353 page_count = 0;
354 sg_temp = sg;
355
356 while (sg_temp) {
357 nbr_pages += 1;
358 sg_temp = sg_next(sg_temp);
359 }
360
361 sg_temp = sg;
362 while ((sg_temp) && (flag == 0)) {
363 page_count += 1;
364 if (sg_temp->length % block_size)
365 flag = 1;
366 else
367 sg_temp = sg_next(sg_temp);
368 }
369
370 /* Do not process if last (or only) page is oddball */
371 if (nbr_pages == page_count)
372 flag = 0;
373
374 if (flag) {
375 dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
376 *new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
377 if (*new_sg == NULL) {
378 dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
379 return -ENOMEM;
380 }
381
382 if (do_copy)
383 sep_copy_sg(sep, sg, *new_sg, data_size);
384
385 return 1;
386 } else {
387 return 0;
388 }
389 }
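
/*
 * Illustrative example (hypothetical values): for sg entry lengths
 * {4096, 100, 4096} and block_size == 16, the middle entry is not a
 * multiple of the block size, so a new scatterlist is allocated (and
 * filled from the original when do_copy is set) and 1 is returned.
 * For lengths {4096, 4096, 100} the only oddball entry is the last
 * one, so the function returns 0 and no new list is built.
 */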
390
391 /**
392 * sep_copy_offset_sg -
393 * @sep: pointer to struct sep_device;
394 * @sg: pointer to struct scatterlist
395 * @offset: offset into scatterlist memory
396 * @dst: place to put data
397 * @len: length of data
398 * @returns: number of bytes copied
399 *
400 * This copies data from scatterlist buffer
401 * offset from beginning - it is needed for
402 * handling tail data in hash
403 */
404 static size_t sep_copy_offset_sg(
405 struct sep_device *sep,
406 struct scatterlist *sg,
407 u32 offset,
408 void *dst,
409 u32 len)
410 {
411 size_t page_start;
412 size_t page_end;
413 size_t offset_within_page;
414 size_t length_within_page;
415 size_t length_remaining;
416 size_t current_offset;
417
418 /* Find which page is beginning of segment */
419 page_start = 0;
420 page_end = sg->length;
421 while ((sg) && (offset > page_end)) {
422 page_start += sg->length;
423 sg = sg_next(sg);
424 if (sg)
425 page_end += sg->length;
426 }
427
428 if (sg == NULL)
429 return -ENOMEM;
430
431 offset_within_page = offset - page_start;
432 if ((sg->length - offset_within_page) >= len) {
433 /* All within this page */
434 memcpy(dst, sg_virt(sg) + offset_within_page, len);
435 return len;
436 } else {
437 /* Scattered multiple pages */
438 current_offset = 0;
439 length_remaining = len;
440 while ((sg) && (current_offset < len)) {
441 length_within_page = sg->length - offset_within_page;
442 if (length_within_page >= length_remaining) {
443 memcpy(dst+current_offset,
444 sg_virt(sg) + offset_within_page,
445 length_remaining);
446 length_remaining = 0;
447 current_offset = len;
448 } else {
449 memcpy(dst+current_offset,
450 sg_virt(sg) + offset_within_page,
451 length_within_page);
452 length_remaining -= length_within_page;
453 current_offset += length_within_page;
454 offset_within_page = 0;
455 sg = sg_next(sg);
456 }
457 }
458
459 if (sg == NULL)
460 return -ENOMEM;
461 }
462 return len;
463 }
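
/*
 * Illustrative example (hypothetical values): with two 4096-byte sg
 * entries, offset == 5000 and len == 100, the loop above advances to
 * the second entry (page_start == 4096, page_end == 8192), giving
 * offset_within_page == 904; since 4096 - 904 >= 100 the whole copy
 * is handled by the single memcpy in the "all within this page" path.
 */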
464
465 /**
466 * partial_overlap -
467 * @src_ptr: source pointer
468 * @dst_ptr: destination pointer
469 * @nbytes: number of bytes
470 * @returns: 0 for success; -EINVAL for failure
471 * We cannot have any partial overlap. Total overlap
472 * where src is the same as dst is okay
473 */
474 static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
475 {
476 /* Check for partial overlap */
477 if (src_ptr != dst_ptr) {
478 if (src_ptr < dst_ptr) {
479 if ((src_ptr + nbytes) > dst_ptr)
480 return -EINVAL;
481 } else {
482 if ((dst_ptr + nbytes) > src_ptr)
483 return -EINVAL;
484 }
485 }
486
487 return 0;
488 }
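
/*
 * Illustrative example (hypothetical pointers): for src_ptr == buf,
 * dst_ptr == buf + 16 and nbytes == 64, src_ptr + nbytes overruns
 * dst_ptr, so the ranges partially overlap and -EINVAL is returned.
 * src_ptr == dst_ptr (total overlap, an in-place operation) returns 0.
 */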
489
490 /* Debug - prints only if DEBUG is defined; follows kernel debug model */
491 static void sep_dump(struct sep_device *sep, char *stg, void *start, int len)
492 {
493 #if 0
494 int ct1;
495 u8 *ptt;
496
497 dev_dbg(&sep->pdev->dev,
498 "Dump of %s starting at %08lx for %08x bytes\n",
499 stg, (unsigned long)start, len);
500 for (ct1 = 0; ct1 < len; ct1 += 1) {
501 ptt = (u8 *)(start + ct1);
502 dev_dbg(&sep->pdev->dev, "%02x ", *ptt);
503 if (ct1 % 16 == 15)
504 dev_dbg(&sep->pdev->dev, "\n");
505 }
506 dev_dbg(&sep->pdev->dev, "\n");
507 #endif
508 }
509
510 /* Debug - prints only if DEBUG is defined; follows kernel debug model */
511 static void sep_dump_sg(struct sep_device *sep, char *stg,
512 struct scatterlist *sg)
513 {
514 #if 0
515 int ct1, ct2;
516 u8 *ptt;
517
518 dev_dbg(&sep->pdev->dev, "Dump of scatterlist %s\n", stg);
519
520 ct1 = 0;
521 while (sg) {
522 dev_dbg(&sep->pdev->dev, "page %x\n size %x", ct1,
523 sg->length);
524 dev_dbg(&sep->pdev->dev, "phys addr is %lx",
525 (unsigned long)sg_phys(sg));
526 ptt = sg_virt(sg);
527 for (ct2 = 0; ct2 < sg->length; ct2 += 1) {
528 dev_dbg(&sep->pdev->dev, "byte %x is %02x\n",
529 ct2, (unsigned char)*(ptt + ct2));
530 }
531
532 ct1 += 1;
533 sg = sg_next(sg);
534 }
535 dev_dbg(&sep->pdev->dev, "\n");
536 #endif
537 }
538
539 /* Debug - prints only if DEBUG is defined */
540 static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
541
542 {
543 unsigned char *cptr;
544 struct sep_aes_internal_context *aes_internal;
545 struct sep_des_internal_context *des_internal;
546 int ct1;
547
548 struct this_task_ctx *ta_ctx;
549 struct crypto_ablkcipher *tfm;
550 struct sep_system_ctx *sctx;
551
552 ta_ctx = ablkcipher_request_ctx(req);
553 tfm = crypto_ablkcipher_reqtfm(req);
554 sctx = crypto_ablkcipher_ctx(tfm);
555
556 dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
557 if ((ta_ctx->current_request == DES_CBC) &&
558 (ta_ctx->des_opmode == SEP_DES_CBC)) {
559
560 des_internal = (struct sep_des_internal_context *)
561 sctx->des_private_ctx.ctx_buf;
562 /* print vendor */
563 dev_dbg(&ta_ctx->sep_used->pdev->dev,
564 "sep - vendor iv for DES\n");
565 cptr = (unsigned char *)des_internal->iv_context;
566 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
567 dev_dbg(&ta_ctx->sep_used->pdev->dev,
568 "%02x\n", *(cptr + ct1));
569
570 /* print walk */
571 dev_dbg(&ta_ctx->sep_used->pdev->dev,
572 "sep - walk from kernel crypto iv for DES\n");
573 cptr = (unsigned char *)ta_ctx->walk.iv;
574 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
575 dev_dbg(&ta_ctx->sep_used->pdev->dev,
576 "%02x\n", *(cptr + ct1));
577 } else if ((ta_ctx->current_request == AES_CBC) &&
578 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
579
580 aes_internal = (struct sep_aes_internal_context *)
581 sctx->aes_private_ctx.cbuff;
582 /* print vendor */
583 dev_dbg(&ta_ctx->sep_used->pdev->dev,
584 "sep - vendor iv for AES\n");
585 cptr = (unsigned char *)aes_internal->aes_ctx_iv;
586 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
587 dev_dbg(&ta_ctx->sep_used->pdev->dev,
588 "%02x\n", *(cptr + ct1));
589
590 /* print walk */
591 dev_dbg(&ta_ctx->sep_used->pdev->dev,
592 "sep - walk from kernel crypto iv for AES\n");
593 cptr = (unsigned char *)ta_ctx->walk.iv;
594 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
595 dev_dbg(&ta_ctx->sep_used->pdev->dev,
596 "%02x\n", *(cptr + ct1));
597 }
598 }
599
600 /**
601 * RFC2451: Weak key check
602 * Returns: 1 (weak), 0 (not weak)
603 */
604 static int sep_weak_key(const u8 *key, unsigned int keylen)
605 {
606 static const u8 parity[] = {
607 8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
608 0, 8, 8, 0, 8, 0, 0, 8, 8,
609 0, 0, 8, 0, 8, 8, 3,
610 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
611 8, 0, 0, 8, 0, 8, 8, 0, 0,
612 8, 8, 0, 8, 0, 0, 8,
613 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
614 8, 0, 0, 8, 0, 8, 8, 0, 0,
615 8, 8, 0, 8, 0, 0, 8,
616 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
617 0, 8, 8, 0, 8, 0, 0, 8, 8,
618 0, 0, 8, 0, 8, 8, 0,
619 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
620 8, 0, 0, 8, 0, 8, 8, 0, 0,
621 8, 8, 0, 8, 0, 0, 8,
622 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
623 0, 8, 8, 0, 8, 0, 0, 8, 8,
624 0, 0, 8, 0, 8, 8, 0,
625 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
626 0, 8, 8, 0, 8, 0, 0, 8, 8,
627 0, 0, 8, 0, 8, 8, 0,
628 4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
629 8, 5, 0, 8, 0, 8, 8, 0, 0,
630 8, 8, 0, 8, 0, 6, 8,
631 };
632
633 u32 n, w;
634
635 n = parity[key[0]]; n <<= 4;
636 n |= parity[key[1]]; n <<= 4;
637 n |= parity[key[2]]; n <<= 4;
638 n |= parity[key[3]]; n <<= 4;
639 n |= parity[key[4]]; n <<= 4;
640 n |= parity[key[5]]; n <<= 4;
641 n |= parity[key[6]]; n <<= 4;
642 n |= parity[key[7]];
643 w = 0x88888888L;
644
645 /* 1 in 10^10 keys passes this test */
646 if (!((n - (w >> 3)) & w)) {
647 if (n < 0x41415151) {
648 if (n < 0x31312121) {
649 if (n < 0x14141515) {
650 /* 01 01 01 01 01 01 01 01 */
651 if (n == 0x11111111)
652 goto weak;
653 /* 01 1F 01 1F 01 0E 01 0E */
654 if (n == 0x13131212)
655 goto weak;
656 } else {
657 /* 01 E0 01 E0 01 F1 01 F1 */
658 if (n == 0x14141515)
659 goto weak;
660 /* 01 FE 01 FE 01 FE 01 FE */
661 if (n == 0x16161616)
662 goto weak;
663 }
664 } else {
665 if (n < 0x34342525) {
666 /* 1F 01 1F 01 0E 01 0E 01 */
667 if (n == 0x31312121)
668 goto weak;
669 /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
670 if (n == 0x33332222)
671 goto weak;
672 } else {
673 /* 1F E0 1F E0 0E F1 0E F1 */
674 if (n == 0x34342525)
675 goto weak;
676 /* 1F FE 1F FE 0E FE 0E FE */
677 if (n == 0x36362626)
678 goto weak;
679 }
680 }
681 } else {
682 if (n < 0x61616161) {
683 if (n < 0x44445555) {
684 /* E0 01 E0 01 F1 01 F1 01 */
685 if (n == 0x41415151)
686 goto weak;
687 /* E0 1F E0 1F F1 0E F1 0E */
688 if (n == 0x43435252)
689 goto weak;
690 } else {
691 /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
692 if (n == 0x44445555)
693 goto weak;
694 /* E0 FE E0 FE F1 FE F1 FE */
695 if (n == 0x46465656)
696 goto weak;
697 }
698 } else {
699 if (n < 0x64646565) {
700 /* FE 01 FE 01 FE 01 FE 01 */
701 if (n == 0x61616161)
702 goto weak;
703 /* FE 1F FE 1F FE 0E FE 0E */
704 if (n == 0x63636262)
705 goto weak;
706 } else {
707 /* FE E0 FE E0 FE F1 FE F1 */
708 if (n == 0x64646565)
709 goto weak;
710 /* FE FE FE FE FE FE FE FE */
711 if (n == 0x66666666)
712 goto weak;
713 }
714 }
715 }
716 }
717 return 0;
718 weak:
719 return 1;
720 }
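
/*
 * How the check above works: each key byte indexes the parity[] table,
 * which compresses it to a 4-bit code, and the eight codes are packed
 * into the 32-bit value n, so the known weak/semi-weak DES keys
 * collapse to a handful of constants.  Illustrative example: the key
 * 01 01 01 01 01 01 01 01 gives n == 0x11111111 and is reported weak.
 */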
721 /**
722 * sep_sg_nents
723 */
724 static u32 sep_sg_nents(struct scatterlist *sg)
725 {
726 u32 ct1 = 0;
727 while (sg) {
728 ct1 += 1;
729 sg = sg_next(sg);
730 }
731
732 return ct1;
733 }
734
735 /**
736 * sep_start_msg -
737 * @ta_ctx: pointer to struct this_task_ctx
738 * @returns: offset to place for the next word in the message
739 * Set up pointer in message pool for new message
740 */
741 static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
742 {
743 u32 *word_ptr;
744 ta_ctx->msg_len_words = 2;
745 ta_ctx->msgptr = ta_ctx->msg;
746 memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
747 ta_ctx->msgptr += sizeof(u32) * 2;
748 word_ptr = (u32 *)ta_ctx->msgptr;
749 *word_ptr = SEP_START_MSG_TOKEN;
750 return sizeof(u32) * 2;
751 }
752
753 /**
754 * sep_end_msg -
755 * @ta_ctx: pointer to struct this_task_ctx
756 * @msg_offset: current message offset
757 * End the message; set the length word and the
758 * (currently zero) CRC word at the end of
759 * the message
760 */
761 static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
762 {
763 u32 *word_ptr;
764 /* Msg size goes into msg after token */
765 ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
766 word_ptr = (u32 *)ta_ctx->msgptr;
767 word_ptr += 1;
768 *word_ptr = ta_ctx->msg_len_words;
769
770 /* CRC (currently 0) goes at end of msg */
771 word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
772 *word_ptr = 0;
773 }
774
775 /**
776 * sep_start_inbound_msg -
777 * @ta_ctx: pointer to struct this_task_ctx
778 * @msg_offset: offset to place for the next word in the message
779 * @returns: 0 for success; error value for failure
780 * Set up pointer in message pool for inbound message
781 */
782 static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
783 {
784 u32 *word_ptr;
785 u32 token;
786 u32 error = SEP_OK;
787
788 *msg_offset = sizeof(u32) * 2;
789 word_ptr = (u32 *)ta_ctx->msgptr;
790 token = *word_ptr;
791 ta_ctx->msg_len_words = *(word_ptr + 1);
792
793 if (token != SEP_START_MSG_TOKEN) {
794 error = SEP_INVALID_START;
795 goto end_function;
796 }
797
798 end_function:
799
800 return error;
801 }
802
803 /**
804 * sep_write_msg -
805 * @ta_ctx: pointer to struct this_task_ctx
806 * @in_addr: pointer to start of parameter
807 * @size: size of parameter to copy (in bytes)
808 * @max_size: size to move up offset; SEP message is in word sizes
809 * @msg_offset: pointer to current offset (is updated)
810 * @byte_array: flag to indicate whether endianness must be changed
811 * Copies data into the message area from caller
812 */
813 static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
814 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
815 {
816 u32 *word_ptr;
817 void *void_ptr;
818 void_ptr = ta_ctx->msgptr + *msg_offset;
819 word_ptr = (u32 *)void_ptr;
820 memcpy(void_ptr, in_addr, size);
821 *msg_offset += max_size;
822
823 /* Do we need to manipulate endian? */
824 if (byte_array) {
825 u32 i;
826 for (i = 0; i < ((size + 3) / 4); i += 1)
827 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
828 }
829 }
830
831 /**
832 * sep_make_header
833 * @ta_ctx: pointer to struct this_task_ctx
834 * @msg_offset: pointer to current offset (is updated)
835 * @op_code: op code to put into message
836 * Puts op code into message and updates offset
837 */
838 static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
839 u32 op_code)
840 {
841 u32 *word_ptr;
842
843 *msg_offset = sep_start_msg(ta_ctx);
844 word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
845 *word_ptr = op_code;
846 *msg_offset += sizeof(u32);
847 }
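
/*
 * Resulting message layout, relative to ta_ctx->msgptr (which
 * sep_start_msg leaves pointing sizeof(u32) * 2 into the raw buffer):
 * word 0 is SEP_START_MSG_TOKEN, word 1 is the length in words
 * (filled in later by sep_end_msg), word 2 is the opcode written
 * here, words 3 and up carry the parameters, and sep_end_msg writes
 * a zero CRC word at the final offset.
 */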
848
849
850
851 /**
852 * sep_read_msg -
853 * @ta_ctx: pointer to struct this_task_ctx
854 * @in_addr: pointer to start of parameter
855 * @size: size of parameter to copy (in bytes)
856 * @max_size: size to move up offset; SEP message is in word sizes
857 * @msg_offset: pointer to current offset (is updated)
858 * @byte_array: flag to indicate whether endianness must be changed
859 * Copies data out of the message area to caller
860 */
861 static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
862 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
863 {
864 u32 *word_ptr;
865 void *void_ptr;
866 void_ptr = ta_ctx->msgptr + *msg_offset;
867 word_ptr = (u32 *)void_ptr;
868
869 /* Do we need to manipulate endian? */
870 if (byte_array) {
871 u32 i;
872 for (i = 0; i < ((size + 3) / 4); i += 1)
873 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
874 }
875
876 memcpy(in_addr, void_ptr, size);
877 *msg_offset += max_size;
878 }
879
880 /**
881 * sep_verify_op -
882 * @ta_ctx: pointer to struct this_task_ctx
883 * @op_code: expected op_code
884 * @msg_offset: pointer to current offset (is updated)
885 * @returns: 0 for success; error for failure
886 */
887 static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
888 u32 *msg_offset)
889 {
890 u32 error;
891 u32 in_ary[2];
892
893 struct sep_device *sep = ta_ctx->sep_used;
894
895 dev_dbg(&sep->pdev->dev, "dumping return message\n");
896 error = sep_start_inbound_msg(ta_ctx, msg_offset);
897 if (error) {
898 dev_warn(&sep->pdev->dev,
899 "sep_start_inbound_msg error\n");
900 return error;
901 }
902
903 sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
904 msg_offset, 0);
905
906 if (in_ary[0] != op_code) {
907 dev_warn(&sep->pdev->dev,
908 "sep got back wrong opcode\n");
909 dev_warn(&sep->pdev->dev,
910 "got back %x; expected %x\n",
911 in_ary[0], op_code);
912 return SEP_WRONG_OPCODE;
913 }
914
915 if (in_ary[1] != SEP_OK) {
916 dev_warn(&sep->pdev->dev,
917 "sep execution error\n");
918 dev_warn(&sep->pdev->dev,
919 "got back %x; expected %x\n",
920 in_ary[1], SEP_OK);
921 return in_ary[0];
922 }
923
924 return 0;
925 }
926
927 /**
928 * sep_read_context -
929 * @ta_ctx: pointer to struct this_task_ctx
930 * @msg_offset: point to current place in SEP msg; is updated
931 * @dst: pointer to place to put the context
932 * @len: size of the context structure (differs for crypto/hash)
933 * This function reads the context from the msg area
934 * There is a special way the vendor needs to have the maximum
935 * length calculated so that the msg_offset is updated properly;
936 * it skips over some words in the msg area depending on the size
937 * of the context
938 */
939 static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
940 void *dst, u32 len)
941 {
942 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
943 sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
944 }
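
/*
 * Worked example of the rounding above (illustrative length): for
 * len == 39 the expression ((39 + 3) / sizeof(u32)) * sizeof(u32)
 * yields 40, so only 39 bytes are copied but msg_offset advances by
 * a full 40 bytes, keeping the message offset word aligned.
 */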
945
946 /**
947 * sep_write_context -
948 * @ta_ctx: pointer to struct this_task_ctx
949 * @msg_offset: point to current place in SEP msg; is updated
950 * @src: pointer to the current context
951 * @len: size of the context structure (differs for crypto/hash)
952 * This function writes the context to the msg area
953 * There is a special way the vendor needs to have the maximum
954 * length calculated so that the msg_offset is updated properly;
955 * it skips over some words in the msg area depending on the size
956 * of the context
957 */
958 static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
959 void *src, u32 len)
960 {
961 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
962 sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
963 }
964
965 /**
966 * sep_clear_out -
967 * @ta_ctx: pointer to struct this_task_ctx
968 * Clear out crypto related values in sep device structure
969 * to enable device to be used by anyone; either kernel
970 * crypto or userspace app via middleware
971 */
972 static void sep_clear_out(struct this_task_ctx *ta_ctx)
973 {
974 if (ta_ctx->src_sg_hold) {
975 sep_free_sg_buf(ta_ctx->src_sg_hold);
976 ta_ctx->src_sg_hold = NULL;
977 }
978
979 if (ta_ctx->dst_sg_hold) {
980 sep_free_sg_buf(ta_ctx->dst_sg_hold);
981 ta_ctx->dst_sg_hold = NULL;
982 }
983
984 ta_ctx->src_sg = NULL;
985 ta_ctx->dst_sg = NULL;
986
987 sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);
988
989 if (ta_ctx->i_own_sep) {
990 /**
991 * The following unlocks the sep and makes it available
992 * to any other application
993 * First, null out crypto entries in sep before releasing it
994 */
995 ta_ctx->sep_used->current_hash_req = NULL;
996 ta_ctx->sep_used->current_cypher_req = NULL;
997 ta_ctx->sep_used->current_request = 0;
998 ta_ctx->sep_used->current_hash_stage = 0;
999 ta_ctx->sep_used->ta_ctx = NULL;
1000 ta_ctx->sep_used->in_kernel = 0;
1001
1002 ta_ctx->call_status.status = 0;
1003
1004 /* Remove anything confidential */
1005 memset(ta_ctx->sep_used->shared_addr, 0,
1006 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1007
1008 sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
1009
1010 #ifdef SEP_ENABLE_RUNTIME_PM
1011 ta_ctx->sep_used->in_use = 0;
1012 pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
1013 pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
1014 #endif
1015
1016 clear_bit(SEP_WORKING_LOCK_BIT,
1017 &ta_ctx->sep_used->in_use_flags);
1018 ta_ctx->sep_used->pid_doing_transaction = 0;
1019
1020 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1021 "[PID%d] waking up next transaction\n",
1022 current->pid);
1023
1024 clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
1025 &ta_ctx->sep_used->in_use_flags);
1026 wake_up(&ta_ctx->sep_used->event_transactions);
1027
1028 ta_ctx->i_own_sep = 0;
1029 }
1030 }
1031
1032 /**
1033 * Release crypto infrastructure from EINPROGRESS and
1034 * clear sep_dev so that SEP is available to anyone
1035 */
1036 static void sep_crypto_release(struct sep_system_ctx *sctx,
1037 struct this_task_ctx *ta_ctx, u32 error)
1038 {
1039 struct ahash_request *hash_req = ta_ctx->current_hash_req;
1040 struct ablkcipher_request *cypher_req =
1041 ta_ctx->current_cypher_req;
1042 struct sep_device *sep = ta_ctx->sep_used;
1043
1044 sep_clear_out(ta_ctx);
1045
1046 /**
1047 * This may not yet exist depending when we
1048 * chose to bail out. If it does exist, set
1049 * it to 1
1050 */
1051 if (ta_ctx->are_we_done_yet != NULL)
1052 *ta_ctx->are_we_done_yet = 1;
1053
1054 if (cypher_req != NULL) {
1055 if ((sctx->key_sent == 1) ||
1056 ((error != 0) && (error != -EINPROGRESS))) {
1057 if (cypher_req->base.complete == NULL) {
1058 dev_dbg(&sep->pdev->dev,
1059 "release is null for cypher!");
1060 } else {
1061 cypher_req->base.complete(
1062 &cypher_req->base, error);
1063 }
1064 }
1065 }
1066
1067 if (hash_req != NULL) {
1068 if (hash_req->base.complete == NULL) {
1069 dev_dbg(&sep->pdev->dev,
1070 "release is null for hash!");
1071 } else {
1072 hash_req->base.complete(
1073 &hash_req->base, error);
1074 }
1075 }
1076 }
1077
1078 /**
1079 * This is where we grab the sep itself and tell it to do something.
1080 * It will sleep if the sep is currently busy
1081 * and it will return 0 if sep is now ours; error value if there
1082 * were problems
1083 */
1084 static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
1085 {
1086 struct sep_device *sep = ta_ctx->sep_used;
1087 int result;
1088 struct sep_msgarea_hdr *my_msg_header;
1089
1090 my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
1091
1092 /* add to status queue */
1093 ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
1094 ta_ctx->nbytes, current->pid,
1095 current->comm, sizeof(current->comm));
1096
1097 if (!ta_ctx->queue_elem) {
1098 dev_dbg(&sep->pdev->dev,
1099 "[PID%d] updating queue status error\n", current->pid);
1100 return -EINVAL;
1101 }
1102
1103 /* get the device; this can sleep */
1104 result = sep_wait_transaction(sep);
1105 if (result)
1106 return result;
1107
1108 if (sep_dev->power_save_setup == 1)
1109 pm_runtime_get_sync(&sep_dev->pdev->dev);
1110
1111 /* Copy in the message */
1112 memcpy(sep->shared_addr, ta_ctx->msg,
1113 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1114
1115 /* Copy in the dcb information if there is any */
1116 if (ta_ctx->dcb_region) {
1117 result = sep_activate_dcb_dmatables_context(sep,
1118 &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
1119 ta_ctx->dma_ctx);
1120 if (result)
1121 return result;
1122 }
1123
1124 /* Mark the device so we know how to finish the job in the tasklet */
1125 if (ta_ctx->current_hash_req)
1126 sep->current_hash_req = ta_ctx->current_hash_req;
1127 else
1128 sep->current_cypher_req = ta_ctx->current_cypher_req;
1129
1130 sep->current_request = ta_ctx->current_request;
1131 sep->current_hash_stage = ta_ctx->current_hash_stage;
1132 sep->ta_ctx = ta_ctx;
1133 sep->in_kernel = 1;
1134 ta_ctx->i_own_sep = 1;
1135
1136 /* need to set bit first to avoid race condition with interrupt */
1137 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
1138
1139 result = sep_send_command_handler(sep);
1140
1141 dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
1142 current->pid);
1143
1144 if (!result)
1145 dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
1146 current->pid);
1147 else {
1148 dev_dbg(&sep->pdev->dev, "[PID%d]: cant send command\n",
1149 current->pid);
1150 clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1151 &ta_ctx->call_status.status);
1152 }
1153
1154 return result;
1155 }
1156
1157 /**
1158 * This function sets things up for a crypto data block process
1159 * This does all preparation, but does not try to grab the
1160 * sep
1161 * @req: pointer to struct ablkcipher_request
1162 * returns: 0 if all went well, non zero if error
1163 */
1164 static int sep_crypto_block_data(struct ablkcipher_request *req)
1165 {
1166
1167 int int_error;
1168 u32 msg_offset;
1169 static u32 msg[10];
1170 void *src_ptr;
1171 void *dst_ptr;
1172
1173 static char small_buf[100];
1174 ssize_t copy_result;
1175 int result;
1176
1177 struct scatterlist *new_sg;
1178 struct this_task_ctx *ta_ctx;
1179 struct crypto_ablkcipher *tfm;
1180 struct sep_system_ctx *sctx;
1181
1182 struct sep_des_internal_context *des_internal;
1183 struct sep_aes_internal_context *aes_internal;
1184
1185 ta_ctx = ablkcipher_request_ctx(req);
1186 tfm = crypto_ablkcipher_reqtfm(req);
1187 sctx = crypto_ablkcipher_ctx(tfm);
1188
1189 /* start the walk on scatterlists */
1190 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1191 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
1192 req->nbytes);
1193
1194 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1195 if (int_error) {
1196 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1197 int_error);
1198 return -ENOMEM;
1199 }
1200
1201 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1202 "crypto block: src is %lx dst is %lx\n",
1203 (unsigned long)req->src, (unsigned long)req->dst);
1204
1205 /* Make sure all pages are an even multiple of the block size */
1206 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
1207 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
1208
1209 if (int_error < 0) {
1210 dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page eerror\n");
1211 return -ENOMEM;
1212 } else if (int_error == 1) {
1213 ta_ctx->src_sg = new_sg;
1214 ta_ctx->src_sg_hold = new_sg;
1215 } else {
1216 ta_ctx->src_sg = req->src;
1217 ta_ctx->src_sg_hold = NULL;
1218 }
1219
1220 int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
1221 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
1222
1223 if (int_error < 0) {
1224 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1225 int_error);
1226 return -ENOMEM;
1227 } else if (int_error == 1) {
1228 ta_ctx->dst_sg = new_sg;
1229 ta_ctx->dst_sg_hold = new_sg;
1230 } else {
1231 ta_ctx->dst_sg = req->dst;
1232 ta_ctx->dst_sg_hold = NULL;
1233 }
1234
1235 /* set nbytes for queue status */
1236 ta_ctx->nbytes = req->nbytes;
1237
1238 /* Key already done; this is for data */
1239 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
1240
1241 sep_dump_sg(ta_ctx->sep_used,
1242 "block sg in", ta_ctx->src_sg);
1243
1244 /* check for valid data and proper spacing */
1245 src_ptr = sg_virt(ta_ctx->src_sg);
1246 dst_ptr = sg_virt(ta_ctx->dst_sg);
1247
1248 if (!src_ptr || !dst_ptr ||
1249 (ta_ctx->current_cypher_req->nbytes %
1250 crypto_ablkcipher_blocksize(tfm))) {
1251
1252 dev_warn(&ta_ctx->sep_used->pdev->dev,
1253 "cipher block size odd\n");
1254 dev_warn(&ta_ctx->sep_used->pdev->dev,
1255 "cipher block size is %x\n",
1256 crypto_ablkcipher_blocksize(tfm));
1257 dev_warn(&ta_ctx->sep_used->pdev->dev,
1258 "cipher data size is %x\n",
1259 ta_ctx->current_cypher_req->nbytes);
1260 return -EINVAL;
1261 }
1262
1263 if (partial_overlap(src_ptr, dst_ptr,
1264 ta_ctx->current_cypher_req->nbytes)) {
1265 dev_warn(&ta_ctx->sep_used->pdev->dev,
1266 "block partial overlap\n");
1267 return -EINVAL;
1268 }
1269
1270 /* Put together the message */
1271 sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
1272
1273 /* If des, and size is 1 block, put directly in msg */
1274 if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
1275 (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
1276
1277 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1278 "writing out one block des\n");
1279
1280 copy_result = sg_copy_to_buffer(
1281 ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
1282 small_buf, crypto_ablkcipher_blocksize(tfm));
1283
1284 if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
1285 dev_warn(&ta_ctx->sep_used->pdev->dev,
1286 "des block copy faild\n");
1287 return -ENOMEM;
1288 }
1289
1290 /* Put data into message */
1291 sep_write_msg(ta_ctx, small_buf,
1292 crypto_ablkcipher_blocksize(tfm),
1293 crypto_ablkcipher_blocksize(tfm) * 2,
1294 &msg_offset, 1);
1295
1296 /* Put size into message */
1297 sep_write_msg(ta_ctx, &req->nbytes,
1298 sizeof(u32), sizeof(u32), &msg_offset, 0);
1299 } else {
1300 /* Otherwise, fill out dma tables */
1301 ta_ctx->dcb_input_data.app_in_address = src_ptr;
1302 ta_ctx->dcb_input_data.data_in_size = req->nbytes;
1303 ta_ctx->dcb_input_data.app_out_address = dst_ptr;
1304 ta_ctx->dcb_input_data.block_size =
1305 crypto_ablkcipher_blocksize(tfm);
1306 ta_ctx->dcb_input_data.tail_block_size = 0;
1307 ta_ctx->dcb_input_data.is_applet = 0;
1308 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
1309 ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
1310
1311 result = sep_create_dcb_dmatables_context_kernel(
1312 ta_ctx->sep_used,
1313 &ta_ctx->dcb_region,
1314 &ta_ctx->dmatables_region,
1315 &ta_ctx->dma_ctx,
1316 &ta_ctx->dcb_input_data,
1317 1);
1318 if (result) {
1319 dev_warn(&ta_ctx->sep_used->pdev->dev,
1320 "crypto dma table create failed\n");
1321 return -EINVAL;
1322 }
1323
1324 /* Portion of msg is nulled (no data) */
1325 msg[0] = (u32)0;
1326 msg[1] = (u32)0;
1327 msg[2] = (u32)0;
1328 msg[3] = (u32)0;
1329 msg[4] = (u32)0;
1330 sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
1331 sizeof(u32) * 5, &msg_offset, 0);
1332 }
1333
1334 /**
1335 * Before we write the message, we need to overwrite the
1336 * vendor's IV with the one from our own ablkcipher walk
1337 * iv because this is needed for dm-crypt
1338 */
1339 sep_dump_ivs(req, "sending data block to sep\n");
1340 if ((ta_ctx->current_request == DES_CBC) &&
1341 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1342
1343 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1344 "overwrite vendor iv on DES\n");
1345 des_internal = (struct sep_des_internal_context *)
1346 sctx->des_private_ctx.ctx_buf;
1347 memcpy((void *)des_internal->iv_context,
1348 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1349 } else if ((ta_ctx->current_request == AES_CBC) &&
1350 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1351
1352 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1353 "overwrite vendor iv on AES\n");
1354 aes_internal = (struct sep_aes_internal_context *)
1355 sctx->aes_private_ctx.cbuff;
1356 memcpy((void *)aes_internal->aes_ctx_iv,
1357 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1358 }
1359
1360 /* Write context into message */
1361 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1362 sep_write_context(ta_ctx, &msg_offset,
1363 &sctx->des_private_ctx,
1364 sizeof(struct sep_des_private_context));
1365 sep_dump(ta_ctx->sep_used, "ctx to block des",
1366 &sctx->des_private_ctx, 40);
1367 } else {
1368 sep_write_context(ta_ctx, &msg_offset,
1369 &sctx->aes_private_ctx,
1370 sizeof(struct sep_aes_private_context));
1371 sep_dump(ta_ctx->sep_used, "ctx to block aes",
1372 &sctx->aes_private_ctx, 20);
1373 }
1374
1375 /* conclude message */
1376 sep_end_msg(ta_ctx, msg_offset);
1377
1378 /* Parent (caller) is now ready to tell the sep to go ahead */
1379 return 0;
1380 }
1381
1382
1383 /**
1384 * This function sets things up for a crypto key submit process
1385 * This does all preparation, but does not try to grab the
1386 * sep
1387 * @req: pointer to struct ablkcipher_request
1388 * returns: 0 if all went well, non zero if error
1389 */
1390 static int sep_crypto_send_key(struct ablkcipher_request *req)
1391 {
1392
1393 int int_error;
1394 u32 msg_offset;
1395 static u32 msg[10];
1396
1397 u32 max_length;
1398 struct this_task_ctx *ta_ctx;
1399 struct crypto_ablkcipher *tfm;
1400 struct sep_system_ctx *sctx;
1401
1402 ta_ctx = ablkcipher_request_ctx(req);
1403 tfm = crypto_ablkcipher_reqtfm(req);
1404 sctx = crypto_ablkcipher_ctx(tfm);
1405
1406 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
1407
1408 /* start the walk on scatterlists */
1409 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1410 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1411 "sep crypto block data size of %x\n", req->nbytes);
1412
1413 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1414 if (int_error) {
1415 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1416 int_error);
1417 return -ENOMEM;
1418 }
1419
1420 /* check iv */
1421 if ((ta_ctx->current_request == DES_CBC) &&
1422 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1423 if (!ta_ctx->walk.iv) {
1424 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1425 return -EINVAL;
1426 }
1427
1428 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
1429 sep_dump(ta_ctx->sep_used, "iv",
1430 ta_ctx->iv, SEP_DES_IV_SIZE_BYTES);
1431 }
1432
1433 if ((ta_ctx->current_request == AES_CBC) &&
1434 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1435 if (!ta_ctx->walk.iv) {
1436 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1437 return -EINVAL;
1438 }
1439
1440 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1441 sep_dump(ta_ctx->sep_used, "iv",
1442 ta_ctx->iv, SEP_AES_IV_SIZE_BYTES);
1443 }
1444
1445 /* put together message to SEP */
1446 /* Start with op code */
1447 sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
1448
1449 /* now deal with IV */
1450 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1451 if (ta_ctx->des_opmode == SEP_DES_CBC) {
1452 sep_write_msg(ta_ctx, ta_ctx->iv,
1453 SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
1454 &msg_offset, 1);
1455 sep_dump(ta_ctx->sep_used, "initial IV",
1456 ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
1457 } else {
1458 /* Skip if ECB */
1459 msg_offset += 4 * sizeof(u32);
1460 }
1461 } else {
1462 max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
1463 sizeof(u32)) * sizeof(u32);
1464 if (ta_ctx->aes_opmode == SEP_AES_CBC) {
1465 sep_write_msg(ta_ctx, ta_ctx->iv,
1466 SEP_AES_IV_SIZE_BYTES, max_length,
1467 &msg_offset, 1);
1468 sep_dump(ta_ctx->sep_used, "initial IV",
1469 ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1470 } else {
1471 /* Skip if ECB */
1472 msg_offset += max_length;
1473 }
1474 }
1475
1476 /* load the key */
1477 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1478 sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
1479 sizeof(u32) * 8, sizeof(u32) * 8,
1480 &msg_offset, 1);
1481
1482 msg[0] = (u32)sctx->des_nbr_keys;
1483 msg[1] = (u32)ta_ctx->des_encmode;
1484 msg[2] = (u32)ta_ctx->des_opmode;
1485
1486 sep_write_msg(ta_ctx, (void *)msg,
1487 sizeof(u32) * 3, sizeof(u32) * 3,
1488 &msg_offset, 0);
1489 } else {
1490 sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
1491 sctx->keylen,
1492 SEP_AES_MAX_KEY_SIZE_BYTES,
1493 &msg_offset, 1);
1494
1495 msg[0] = (u32)sctx->aes_key_size;
1496 msg[1] = (u32)ta_ctx->aes_encmode;
1497 msg[2] = (u32)ta_ctx->aes_opmode;
1498 msg[3] = (u32)0; /* Secret key is not used */
1499 sep_write_msg(ta_ctx, (void *)msg,
1500 sizeof(u32) * 4, sizeof(u32) * 4,
1501 &msg_offset, 0);
1502 }
1503
1504 /* conclude message */
1505 sep_end_msg(ta_ctx, msg_offset);
1506
1507 /* Parent (caller) is now ready to tell the sep to go ahead */
1508 return 0;
1509 }
1510
1511
1512 /* This needs to be run via a work queue because it can sleep */
1513 static void sep_crypto_block(void *data)
1514 {
1515 unsigned long end_time;
1516
1517 int result;
1518
1519 struct ablkcipher_request *req;
1520 struct this_task_ctx *ta_ctx;
1521 struct crypto_ablkcipher *tfm;
1522 struct sep_system_ctx *sctx;
1523 int are_we_done_yet;
1524
1525 req = (struct ablkcipher_request *)data;
1526 ta_ctx = ablkcipher_request_ctx(req);
1527 tfm = crypto_ablkcipher_reqtfm(req);
1528 sctx = crypto_ablkcipher_ctx(tfm);
1529
1530 ta_ctx->are_we_done_yet = &are_we_done_yet;
1531
1532 pr_debug("sep_crypto_block\n");
1533 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
1534 tfm, sctx, ta_ctx);
1535 pr_debug("key_sent is %d\n", sctx->key_sent);
1536
1537 /* do we need to send the key */
1538 if (sctx->key_sent == 0) {
1539 are_we_done_yet = 0;
1540 result = sep_crypto_send_key(req); /* prep to send key */
1541 if (result != 0) {
1542 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1543 "could not prep key %x\n", result);
1544 sep_crypto_release(sctx, ta_ctx, result);
1545 return;
1546 }
1547
1548 result = sep_crypto_take_sep(ta_ctx);
1549 if (result) {
1550 dev_warn(&ta_ctx->sep_used->pdev->dev,
1551 "sep_crypto_take_sep for key send failed\n");
1552 sep_crypto_release(sctx, ta_ctx, result);
1553 return;
1554 }
1555
1556 /* now we sit and wait up to a fixed time for completion */
1557 end_time = jiffies + (WAIT_TIME * HZ);
1558 while ((time_before(jiffies, end_time)) &&
1559 (are_we_done_yet == 0))
1560 schedule();
1561
1562 /* Done waiting; still not done yet? */
1563 if (are_we_done_yet == 0) {
1564 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1565 "Send key job never got done\n");
1566 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1567 return;
1568 }
1569
1570 /* Set the key sent variable so this can be skipped later */
1571 sctx->key_sent = 1;
1572 }
1573
1574 /* Key sent (or maybe not if we did not have to), now send block */
1575 are_we_done_yet = 0;
1576
1577 result = sep_crypto_block_data(req);
1578
1579 if (result != 0) {
1580 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1581 "could prep not send block %x\n", result);
1582 sep_crypto_release(sctx, ta_ctx, result);
1583 return;
1584 }
1585
1586 result = sep_crypto_take_sep(ta_ctx);
1587 if (result) {
1588 dev_warn(&ta_ctx->sep_used->pdev->dev,
1589 "sep_crypto_take_sep for block send failed\n");
1590 sep_crypto_release(sctx, ta_ctx, result);
1591 return;
1592 }
1593
1594 /* now we sit and wait up to a fixed time for completion */
1595 end_time = jiffies + (WAIT_TIME * HZ);
1596 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
1597 schedule();
1598
1599 /* Done waiting; still not done yet? */
1600 if (are_we_done_yet == 0) {
1601 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1602 "Send block job never got done\n");
1603 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1604 return;
1605 }
1606
1607 /* That's it; entire thing done, get out of queue */
1608
1609 pr_debug("crypto_block leaving\n");
1610 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
1611 }
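
/*
 * Flow summary for the work-queue job above: if sctx->key_sent is 0,
 * the key/IV init message is built (sep_crypto_send_key) and sent via
 * sep_crypto_take_sep, and the job busy-waits (schedule()) for up to
 * WAIT_TIME seconds for the post-op path to flag are_we_done_yet;
 * the data block message is then built (sep_crypto_block_data) and
 * sent the same way.  Result copy-back and completion happen in
 * crypto_post_op(), called from the sep_finish() tasklet below.
 */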
1612
1613 /**
1614 * Post operation (after interrupt) for crypto block
1615 */
1616 static u32 crypto_post_op(struct sep_device *sep)
1617 {
1619 u32 u32_error;
1620 u32 msg_offset;
1621
1622 ssize_t copy_result;
1623 static char small_buf[100];
1624
1625 struct ablkcipher_request *req;
1626 struct this_task_ctx *ta_ctx;
1627 struct sep_system_ctx *sctx;
1628 struct crypto_ablkcipher *tfm;
1629
1630 struct sep_des_internal_context *des_internal;
1631 struct sep_aes_internal_context *aes_internal;
1632
1633 if (!sep->current_cypher_req)
1634 return -EINVAL;
1635
1636 /* hold req since we need to submit work after clearing sep */
1637 req = sep->current_cypher_req;
1638
1639 ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
1640 tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
1641 sctx = crypto_ablkcipher_ctx(tfm);
1642
1643 pr_debug("crypto_post op\n");
1644 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1645 sctx->key_sent, tfm, sctx, ta_ctx);
1646
1647 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
1648 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
1649 crypto_sep_dump_message(ta_ctx->sep_used, ta_ctx->msg);
1650
1651 /* first bring msg from shared area to local area */
1652 memcpy(ta_ctx->msg, sep->shared_addr,
1653 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1654
1655 /* Is this the result of performing init (sending key to SEP)? */
1656 if (sctx->key_sent == 0) {
1657
1658 /* Did SEP do it okay */
1659 u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
1660 &msg_offset);
1661 if (u32_error) {
1662 dev_warn(&ta_ctx->sep_used->pdev->dev,
1663 "aes init error %x\n", u32_error);
1664 sep_crypto_release(sctx, ta_ctx, u32_error);
1665 return u32_error;
1666 }
1667
1668 /* Read Context */
1669 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1670 sep_read_context(ta_ctx, &msg_offset,
1671 &sctx->des_private_ctx,
1672 sizeof(struct sep_des_private_context));
1673
1674 sep_dump(ta_ctx->sep_used, "ctx init des",
1675 &sctx->des_private_ctx, 40);
1676 } else {
1677 sep_read_context(ta_ctx, &msg_offset,
1678 &sctx->aes_private_ctx,
1679 sizeof(struct sep_aes_private_context));
1680
1681 sep_dump(ta_ctx->sep_used, "ctx init aes",
1682 &sctx->aes_private_ctx, 20);
1683 }
1684
1685 sep_dump_ivs(req, "after sending key to sep\n");
1686
1687 /* key send went okay; release sep, and set are_we_done_yet */
1688 sctx->key_sent = 1;
1689 sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
1690
1691 } else {
1692
1693 /**
1694 * This is the result of a block request
1695 */
1696 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1697 "crypto_post_op block response\n");
1698
1699 u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
1700 &msg_offset);
1701
1702 if (u32_error) {
1703 dev_warn(&ta_ctx->sep_used->pdev->dev,
1704 "sep block error %x\n", u32_error);
1705 sep_crypto_release(sctx, ta_ctx, u32_error);
1706 return -EINVAL;
1707 }
1708
1709 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1710
1711 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1712 "post op for DES\n");
1713
1714 /* special case for 1 block des */
1715 if (sep->current_cypher_req->nbytes ==
1716 crypto_ablkcipher_blocksize(tfm)) {
1717
1718 sep_read_msg(ta_ctx, small_buf,
1719 crypto_ablkcipher_blocksize(tfm),
1720 crypto_ablkcipher_blocksize(tfm) * 2,
1721 &msg_offset, 1);
1722
1723 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1724 "reading in block des\n");
1725
1726 copy_result = sg_copy_from_buffer(
1727 ta_ctx->dst_sg,
1728 sep_sg_nents(ta_ctx->dst_sg),
1729 small_buf,
1730 crypto_ablkcipher_blocksize(tfm));
1731
1732 if (copy_result !=
1733 crypto_ablkcipher_blocksize(tfm)) {
1734
1735 dev_warn(&ta_ctx->sep_used->pdev->dev,
1736 "des block copy faild\n");
1737 sep_crypto_release(sctx, ta_ctx,
1738 -ENOMEM);
1739 return -ENOMEM;
1740 }
1741 }
1742
1743 /* Read Context */
1744 sep_read_context(ta_ctx, &msg_offset,
1745 &sctx->des_private_ctx,
1746 sizeof(struct sep_des_private_context));
1747 } else {
1748
1749 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1750 "post op for AES\n");
1751
1752 /* Skip the MAC Output */
1753 msg_offset += (sizeof(u32) * 4);
1754
1755 /* Read Context */
1756 sep_read_context(ta_ctx, &msg_offset,
1757 &sctx->aes_private_ctx,
1758 sizeof(struct sep_aes_private_context));
1759 }
1760
1761 sep_dump_sg(ta_ctx->sep_used,
1762 "block sg out", ta_ctx->dst_sg);
1763
1764 /* Copy to correct sg if this block had oddball pages */
1765 if (ta_ctx->dst_sg_hold)
1766 sep_copy_sg(ta_ctx->sep_used,
1767 ta_ctx->dst_sg,
1768 ta_ctx->current_cypher_req->dst,
1769 ta_ctx->current_cypher_req->nbytes);
1770
1771 /**
1772 * Copy the iv's back to the walk.iv
1773 * This is required for dm_crypt
1774 */
1775 sep_dump_ivs(req, "got data block from sep\n");
1776 if ((ta_ctx->current_request == DES_CBC) &&
1777 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1778
1779 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1780 "returning result iv to walk on DES\n");
1781 des_internal = (struct sep_des_internal_context *)
1782 sctx->des_private_ctx.ctx_buf;
1783 memcpy(ta_ctx->walk.iv,
1784 (void *)des_internal->iv_context,
1785 crypto_ablkcipher_ivsize(tfm));
1786 } else if ((ta_ctx->current_request == AES_CBC) &&
1787 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1788
1789 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1790 "returning result iv to walk on AES\n");
1791 aes_internal = (struct sep_aes_internal_context *)
1792 sctx->aes_private_ctx.cbuff;
1793 memcpy(ta_ctx->walk.iv,
1794 (void *)aes_internal->aes_ctx_iv,
1795 crypto_ablkcipher_ivsize(tfm));
1796 }
1797
1798 /* finished, release everything */
1799 sep_crypto_release(sctx, ta_ctx, 0);
1800 }
1801 pr_debug("crypto_post_op done\n");
1802 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1803 sctx->key_sent, tfm, sctx, ta_ctx);
1804
1805 return 0;
1806 }
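
/*
 * Summary of the post-op handling above: the reply type is decided by
 * sctx->key_sent.  A key/init reply stores the returned cipher context
 * and completes with -EINPROGRESS so sep_crypto_block() can go on to
 * send the data; a block reply copies back the single-block DES result
 * from the message (larger requests come back via the DMA tables),
 * reads the updated context, copies oddball-page output into the
 * caller's scatterlist when one was substituted, and restores walk.iv
 * for CBC so chained requests (e.g. dm-crypt) keep working.
 */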
1807
1808 static u32 hash_init_post_op(struct sep_device *sep)
1809 {
1810 u32 u32_error;
1811 u32 msg_offset;
1812 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1813 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1814 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1815 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1816 "hash init post op\n");
1817
1818 /* first bring msg from shared area to local area */
1819 memcpy(ta_ctx->msg, sep->shared_addr,
1820 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1821
1822 u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
1823 &msg_offset);
1824
1825 if (u32_error) {
1826 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1827 u32_error);
1828 sep_crypto_release(sctx, ta_ctx, u32_error);
1829 return u32_error;
1830 }
1831
1832 /* Read Context */
1833 sep_read_context(ta_ctx, &msg_offset,
1834 &sctx->hash_private_ctx,
1835 sizeof(struct sep_hash_private_context));
1836
1837 /* Signal to crypto infrastructure and clear out */
1838 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
1839 sep_crypto_release(sctx, ta_ctx, 0);
1840 return 0;
1841 }
1842
1843 static u32 hash_update_post_op(struct sep_device *sep)
1844 {
1845 u32 u32_error;
1846 u32 msg_offset;
1847 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1848 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1849 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1850 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1851 "hash update post op\n");
1852
1853 /* first bring msg from shared area to local area */
1854 memcpy(ta_ctx->msg, sep->shared_addr,
1855 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1856
1857 u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
1858 &msg_offset);
1859
1860 if (u32_error) {
1861 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1862 u32_error);
1863 sep_crypto_release(sctx, ta_ctx, u32_error);
1864 return u32_error;
1865 }
1866
1867 /* Read Context */
1868 sep_read_context(ta_ctx, &msg_offset,
1869 &sctx->hash_private_ctx,
1870 sizeof(struct sep_hash_private_context));
1871
1872 /**
1873 * The following is only for finup; if we just completed the
1874 * data portion of finup, we now need to kick off the
1875 * finish portion of finup.
1876 */
1877
1878 if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
1879
1880 /* first reset stage to HASH_FINUP_FINISH */
1881 ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
1882
1883 /* now enqueue the finish operation */
1884 spin_lock_irq(&queue_lock);
1885 u32_error = crypto_enqueue_request(&sep_queue,
1886 &ta_ctx->sep_used->current_hash_req->base);
1887 spin_unlock_irq(&queue_lock);
1888
1889 if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
1890 dev_warn(&ta_ctx->sep_used->pdev->dev,
1891 "spe cypher post op cant queue\n");
1892 sep_crypto_release(sctx, ta_ctx, u32_error);
1893 return u32_error;
1894 }
1895
1896 /* schedule the data send */
1897 u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
1898 sep_dequeuer, (void *)&sep_queue);
1899
1900 if (u32_error) {
1901 dev_warn(&ta_ctx->sep_used->pdev->dev,
1902 "cant submit work sep_crypto_block\n");
1903 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1904 return -EINVAL;
1905 }
1906 }
1907
1908 /* Signal to crypto infrastructure and clear out */
1909 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
1910 sep_crypto_release(sctx, ta_ctx, 0);
1911 return 0;
1912 }
1913
1914 static u32 hash_final_post_op(struct sep_device *sep)
1915 {
1916 int max_length;
1917 u32 u32_error;
1918 u32 msg_offset;
1919 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1920 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1921 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1922 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1923 "hash final post op\n");
1924
1925 /* first bring msg from shared area to local area */
1926 memcpy(ta_ctx->msg, sep->shared_addr,
1927 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1928
1929 u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
1930 &msg_offset);
1931
1932 if (u32_error) {
1933 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
1934 u32_error);
1935 sep_crypto_release(sctx, ta_ctx, u32_error);
1936 return u32_error;
1937 }
1938
1939 /* Grab the result */
1940 if (ta_ctx->current_hash_req->result == NULL) {
1941 /* Oops, null buffer; error out here */
1942 dev_warn(&ta_ctx->sep_used->pdev->dev,
1943 "hash finish null buffer\n");
1944 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1945 return -ENOMEM;
1946 }
1947
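/*
 * max_length below is the size of the SEP result slot in bytes,
 * rounded up to a whole number of 32-bit words; it is passed to
 * sep_read_msg() as the maximum size of the result field in the
 * message.
 */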
1948 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1949 sizeof(u32)) * sizeof(u32);
1950
1951 sep_read_msg(ta_ctx,
1952 ta_ctx->current_hash_req->result,
1953 crypto_ahash_digestsize(tfm), max_length,
1954 &msg_offset, 0);
1955
1956 /* Signal to crypto infrastructure and clear out */
1957 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
1958 sep_crypto_release(sctx, ta_ctx, 0);
1959 return 0;
1960 }
1961
1962 static u32 hash_digest_post_op(struct sep_device *sep)
1963 {
1964 int max_length;
1965 u32 u32_error;
1966 u32 msg_offset;
1967 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1968 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1969 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1970 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1971 "hash digest post op\n");
1972
1973 /* first bring msg from shared area to local area */
1974 memcpy(ta_ctx->msg, sep->shared_addr,
1975 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1976
1977 u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
1978 &msg_offset);
1979
1980 if (u32_error) {
1981 dev_warn(&ta_ctx->sep_used->pdev->dev,
1982 "hash digest finish error %x\n", u32_error);
1983
1984 sep_crypto_release(sctx, ta_ctx, u32_error);
1985 return u32_error;
1986 }
1987
1988 /* Grab the result */
1989 if (ta_ctx->current_hash_req->result == NULL) {
1990 /* Oops, null buffer; error out here */
1991 dev_warn(&ta_ctx->sep_used->pdev->dev,
1992 "hash digest finish null buffer\n");
1993 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1994 return -ENOMEM;
1995 }
1996
1997 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1998 sizeof(u32)) * sizeof(u32);
1999
2000 sep_read_msg(ta_ctx,
2001 ta_ctx->current_hash_req->result,
2002 crypto_ahash_digestsize(tfm), max_length,
2003 &msg_offset, 0);
2004
2005 /* Signal to crypto infrastructure and clear out */
2006 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2007 "hash digest finish post op done\n");
2008
2009 sep_crypto_release(sctx, ta_ctx, 0);
2010 return 0;
2011 }
2012
2013 /**
2014 * The sep_finish function is scheduled (via tasklet) by the interrupt
2015 * service routine when the SEP sends an interrupt. It is only called by
2016 * the interrupt handler as a tasklet; an illustrative sketch follows below.
2017 */
2018 static void sep_finish(unsigned long data)
2019 {
2020 struct sep_device *sep_dev;
2021 int res;
2022
2023 res = 0;
2024
2025 if (data == 0) {
2026 pr_debug("sep_finish called with null data\n");
2027 return;
2028 }
2029
2030 sep_dev = (struct sep_device *)data;
2031 if (sep_dev == NULL) {
2032 pr_debug("sep_finish; sep_dev is NULL\n");
2033 return;
2034 }
2035
2036 if (sep_dev->in_kernel == (u32)0) {
2037 dev_warn(&sep_dev->pdev->dev,
2038 "sep_finish; not in kernel operation\n");
2039 return;
2040 }
2041
2042 /* Did we really do a sep command prior to this? */
2043 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
2044 &sep_dev->ta_ctx->call_status.status)) {
2045
2046 dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
2047 current->pid);
2048 return;
2049 }
2050
2051 if (sep_dev->send_ct != sep_dev->reply_ct) {
2052 dev_warn(&sep_dev->pdev->dev,
2053 "[PID%d] poll; no message came back\n",
2054 current->pid);
2055 return;
2056 }
2057
2058 /* Check for error (In case time ran out) */
2059 if ((res != 0x0) && (res != 0x8)) {
2060 dev_warn(&sep_dev->pdev->dev,
2061 "[PID%d] poll; poll error GPR3 is %x\n",
2062 current->pid, res);
2063 return;
2064 }
2065
2066 /* What kind of interrupt from sep was this? */
2067 res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
2068
2069 dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
2070 current->pid, res);
2071
2072 /* Print request? */
2073 if ((res >> 30) & 0x1) {
2074 dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
2075 current->pid);
2076 dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
2077 current->pid,
2078 (char *)(sep_dev->shared_addr +
2079 SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
2080 return;
2081 }
2082
2083 /* Request for daemon (not currently in POR)? */
2084 if (res >> 31) {
2085 dev_dbg(&sep_dev->pdev->dev,
2086 "[PID%d] sep request; ignoring\n",
2087 current->pid);
2088 return;
2089 }
2090
2091 /* If we got here, then we have a reply to a sep command */
2092
2093 dev_dbg(&sep_dev->pdev->dev,
2094 "[PID%d] sep reply to command; processing request: %x\n",
2095 current->pid, sep_dev->current_request);
2096
2097 switch (sep_dev->current_request) {
2098 case AES_CBC:
2099 case AES_ECB:
2100 case DES_CBC:
2101 case DES_ECB:
2102 res = crypto_post_op(sep_dev);
2103 break;
2104 case SHA1:
2105 case MD5:
2106 case SHA224:
2107 case SHA256:
2108 switch (sep_dev->current_hash_stage) {
2109 case HASH_INIT:
2110 res = hash_init_post_op(sep_dev);
2111 break;
2112 case HASH_UPDATE:
2113 case HASH_FINUP_DATA:
2114 res = hash_update_post_op(sep_dev);
2115 break;
2116 case HASH_FINUP_FINISH:
2117 case HASH_FINISH:
2118 res = hash_final_post_op(sep_dev);
2119 break;
2120 case HASH_DIGEST:
2121 res = hash_digest_post_op(sep_dev);
2122 break;
2123 default:
2124 pr_debug("sep - invalid stage for hash finish\n");
2125 }
2126 break;
2127 default:
2128 pr_debug("sep - invalid request for finish\n");
2129 }
2130
2131 if (res)
2132 pr_debug("sep - finish returned error %x\n", res);
2133 }
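/*
 * Illustrative wiring sketch (not part of this file): the interrupt
 * handler elsewhere in the driver is expected to run sep_finish as a
 * tasklet, roughly as below. The finish_tasklet field name is an
 * assumption for illustration only.
 *
 *	tasklet_init(&sep->finish_tasklet, sep_finish, (unsigned long)sep);
 *	...
 *	tasklet_schedule(&sep->finish_tasklet);	 from the interrupt handler
 */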
2134
2135 static int sep_hash_cra_init(struct crypto_tfm *tfm)
2136 {
2137 const char *alg_name = crypto_tfm_alg_name(tfm);
2138
2139 pr_debug("sep_hash_cra_init name is %s\n", alg_name);
2140
2141 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2142 sizeof(struct this_task_ctx));
2143 return 0;
2144 }
2145
2146 static void sep_hash_cra_exit(struct crypto_tfm *tfm)
2147 {
2148 pr_debug("sep_hash_cra_exit\n");
2149 }
2150
2151 static void sep_hash_init(void *data)
2152 {
2153 u32 msg_offset;
2154 int result;
2155 struct ahash_request *req;
2156 struct crypto_ahash *tfm;
2157 struct this_task_ctx *ta_ctx;
2158 struct sep_system_ctx *sctx;
2159 unsigned long end_time;
2160 int are_we_done_yet;
2161
2162 req = (struct ahash_request *)data;
2163 tfm = crypto_ahash_reqtfm(req);
2164 sctx = crypto_ahash_ctx(tfm);
2165 ta_ctx = ahash_request_ctx(req);
2166 ta_ctx->sep_used = sep_dev;
2167
2168 ta_ctx->are_we_done_yet = &are_we_done_yet;
2169
2170 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2171 "sep_hash_init\n");
2172 ta_ctx->current_hash_stage = HASH_INIT;
2173 /* opcode and mode */
2174 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
2175 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2176 sizeof(u32), sizeof(u32), &msg_offset, 0);
2177 sep_end_msg(ta_ctx, msg_offset);
2178
2179 are_we_done_yet = 0;
2180 result = sep_crypto_take_sep(ta_ctx);
2181 if (result) {
2182 dev_warn(&ta_ctx->sep_used->pdev->dev,
2183 "sep_hash_init take sep failed\n");
2184 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2185 return;
2186 }
2187 /* now we sit and wait up to a fixed time for completion */
2188 end_time = jiffies + (WAIT_TIME * HZ);
2189 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2190 schedule();
2191
2192 /* Done waiting; still not done yet? */
2193 if (are_we_done_yet == 0) {
2194 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2195 "hash init never got done\n");
2196 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2197 return;
2198 }
2199
2200 }
2201
2202 static void sep_hash_update(void *data)
2203 {
2204 int int_error;
2205 u32 msg_offset;
2206 u32 len;
2207 struct sep_hash_internal_context *int_ctx;
2208 u32 block_size;
2209 u32 head_len;
2210 u32 tail_len;
2211 int are_we_done_yet;
2212
2213 static u32 msg[10];
2214 static char small_buf[100];
2215 void *src_ptr;
2216 struct scatterlist *new_sg;
2217 ssize_t copy_result;
2218 struct ahash_request *req;
2219 struct crypto_ahash *tfm;
2220 struct this_task_ctx *ta_ctx;
2221 struct sep_system_ctx *sctx;
2222 unsigned long end_time;
2223
2224 req = (struct ahash_request *)data;
2225 tfm = crypto_ahash_reqtfm(req);
2226 sctx = crypto_ahash_ctx(tfm);
2227 ta_ctx = ahash_request_ctx(req);
2228 ta_ctx->sep_used = sep_dev;
2229
2230 ta_ctx->are_we_done_yet = &are_we_done_yet;
2231
2232 /* length for queue status */
2233 ta_ctx->nbytes = req->nbytes;
2234
2235 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2236 "sep_hash_update\n");
2237 ta_ctx->current_hash_stage = HASH_UPDATE;
2238 len = req->nbytes;
2239
2240 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2241 tail_len = req->nbytes % block_size;
2242 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
2243 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2244 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2245
2246 /* Compute header/tail sizes */
2247 int_ctx = (struct sep_hash_internal_context *)&sctx->
2248 hash_private_ctx.internal_context;
2249 head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
2250 tail_len = (req->nbytes - head_len) % block_size;
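/*
 * head_len tops up the partial block carried over from the previous
 * update (assuming prev_update_bytes counts the bytes held over from
 * that update); tail_len is what is left after the whole blocks that
 * follow. For example, with a 64-byte block size and 20 bytes pending
 * from the last update, head_len is 44 and the remaining data splits
 * into whole blocks plus a tail_len remainder.
 */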
2251
2252 /* Make sure all pages are even block */
2253 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2254 req->nbytes,
2255 block_size, &new_sg, 1);
2256
2257 if (int_error < 0) {
2258 dev_warn(&ta_ctx->sep_used->pdev->dev,
2259 "oddball pages error in crash update\n");
2260 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2261 return;
2262 } else if (int_error == 1) {
2263 ta_ctx->src_sg = new_sg;
2264 ta_ctx->src_sg_hold = new_sg;
2265 } else {
2266 ta_ctx->src_sg = req->src;
2267 ta_ctx->src_sg_hold = NULL;
2268 }
2269
2270 /* null data; avoid calling sg_virt() on a null scatterlist */
2271 if ((!req->nbytes) || (!ta_ctx->src_sg))
2272 src_ptr = NULL;
2273 else
2274 src_ptr = sg_virt(ta_ctx->src_sg);
2275 
2276
2277 sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
2278
2279 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2280 ta_ctx->dcb_input_data.data_in_size =
2281 req->nbytes - (head_len + tail_len);
2282 ta_ctx->dcb_input_data.app_out_address = NULL;
2283 ta_ctx->dcb_input_data.block_size = block_size;
2284 ta_ctx->dcb_input_data.tail_block_size = 0;
2285 ta_ctx->dcb_input_data.is_applet = 0;
2286 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2287 ta_ctx->dcb_input_data.dst_sg = NULL;
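/*
 * Only the whole-block middle of the request is described by the DMA
 * tables built below; the head and tail fragments are copied inline
 * into the message further down.
 */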
2288
2289 int_error = sep_create_dcb_dmatables_context_kernel(
2290 ta_ctx->sep_used,
2291 &ta_ctx->dcb_region,
2292 &ta_ctx->dmatables_region,
2293 &ta_ctx->dma_ctx,
2294 &ta_ctx->dcb_input_data,
2295 1);
2296 if (int_error) {
2297 dev_warn(&ta_ctx->sep_used->pdev->dev,
2298 "hash update dma table create failed\n");
2299 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2300 return;
2301 }
2302
2303 /* Construct message to SEP */
2304 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
2305
2306 msg[0] = (u32)0;
2307 msg[1] = (u32)0;
2308 msg[2] = (u32)0;
2309
2310 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2311 &msg_offset, 0);
2312
2313 /* Handle remainders */
2314
2315 /* Head */
2316 sep_write_msg(ta_ctx, &head_len, sizeof(u32),
2317 sizeof(u32), &msg_offset, 0);
2318
2319 if (head_len) {
2320 copy_result = sg_copy_to_buffer(
2321 req->src,
2322 sep_sg_nents(ta_ctx->src_sg),
2323 small_buf, head_len);
2324
2325 if (copy_result != head_len) {
2326 dev_warn(&ta_ctx->sep_used->pdev->dev,
2327 "sg head copy failure in hash block\n");
2328 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2329 return;
2330 }
2331
2332 sep_write_msg(ta_ctx, small_buf, head_len,
2333 sizeof(u32) * 32, &msg_offset, 1);
2334 } else {
2335 msg_offset += sizeof(u32) * 32;
2336 }
2337
2338 /* Tail */
2339 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2340 sizeof(u32), &msg_offset, 0);
2341
2342 if (tail_len) {
2343 copy_result = sep_copy_offset_sg(
2344 ta_ctx->sep_used,
2345 ta_ctx->src_sg,
2346 req->nbytes - tail_len,
2347 small_buf, tail_len);
2348
2349 if (copy_result != tail_len) {
2350 dev_warn(&ta_ctx->sep_used->pdev->dev,
2351 "sg tail copy failure in hash block\n");
2352 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2353 return;
2354 }
2355
2356 sep_write_msg(ta_ctx, small_buf, tail_len,
2357 sizeof(u32) * 32, &msg_offset, 1);
2358 } else {
2359 msg_offset += sizeof(u32) * 32;
2360 }
2361
2362 /* Context */
2363 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2364 sizeof(struct sep_hash_private_context));
2365
2366 sep_end_msg(ta_ctx, msg_offset);
2367 are_we_done_yet = 0;
2368 int_error = sep_crypto_take_sep(ta_ctx);
2369 if (int_error) {
2370 dev_warn(&ta_ctx->sep_used->pdev->dev,
2371 "sep_hash_update take sep failed\n");
2372 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2373 return;
2374 }
2375 /* now we sit and wait up to a fixed time for completion */
2376 end_time = jiffies + (WAIT_TIME * HZ);
2377 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2378 schedule();
2379
2380 /* Done waiting; still not done yet? */
2381 if (are_we_done_yet == 0) {
2382 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2383 "hash update never got done\n");
2384 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2385 return;
2386 }
2387
2388 }
2389
2390 static void sep_hash_final(void *data)
2391 {
2392 u32 msg_offset;
2393 struct ahash_request *req;
2394 struct crypto_ahash *tfm;
2395 struct this_task_ctx *ta_ctx;
2396 struct sep_system_ctx *sctx;
2397 int result;
2398 unsigned long end_time;
2399 int are_we_done_yet;
2400
2401 req = (struct ahash_request *)data;
2402 tfm = crypto_ahash_reqtfm(req);
2403 sctx = crypto_ahash_ctx(tfm);
2404 ta_ctx = ahash_request_ctx(req);
2405 ta_ctx->sep_used = sep_dev;
2406
2407 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2408 "sep_hash_final\n");
2409 ta_ctx->current_hash_stage = HASH_FINISH;
2410
2411 ta_ctx->are_we_done_yet = &are_we_done_yet;
2412
2413 /* opcode and mode */
2414 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
2415
2416 /* Context */
2417 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2418 sizeof(struct sep_hash_private_context));
2419
2420 sep_end_msg(ta_ctx, msg_offset);
2421 are_we_done_yet = 0;
2422 result = sep_crypto_take_sep(ta_ctx);
2423 if (result) {
2424 dev_warn(&ta_ctx->sep_used->pdev->dev,
2425 "sep_hash_final take sep failed\n");
2426 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2427 return;
2428 }
2429 /* now we sit and wait up to a fixed time for completion */
2430 end_time = jiffies + (WAIT_TIME * HZ);
2431 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2432 schedule();
2433
2434 /* Done waiting; still not done yet? */
2435 if (are_we_done_yet == 0) {
2436 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2437 "hash final job never got done\n");
2438 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2439 return;
2440 }
2441
2442 }
2443
2444 static void sep_hash_digest(void *data)
2445 {
2446 int int_error;
2447 u32 msg_offset;
2448 u32 block_size;
2449 u32 msg[10];
2450 size_t copy_result;
2451 int result;
2452 int are_we_done_yet;
2453 u32 tail_len;
2454 static char small_buf[100];
2455 struct scatterlist *new_sg;
2456 void *src_ptr;
2457
2458 struct ahash_request *req;
2459 struct crypto_ahash *tfm;
2460 struct this_task_ctx *ta_ctx;
2461 struct sep_system_ctx *sctx;
2462 unsigned long end_time;
2463
2464 req = (struct ahash_request *)data;
2465 tfm = crypto_ahash_reqtfm(req);
2466 sctx = crypto_ahash_ctx(tfm);
2467 ta_ctx = ahash_request_ctx(req);
2468 ta_ctx->sep_used = sep_dev;
2469
2470 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2471 "sep_hash_digest\n");
2472 ta_ctx->current_hash_stage = HASH_DIGEST;
2473
2474 ta_ctx->are_we_done_yet = &are_we_done_yet;
2475
2476 /* length for queue status */
2477 ta_ctx->nbytes = req->nbytes;
2478
2479 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2480 tail_len = req->nbytes % block_size;
2481 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
2482 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2483 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2484
2485 /* Make sure all pages are even block */
2486 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2487 req->nbytes,
2488 block_size, &new_sg, 1);
2489
2490 if (int_error < 0) {
2491 dev_warn(&ta_ctx->sep_used->pdev->dev,
2492 "oddball pages error in crash update\n");
2493 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2494 return;
2495 } else if (int_error == 1) {
2496 ta_ctx->src_sg = new_sg;
2497 ta_ctx->src_sg_hold = new_sg;
2498 } else {
2499 ta_ctx->src_sg = req->src;
2500 ta_ctx->src_sg_hold = NULL;
2501 }
2502
2503 /* null data; avoid calling sg_virt() on a null scatterlist */
2504 if ((!req->nbytes) || (!ta_ctx->src_sg))
2505 src_ptr = NULL;
2506 else
2507 src_ptr = sg_virt(ta_ctx->src_sg);
2508 
2509
2510 sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
2511
2512 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2513 ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
2514 ta_ctx->dcb_input_data.app_out_address = NULL;
2515 ta_ctx->dcb_input_data.block_size = block_size;
2516 ta_ctx->dcb_input_data.tail_block_size = 0;
2517 ta_ctx->dcb_input_data.is_applet = 0;
2518 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2519 ta_ctx->dcb_input_data.dst_sg = NULL;
2520
2521 int_error = sep_create_dcb_dmatables_context_kernel(
2522 ta_ctx->sep_used,
2523 &ta_ctx->dcb_region,
2524 &ta_ctx->dmatables_region,
2525 &ta_ctx->dma_ctx,
2526 &ta_ctx->dcb_input_data,
2527 1);
2528 if (int_error) {
2529 dev_warn(&ta_ctx->sep_used->pdev->dev,
2530 "hash update dma table create failed\n");
2531 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2532 return;
2533 }
2534
2535 /* Construct message to SEP */
2536 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
2537 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2538 sizeof(u32), sizeof(u32), &msg_offset, 0);
2539
2540 msg[0] = (u32)0;
2541 msg[1] = (u32)0;
2542 msg[2] = (u32)0;
2543
2544 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2545 &msg_offset, 0);
2546
2547 /* Tail */
2548 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2549 sizeof(u32), &msg_offset, 0);
2550
2551 if (tail_len) {
2552 copy_result = sep_copy_offset_sg(
2553 ta_ctx->sep_used,
2554 ta_ctx->src_sg,
2555 req->nbytes - tail_len,
2556 small_buf, tail_len);
2557
2558 if (copy_result != tail_len) {
2559 dev_warn(&ta_ctx->sep_used->pdev->dev,
2560 "sg tail copy failure in hash block\n");
2561 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2562 return;
2563 }
2564
2565 sep_write_msg(ta_ctx, small_buf, tail_len,
2566 sizeof(u32) * 32, &msg_offset, 1);
2567 } else {
2568 msg_offset += sizeof(u32) * 32;
2569 }
2570
2571 sep_end_msg(ta_ctx, msg_offset);
2572
2573 are_we_done_yet = 0;
2574 result = sep_crypto_take_sep(ta_ctx);
2575 if (result) {
2576 dev_warn(&ta_ctx->sep_used->pdev->dev,
2577 "sep_hash_digest take sep failed\n");
2578 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2579 return;
2580 }
2581 /* now we sit and wait up to a fixed time for completion */
2582 end_time = jiffies + (WAIT_TIME * HZ);
2583 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2584 schedule();
2585
2586 /* Done waiting; still not done yet? */
2587 if (are_we_done_yet == 0) {
2588 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2589 "hash digest job never got done\n");
2590 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2591 return;
2592 }
2593
2594 }
2595
2596 /**
2597 * This is what is called by each of the APIs provided
2598 * in the kernel crypto descriptors. It is run in a process
2599 * context using the kernel workqueues. Therefore it can
2600 * be put to sleep.
2601 */
2602 static void sep_dequeuer(void *data)
2603 {
2604 struct crypto_queue *this_queue;
2605 struct crypto_async_request *async_req;
2606 struct crypto_async_request *backlog;
2607 struct ablkcipher_request *cypher_req;
2608 struct ahash_request *hash_req;
2609 struct sep_system_ctx *sctx;
2610 struct crypto_ahash *hash_tfm;
2611 struct this_task_ctx *ta_ctx;
2612
2613
2614 this_queue = (struct crypto_queue *)data;
2615
2616 spin_lock_irq(&queue_lock);
2617 backlog = crypto_get_backlog(this_queue);
2618 async_req = crypto_dequeue_request(this_queue);
2619 spin_unlock_irq(&queue_lock);
2620
2621 if (!async_req) {
2622 pr_debug("sep crypto queue is empty\n");
2623 return;
2624 }
2625
2626 if (backlog) {
2627 pr_debug("sep crypto backlog set\n");
2628 if (backlog->complete)
2629 backlog->complete(backlog, -EINPROGRESS);
2630 backlog = NULL;
2631 }
2632
2633 if (!async_req->tfm) {
2634 pr_debug("sep crypto queue null tfm\n");
2635 return;
2636 }
2637
2638 if (!async_req->tfm->__crt_alg) {
2639 pr_debug("sep crypto queue null __crt_alg\n");
2640 return;
2641 }
2642
2643 if (!async_req->tfm->__crt_alg->cra_type) {
2644 pr_debug("sep crypto queue null cra_type\n");
2645 return;
2646 }
2647
2648 /* we have stuff in the queue */
2649 if (async_req->tfm->__crt_alg->cra_type !=
2650 &crypto_ahash_type) {
2651 /* This is for a cypher */
2652 pr_debug("sep crypto queue doing cipher\n");
2653 cypher_req = container_of(async_req,
2654 struct ablkcipher_request,
2655 base);
2656 if (!cypher_req) {
2657 pr_debug("sep crypto queue null cypher_req\n");
2658 return;
2659 }
2660
2661 sep_crypto_block((void *)cypher_req);
2662 return;
2663 } else {
2664 /* This is a hash */
2665 pr_debug("sep crypto queue doing hash\n");
2666 /**
2667 * This is a bit more complex than a cipher; we
2668 * need to figure out what type of hash operation it is.
2669 */
2670 hash_req = ahash_request_cast(async_req);
2671 if (!hash_req) {
2672 pr_debug("sep crypto queue null hash_req\n");
2673 return;
2674 }
2675
2676 hash_tfm = crypto_ahash_reqtfm(hash_req);
2677 if (!hash_tfm) {
2678 pr_debug("sep crypto queue null hash_tfm\n");
2679 return;
2680 }
2681
2682
2683 sctx = crypto_ahash_ctx(hash_tfm);
2684 if (!sctx) {
2685 pr_debug("sep crypto queue null sctx\n");
2686 return;
2687 }
2688
2689 ta_ctx = ahash_request_ctx(hash_req);
2690
2691 if (ta_ctx->current_hash_stage == HASH_INIT) {
2692 pr_debug("sep crypto queue hash init\n");
2693 sep_hash_init((void *)hash_req);
2694 return;
2695 } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
2696 pr_debug("sep crypto queue hash update\n");
2697 sep_hash_update((void *)hash_req);
2698 return;
2699 } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
2700 pr_debug("sep crypto queue hash final\n");
2701 sep_hash_final((void *)hash_req);
2702 return;
2703 } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
2704 pr_debug("sep crypto queue hash digest\n");
2705 sep_hash_digest((void *)hash_req);
2706 return;
2707 } else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
2708 pr_debug("sep crypto queue hash digest\n");
2709 sep_hash_update((void *)hash_req);
2710 return;
2711 } else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
2712 pr_debug("sep crypto queue hash digest\n");
2713 sep_hash_final((void *)hash_req);
2714 return;
2715 } else {
2716 pr_debug("sep crypto queue hash oops nothing\n");
2717 return;
2718 }
2719 }
2720 }
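/*
 * Each of the algorithm entry points below follows the same pattern:
 * fill in the per-request task context, enqueue the request on
 * sep_queue under queue_lock, and submit sep_dequeuer to the driver
 * workqueue so the request is serviced in process context.
 */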
2721
2722 static int sep_sha1_init(struct ahash_request *req)
2723 {
2724 int error;
2725 int error1;
2726 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2727
2728 pr_debug("sep - doing sha1 init\n");
2729
2730 /* Clear out task context */
2731 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2732
2733 ta_ctx->sep_used = sep_dev;
2734 ta_ctx->current_request = SHA1;
2735 ta_ctx->current_hash_req = req;
2736 ta_ctx->current_cypher_req = NULL;
2737 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2738 ta_ctx->current_hash_stage = HASH_INIT;
2739
2740 /* lock necessary so that only one entity touches the queues */
2741 spin_lock_irq(&queue_lock);
2742 error = crypto_enqueue_request(&sep_queue, &req->base);
2743
2744 if ((error != 0) && (error != -EINPROGRESS))
2745 pr_debug(" sep - crypto enqueue failed: %x\n",
2746 error);
2747 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2748 sep_dequeuer, (void *)&sep_queue);
2749 if (error1)
2750 pr_debug(" sep - workqueue submit failed: %x\n",
2751 error1);
2752 spin_unlock_irq(&queue_lock);
2753 /* We return result of crypto enqueue */
2754 return error;
2755 }
2756
2757 static int sep_sha1_update(struct ahash_request *req)
2758 {
2759 int error;
2760 int error1;
2761 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2762
2763 pr_debug("sep - doing sha1 update\n");
2764
2765 ta_ctx->sep_used = sep_dev;
2766 ta_ctx->current_request = SHA1;
2767 ta_ctx->current_hash_req = req;
2768 ta_ctx->current_cypher_req = NULL;
2769 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2770 ta_ctx->current_hash_stage = HASH_UPDATE;
2771
2772 /* lock necessary so that only one entity touches the queues */
2773 spin_lock_irq(&queue_lock);
2774 error = crypto_enqueue_request(&sep_queue, &req->base);
2775
2776 if ((error != 0) && (error != -EINPROGRESS))
2777 pr_debug(" sep - crypto enqueue failed: %x\n",
2778 error);
2779 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2780 sep_dequeuer, (void *)&sep_queue);
2781 if (error1)
2782 pr_debug(" sep - workqueue submit failed: %x\n",
2783 error1);
2784 spin_unlock_irq(&queue_lock);
2785 /* We return result of crypto enqueue */
2786 return error;
2787 }
2788
2789 static int sep_sha1_final(struct ahash_request *req)
2790 {
2791 int error;
2792 int error1;
2793 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2794 pr_debug("sep - doing sha1 final\n");
2795
2796 ta_ctx->sep_used = sep_dev;
2797 ta_ctx->current_request = SHA1;
2798 ta_ctx->current_hash_req = req;
2799 ta_ctx->current_cypher_req = NULL;
2800 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2801 ta_ctx->current_hash_stage = HASH_FINISH;
2802
2803 /* lock necessary so that only one entity touches the queues */
2804 spin_lock_irq(&queue_lock);
2805 error = crypto_enqueue_request(&sep_queue, &req->base);
2806
2807 if ((error != 0) && (error != -EINPROGRESS))
2808 pr_debug(" sep - crypto enqueue failed: %x\n",
2809 error);
2810 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2811 sep_dequeuer, (void *)&sep_queue);
2812 if (error1)
2813 pr_debug(" sep - workqueue submit failed: %x\n",
2814 error1);
2815 spin_unlock_irq(&queue_lock);
2816 /* We return result of crypto enqueue */
2817 return error;
2818 }
2819
2820 static int sep_sha1_digest(struct ahash_request *req)
2821 {
2822 int error;
2823 int error1;
2824 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2825 pr_debug("sep - doing sha1 digest\n");
2826
2827 /* Clear out task context */
2828 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2829
2830 ta_ctx->sep_used = sep_dev;
2831 ta_ctx->current_request = SHA1;
2832 ta_ctx->current_hash_req = req;
2833 ta_ctx->current_cypher_req = NULL;
2834 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2835 ta_ctx->current_hash_stage = HASH_DIGEST;
2836
2837 /* lock necessary so that only one entity touches the queues */
2838 spin_lock_irq(&queue_lock);
2839 error = crypto_enqueue_request(&sep_queue, &req->base);
2840
2841 if ((error != 0) && (error != -EINPROGRESS))
2842 pr_debug(" sep - crypto enqueue failed: %x\n",
2843 error);
2844 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2845 sep_dequeuer, (void *)&sep_queue);
2846 if (error1)
2847 pr_debug(" sep - workqueue submit failed: %x\n",
2848 error1);
2849 spin_unlock_irq(&queue_lock);
2850 /* We return result of crypto enqueue */
2851 return error;
2852 }
2853
2854 static int sep_sha1_finup(struct ahash_request *req)
2855 {
2856 int error;
2857 int error1;
2858 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2859 pr_debug("sep - doing sha1 finup\n");
2860
2861 ta_ctx->sep_used = sep_dev;
2862 ta_ctx->current_request = SHA1;
2863 ta_ctx->current_hash_req = req;
2864 ta_ctx->current_cypher_req = NULL;
2865 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2866 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2867
2868 /* lock necessary so that only one entity touches the queues */
2869 spin_lock_irq(&queue_lock);
2870 error = crypto_enqueue_request(&sep_queue, &req->base);
2871
2872 if ((error != 0) && (error != -EINPROGRESS))
2873 pr_debug(" sep - crypto enqueue failed: %x\n",
2874 error);
2875 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2876 sep_dequeuer, (void *)&sep_queue);
2877 if (error1)
2878 pr_debug(" sep - workqueue submit failed: %x\n",
2879 error1);
2880 spin_unlock_irq(&queue_lock);
2881 /* We return result of crypto enqueue */
2882 return error;
2883 }
2884
2885 static int sep_md5_init(struct ahash_request *req)
2886 {
2887 int error;
2888 int error1;
2889 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2890 pr_debug("sep - doing md5 init\n");
2891
2892 /* Clear out task context */
2893 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2894
2895 ta_ctx->sep_used = sep_dev;
2896 ta_ctx->current_request = MD5;
2897 ta_ctx->current_hash_req = req;
2898 ta_ctx->current_cypher_req = NULL;
2899 ta_ctx->hash_opmode = SEP_HASH_MD5;
2900 ta_ctx->current_hash_stage = HASH_INIT;
2901
2902 /* lock necessary so that only one entity touches the queues */
2903 spin_lock_irq(&queue_lock);
2904 error = crypto_enqueue_request(&sep_queue, &req->base);
2905
2906 if ((error != 0) && (error != -EINPROGRESS))
2907 pr_debug(" sep - crypto enqueue failed: %x\n",
2908 error);
2909 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2910 sep_dequeuer, (void *)&sep_queue);
2911 if (error1)
2912 pr_debug(" sep - workqueue submit failed: %x\n",
2913 error1);
2914 spin_unlock_irq(&queue_lock);
2915 /* We return result of crypto enqueue */
2916 return error;
2917 }
2918
2919 static int sep_md5_update(struct ahash_request *req)
2920 {
2921 int error;
2922 int error1;
2923 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2924 pr_debug("sep - doing md5 update\n");
2925
2926 ta_ctx->sep_used = sep_dev;
2927 ta_ctx->current_request = MD5;
2928 ta_ctx->current_hash_req = req;
2929 ta_ctx->current_cypher_req = NULL;
2930 ta_ctx->hash_opmode = SEP_HASH_MD5;
2931 ta_ctx->current_hash_stage = HASH_UPDATE;
2932
2933 /* lock necessary so that only one entity touches the queues */
2934 spin_lock_irq(&queue_lock);
2935 error = crypto_enqueue_request(&sep_queue, &req->base);
2936
2937 if ((error != 0) && (error != -EINPROGRESS))
2938 pr_debug(" sep - crypto enqueue failed: %x\n",
2939 error);
2940 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2941 sep_dequeuer, (void *)&sep_queue);
2942 if (error1)
2943 pr_debug(" sep - workqueue submit failed: %x\n",
2944 error1);
2945 spin_unlock_irq(&queue_lock);
2946 /* We return result of crypto enqueue */
2947 return error;
2948 }
2949
2950 static int sep_md5_final(struct ahash_request *req)
2951 {
2952 int error;
2953 int error1;
2954 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2955 pr_debug("sep - doing md5 final\n");
2956
2957 ta_ctx->sep_used = sep_dev;
2958 ta_ctx->current_request = MD5;
2959 ta_ctx->current_hash_req = req;
2960 ta_ctx->current_cypher_req = NULL;
2961 ta_ctx->hash_opmode = SEP_HASH_MD5;
2962 ta_ctx->current_hash_stage = HASH_FINISH;
2963
2964 /* lock necessary so that only one entity touches the queues */
2965 spin_lock_irq(&queue_lock);
2966 error = crypto_enqueue_request(&sep_queue, &req->base);
2967
2968 if ((error != 0) && (error != -EINPROGRESS))
2969 pr_debug(" sep - crypto enqueue failed: %x\n",
2970 error);
2971 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2972 sep_dequeuer, (void *)&sep_queue);
2973 if (error1)
2974 pr_debug(" sep - workqueue submit failed: %x\n",
2975 error1);
2976 spin_unlock_irq(&queue_lock);
2977 /* We return result of crypto enqueue */
2978 return error;
2979 }
2980
2981 static int sep_md5_digest(struct ahash_request *req)
2982 {
2983 int error;
2984 int error1;
2985 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2986
2987 pr_debug("sep - doing md5 digest\n");
2988
2989 /* Clear out task context */
2990 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2991
2992 ta_ctx->sep_used = sep_dev;
2993 ta_ctx->current_request = MD5;
2994 ta_ctx->current_hash_req = req;
2995 ta_ctx->current_cypher_req = NULL;
2996 ta_ctx->hash_opmode = SEP_HASH_MD5;
2997 ta_ctx->current_hash_stage = HASH_DIGEST;
2998
2999 /* lock necessary so that only one entity touches the queues */
3000 spin_lock_irq(&queue_lock);
3001 error = crypto_enqueue_request(&sep_queue, &req->base);
3002
3003 if ((error != 0) && (error != -EINPROGRESS))
3004 pr_debug(" sep - crypto enqueue failed: %x\n",
3005 error);
3006 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3007 sep_dequeuer, (void *)&sep_queue);
3008 if (error1)
3009 pr_debug(" sep - workqueue submit failed: %x\n",
3010 error1);
3011 spin_unlock_irq(&queue_lock);
3012 /* We return result of crypto enqueue */
3013 return error;
3014 }
3015
3016 static int sep_md5_finup(struct ahash_request *req)
3017 {
3018 int error;
3019 int error1;
3020 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3021
3022 pr_debug("sep - doing md5 finup\n");
3023
3024 ta_ctx->sep_used = sep_dev;
3025 ta_ctx->current_request = MD5;
3026 ta_ctx->current_hash_req = req;
3027 ta_ctx->current_cypher_req = NULL;
3028 ta_ctx->hash_opmode = SEP_HASH_MD5;
3029 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3030
3031 /* lock necessary so that only one entity touches the queues */
3032 spin_lock_irq(&queue_lock);
3033 error = crypto_enqueue_request(&sep_queue, &req->base);
3034
3035 if ((error != 0) && (error != -EINPROGRESS))
3036 pr_debug(" sep - crypto enqueue failed: %x\n",
3037 error);
3038 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3039 sep_dequeuer, (void *)&sep_queue);
3040 if (error1)
3041 pr_debug(" sep - workqueue submit failed: %x\n",
3042 error1);
3043 spin_unlock_irq(&queue_lock);
3044 /* We return result of crypto enqueue */
3045 return error;
3046 }
3047
3048 static int sep_sha224_init(struct ahash_request *req)
3049 {
3050 int error;
3051 int error1;
3052 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3053 pr_debug("sep - doing sha224 init\n");
3054
3055 /* Clear out task context */
3056 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3057
3058 ta_ctx->sep_used = sep_dev;
3059 ta_ctx->current_request = SHA224;
3060 ta_ctx->current_hash_req = req;
3061 ta_ctx->current_cypher_req = NULL;
3062 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3063 ta_ctx->current_hash_stage = HASH_INIT;
3064
3065 /* lock necessary so that only one entity touches the queues */
3066 spin_lock_irq(&queue_lock);
3067 error = crypto_enqueue_request(&sep_queue, &req->base);
3068
3069 if ((error != 0) && (error != -EINPROGRESS))
3070 pr_debug(" sep - crypto enqueue failed: %x\n",
3071 error);
3072 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3073 sep_dequeuer, (void *)&sep_queue);
3074 if (error1)
3075 pr_debug(" sep - workqueue submit failed: %x\n",
3076 error1);
3077 spin_unlock_irq(&queue_lock);
3078 /* We return result of crypto enqueue */
3079 return error;
3080 }
3081
3082 static int sep_sha224_update(struct ahash_request *req)
3083 {
3084 int error;
3085 int error1;
3086 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3087 pr_debug("sep - doing sha224 update\n");
3088
3089 ta_ctx->sep_used = sep_dev;
3090 ta_ctx->current_request = SHA224;
3091 ta_ctx->current_hash_req = req;
3092 ta_ctx->current_cypher_req = NULL;
3093 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3094 ta_ctx->current_hash_stage = HASH_UPDATE;
3095
3096 /* lock necessary so that only one entity touches the queues */
3097 spin_lock_irq(&queue_lock);
3098 error = crypto_enqueue_request(&sep_queue, &req->base);
3099
3100 if ((error != 0) && (error != -EINPROGRESS))
3101 pr_debug(" sep - crypto enqueue failed: %x\n",
3102 error);
3103 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3104 sep_dequeuer, (void *)&sep_queue);
3105 if (error1)
3106 pr_debug(" sep - workqueue submit failed: %x\n",
3107 error1);
3108 spin_unlock_irq(&queue_lock);
3109 /* We return result of crypto enqueue */
3110 return error;
3111 }
3112
3113 static int sep_sha224_final(struct ahash_request *req)
3114 {
3115 int error;
3116 int error1;
3117 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3118 pr_debug("sep - doing sha224 final\n");
3119
3120 ta_ctx->sep_used = sep_dev;
3121 ta_ctx->current_request = SHA224;
3122 ta_ctx->current_hash_req = req;
3123 ta_ctx->current_cypher_req = NULL;
3124 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3125 ta_ctx->current_hash_stage = HASH_FINISH;
3126
3127 /* lock necessary so that only one entity touches the queues */
3128 spin_lock_irq(&queue_lock);
3129 error = crypto_enqueue_request(&sep_queue, &req->base);
3130
3131 if ((error != 0) && (error != -EINPROGRESS))
3132 pr_debug(" sep - crypto enqueue failed: %x\n",
3133 error);
3134 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3135 sep_dequeuer, (void *)&sep_queue);
3136 if (error1)
3137 pr_debug(" sep - workqueue submit failed: %x\n",
3138 error1);
3139 spin_unlock_irq(&queue_lock);
3140 /* We return result of crypto enqueue */
3141 return error;
3142 }
3143
3144 static int sep_sha224_digest(struct ahash_request *req)
3145 {
3146 int error;
3147 int error1;
3148 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3149
3150 pr_debug("sep - doing sha224 digest\n");
3151
3152 /* Clear out task context */
3153 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3154
3155 ta_ctx->sep_used = sep_dev;
3156 ta_ctx->current_request = SHA224;
3157 ta_ctx->current_hash_req = req;
3158 ta_ctx->current_cypher_req = NULL;
3159 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3160 ta_ctx->current_hash_stage = HASH_DIGEST;
3161
3162 /* lock necessary so that only one entity touches the queues */
3163 spin_lock_irq(&queue_lock);
3164 error = crypto_enqueue_request(&sep_queue, &req->base);
3165
3166 if ((error != 0) && (error != -EINPROGRESS))
3167 pr_debug(" sep - crypto enqueue failed: %x\n",
3168 error);
3169 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3170 sep_dequeuer, (void *)&sep_queue);
3171 if (error1)
3172 pr_debug(" sep - workqueue submit failed: %x\n",
3173 error1);
3174 spin_unlock_irq(&queue_lock);
3175 /* We return result of crypto enqueue */
3176 return error;
3177 }
3178
3179 static int sep_sha224_finup(struct ahash_request *req)
3180 {
3181 int error;
3182 int error1;
3183 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3184
3185 pr_debug("sep - doing sha224 finup\n");
3186
3187 ta_ctx->sep_used = sep_dev;
3188 ta_ctx->current_request = SHA224;
3189 ta_ctx->current_hash_req = req;
3190 ta_ctx->current_cypher_req = NULL;
3191 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3192 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3193
3194 /* lock necessary so that only one entity touches the queues */
3195 spin_lock_irq(&queue_lock);
3196 error = crypto_enqueue_request(&sep_queue, &req->base);
3197
3198 if ((error != 0) && (error != -EINPROGRESS))
3199 pr_debug(" sep - crypto enqueue failed: %x\n",
3200 error);
3201 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3202 sep_dequeuer, (void *)&sep_queue);
3203 if (error1)
3204 pr_debug(" sep - workqueue submit failed: %x\n",
3205 error1);
3206 spin_unlock_irq(&queue_lock);
3207 /* We return result of crypto enqueue */
3208 return error;
3209 }
3210
3211 static int sep_sha256_init(struct ahash_request *req)
3212 {
3213 int error;
3214 int error1;
3215 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3216 pr_debug("sep - doing sha256 init\n");
3217
3218 /* Clear out task context */
3219 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3220
3221 ta_ctx->sep_used = sep_dev;
3222 ta_ctx->current_request = SHA256;
3223 ta_ctx->current_hash_req = req;
3224 ta_ctx->current_cypher_req = NULL;
3225 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3226 ta_ctx->current_hash_stage = HASH_INIT;
3227
3228 /* lock necessary so that only one entity touches the queues */
3229 spin_lock_irq(&queue_lock);
3230 error = crypto_enqueue_request(&sep_queue, &req->base);
3231
3232 if ((error != 0) && (error != -EINPROGRESS))
3233 pr_debug(" sep - crypto enqueue failed: %x\n",
3234 error);
3235 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3236 sep_dequeuer, (void *)&sep_queue);
3237 if (error1)
3238 pr_debug(" sep - workqueue submit failed: %x\n",
3239 error1);
3240 spin_unlock_irq(&queue_lock);
3241 /* We return result of crypto enqueue */
3242 return error;
3243 }
3244
3245 static int sep_sha256_update(struct ahash_request *req)
3246 {
3247 int error;
3248 int error1;
3249 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3250 pr_debug("sep - doing sha256 update\n");
3251
3252 ta_ctx->sep_used = sep_dev;
3253 ta_ctx->current_request = SHA256;
3254 ta_ctx->current_hash_req = req;
3255 ta_ctx->current_cypher_req = NULL;
3256 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3257 ta_ctx->current_hash_stage = HASH_UPDATE;
3258
3259 /* lock necessary so that only one entity touches the queues */
3260 spin_lock_irq(&queue_lock);
3261 error = crypto_enqueue_request(&sep_queue, &req->base);
3262
3263 if ((error != 0) && (error != -EINPROGRESS))
3264 pr_debug(" sep - crypto enqueue failed: %x\n",
3265 error);
3266 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3267 sep_dequeuer, (void *)&sep_queue);
3268 if (error1)
3269 pr_debug(" sep - workqueue submit failed: %x\n",
3270 error1);
3271 spin_unlock_irq(&queue_lock);
3272 /* We return result of crypto enqueue */
3273 return error;
3274 }
3275
3276 static int sep_sha256_final(struct ahash_request *req)
3277 {
3278 int error;
3279 int error1;
3280 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3281 pr_debug("sep - doing sha256 final\n");
3282
3283 ta_ctx->sep_used = sep_dev;
3284 ta_ctx->current_request = SHA256;
3285 ta_ctx->current_hash_req = req;
3286 ta_ctx->current_cypher_req = NULL;
3287 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3288 ta_ctx->current_hash_stage = HASH_FINISH;
3289
3290 /* lock necessary so that only one entity touches the queues */
3291 spin_lock_irq(&queue_lock);
3292 error = crypto_enqueue_request(&sep_queue, &req->base);
3293
3294 if ((error != 0) && (error != -EINPROGRESS))
3295 pr_debug(" sep - crypto enqueue failed: %x\n",
3296 error);
3297 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3298 sep_dequeuer, (void *)&sep_queue);
3299 if (error1)
3300 pr_debug(" sep - workqueue submit failed: %x\n",
3301 error1);
3302 spin_unlock_irq(&queue_lock);
3303 /* We return result of crypto enqueue */
3304 return error;
3305 }
3306
3307 static int sep_sha256_digest(struct ahash_request *req)
3308 {
3309 int error;
3310 int error1;
3311 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3312
3313 pr_debug("sep - doing sha256 digest\n");
3314
3315 /* Clear out task context */
3316 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3317
3318 ta_ctx->sep_used = sep_dev;
3319 ta_ctx->current_request = SHA256;
3320 ta_ctx->current_hash_req = req;
3321 ta_ctx->current_cypher_req = NULL;
3322 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3323 ta_ctx->current_hash_stage = HASH_DIGEST;
3324
3325 /* lock necessary so that only one entity touches the queues */
3326 spin_lock_irq(&queue_lock);
3327 error = crypto_enqueue_request(&sep_queue, &req->base);
3328
3329 if ((error != 0) && (error != -EINPROGRESS))
3330 pr_debug(" sep - crypto enqueue failed: %x\n",
3331 error);
3332 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3333 sep_dequeuer, (void *)&sep_queue);
3334 if (error1)
3335 pr_debug(" sep - workqueue submit failed: %x\n",
3336 error1);
3337 spin_unlock_irq(&queue_lock);
3338 /* We return result of crypto enqueue */
3339 return error;
3340 }
3341
3342 static int sep_sha256_finup(struct ahash_request *req)
3343 {
3344 int error;
3345 int error1;
3346 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3347
3348 pr_debug("sep - doing sha256 finup\n");
3349
3350 ta_ctx->sep_used = sep_dev;
3351 ta_ctx->current_request = SHA256;
3352 ta_ctx->current_hash_req = req;
3353 ta_ctx->current_cypher_req = NULL;
3354 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3355 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3356
3357 /* lock necessary so that only one entity touches the queues */
3358 spin_lock_irq(&queue_lock);
3359 error = crypto_enqueue_request(&sep_queue, &req->base);
3360
3361 if ((error != 0) && (error != -EINPROGRESS))
3362 pr_debug(" sep - crypto enqueue failed: %x\n",
3363 error);
3364 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3365 sep_dequeuer, (void *)&sep_queue);
3366 if (error1)
3367 pr_debug(" sep - workqueue submit failed: %x\n",
3368 error1);
3369 spin_unlock_irq(&queue_lock);
3370 /* We return result of crypto enqueue */
3371 return error;
3372 }
3373
3374 static int sep_crypto_init(struct crypto_tfm *tfm)
3375 {
3376 const char *alg_name = crypto_tfm_alg_name(tfm);
3377
3378 if (alg_name == NULL)
3379 pr_debug("sep_crypto_init alg is NULL\n");
3380 else
3381 pr_debug("sep_crypto_init alg is %s\n", alg_name);
3382
3383 tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
3384 return 0;
3385 }
3386
3387 static void sep_crypto_exit(struct crypto_tfm *tfm)
3388 {
3389 pr_debug("sep_crypto_exit\n");
3390 }
3391
3392 static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3393 unsigned int keylen)
3394 {
3395 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3396
3397 pr_debug("sep aes setkey\n");
3398
3399 pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
3400 switch (keylen) {
3401 case SEP_AES_KEY_128_SIZE:
3402 sctx->aes_key_size = AES_128;
3403 break;
3404 case SEP_AES_KEY_192_SIZE:
3405 sctx->aes_key_size = AES_192;
3406 break;
3407 case SEP_AES_KEY_256_SIZE:
3408 sctx->aes_key_size = AES_256;
3409 break;
3410 case SEP_AES_KEY_512_SIZE:
3411 sctx->aes_key_size = AES_512;
3412 break;
3413 default:
3414 pr_debug("invalid sep aes key size %x\n",
3415 keylen);
3416 return -EINVAL;
3417 }
3418
3419 memset(&sctx->key.aes, 0, sizeof(u32) *
3420 SEP_AES_MAX_KEY_SIZE_WORDS);
3421 memcpy(&sctx->key.aes, key, keylen);
3422 sctx->keylen = keylen;
3423 /* Indicate to encrypt/decrypt function to send key to SEP */
3424 sctx->key_sent = 0;
3425
3426 return 0;
3427 }
3428
3429 static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
3430 {
3431 int error;
3432 int error1;
3433 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3434
3435 pr_debug("sep - doing aes ecb encrypt\n");
3436
3437 /* Clear out task context */
3438 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3439
3440 ta_ctx->sep_used = sep_dev;
3441 ta_ctx->current_request = AES_ECB;
3442 ta_ctx->current_hash_req = NULL;
3443 ta_ctx->current_cypher_req = req;
3444 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3445 ta_ctx->aes_opmode = SEP_AES_ECB;
3446 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3447 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3448
3449 /* lock necessary so that only one entity touches the queues */
3450 spin_lock_irq(&queue_lock);
3451 error = crypto_enqueue_request(&sep_queue, &req->base);
3452
3453 if ((error != 0) && (error != -EINPROGRESS))
3454 pr_debug(" sep - crypto enqueue failed: %x\n",
3455 error);
3456 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3457 sep_dequeuer, (void *)&sep_queue);
3458 if (error1)
3459 pr_debug(" sep - workqueue submit failed: %x\n",
3460 error1);
3461 spin_unlock_irq(&queue_lock);
3462 /* We return result of crypto enqueue */
3463 return error;
3464 }
3465
3466 static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
3467 {
3468 int error;
3469 int error1;
3470 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3471
3472 pr_debug("sep - doing aes ecb decrypt\n");
3473
3474 /* Clear out task context */
3475 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3476
3477 ta_ctx->sep_used = sep_dev;
3478 ta_ctx->current_request = AES_ECB;
3479 ta_ctx->current_hash_req = NULL;
3480 ta_ctx->current_cypher_req = req;
3481 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3482 ta_ctx->aes_opmode = SEP_AES_ECB;
3483 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3484 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3485
3486 /* lock necessary so that only one entity touches the queues */
3487 spin_lock_irq(&queue_lock);
3488 error = crypto_enqueue_request(&sep_queue, &req->base);
3489
3490 if ((error != 0) && (error != -EINPROGRESS))
3491 pr_debug(" sep - crypto enqueue failed: %x\n",
3492 error);
3493 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3494 sep_dequeuer, (void *)&sep_queue);
3495 if (error1)
3496 pr_debug(" sep - workqueue submit failed: %x\n",
3497 error1);
3498 spin_unlock_irq(&queue_lock);
3499 /* We return result of crypto enqueue */
3500 return error;
3501 }
3502
3503 static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
3504 {
3505 int error;
3506 int error1;
3507 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3508 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3509 crypto_ablkcipher_reqtfm(req));
3510
3511 pr_debug("sep - doing aes cbc encrypt\n");
3512
3513 /* Clear out task context */
3514 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3515
3516 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3517 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3518
3519 ta_ctx->sep_used = sep_dev;
3520 ta_ctx->current_request = AES_CBC;
3521 ta_ctx->current_hash_req = NULL;
3522 ta_ctx->current_cypher_req = req;
3523 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3524 ta_ctx->aes_opmode = SEP_AES_CBC;
3525 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3526 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3527
3528 /* lock necessary so that only one entity touches the queues */
3529 spin_lock_irq(&queue_lock);
3530 error = crypto_enqueue_request(&sep_queue, &req->base);
3531
3532 if ((error != 0) && (error != -EINPROGRESS))
3533 pr_debug(" sep - crypto enqueue failed: %x\n",
3534 error);
3535 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3536 sep_dequeuer, (void *)&sep_queue);
3537 if (error1)
3538 pr_debug(" sep - workqueue submit failed: %x\n",
3539 error1);
3540 spin_unlock_irq(&queue_lock);
3541 /* We return result of crypto enqueue */
3542 return error;
3543 }
3544
3545 static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
3546 {
3547 int error;
3548 int error1;
3549 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3550 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3551 crypto_ablkcipher_reqtfm(req));
3552
3553 pr_debug("sep - doing aes cbc decrypt\n");
3554
3555 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3556 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3557
3558 /* Clear out task context */
3559 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3560
3561 ta_ctx->sep_used = sep_dev;
3562 ta_ctx->current_request = AES_CBC;
3563 ta_ctx->current_hash_req = NULL;
3564 ta_ctx->current_cypher_req = req;
3565 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3566 ta_ctx->aes_opmode = SEP_AES_CBC;
3567 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3568 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3569
3570 /* lock necessary so that only one entity touches the queues */
3571 spin_lock_irq(&queue_lock);
3572 error = crypto_enqueue_request(&sep_queue, &req->base);
3573
3574 if ((error != 0) && (error != -EINPROGRESS))
3575 pr_debug(" sep - crypto enqueue failed: %x\n",
3576 error);
3577 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3578 sep_dequeuer, (void *)&sep_queue);
3579 if (error1)
3580 pr_debug(" sep - workqueue submit failed: %x\n",
3581 error1);
3582 spin_unlock_irq(&queue_lock);
3583 /* We return result of crypto enqueue */
3584 return error;
3585 }
3586
3587 static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3588 unsigned int keylen)
3589 {
3590 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3591 struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
3592 u32 *flags = &ctfm->crt_flags;
3593
3594 pr_debug("sep des setkey\n");
3595
3596 switch (keylen) {
3597 case DES_KEY_SIZE:
3598 sctx->des_nbr_keys = DES_KEY_1;
3599 break;
3600 case DES_KEY_SIZE * 2:
3601 sctx->des_nbr_keys = DES_KEY_2;
3602 break;
3603 case DES_KEY_SIZE * 3:
3604 sctx->des_nbr_keys = DES_KEY_3;
3605 break;
3606 default:
3607 pr_debug("invalid key size %x\n",
3608 keylen);
3609 return -EINVAL;
3610 }
3611
3612 if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
3613 (sep_weak_key(key, keylen))) {
3614
3615 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
3616 pr_debug("weak key\n");
3617 return -EINVAL;
3618 }
3619
3620 memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
3621 memcpy(&sctx->key.des.key1, key, keylen);
3622 sctx->keylen = keylen;
3623 /* Indicate to encrypt/decrypt function to send key to SEP */
3624 sctx->key_sent = 0;
3625
3626 return 0;
3627 }
3628
3629 static int sep_des_ebc_encrypt(struct ablkcipher_request *req)
3630 {
3631 int error;
3632 int error1;
3633 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3634
3635 pr_debug("sep - doing des ecb encrypt\n");
3636
3637 /* Clear out task context */
3638 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3639
3640 ta_ctx->sep_used = sep_dev;
3641 ta_ctx->current_request = DES_ECB;
3642 ta_ctx->current_hash_req = NULL;
3643 ta_ctx->current_cypher_req = req;
3644 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3645 ta_ctx->des_opmode = SEP_DES_ECB;
3646 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3647 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3648
3649 /* lock necessary so that only one entity touches the queues */
3650 spin_lock_irq(&queue_lock);
3651 error = crypto_enqueue_request(&sep_queue, &req->base);
3652
3653 if ((error != 0) && (error != -EINPROGRESS))
3654 pr_debug(" sep - crypto enqueue failed: %x\n",
3655 error);
3656 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3657 sep_dequeuer, (void *)&sep_queue);
3658 if (error1)
3659 pr_debug(" sep - workqueue submit failed: %x\n",
3660 error1);
3661 spin_unlock_irq(&queue_lock);
3662 /* We return result of crypto enqueue */
3663 return error;
3664 }
3665
3666 static int sep_des_ebc_decrypt(struct ablkcipher_request *req)
3667 {
3668 int error;
3669 int error1;
3670 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3671
3672 pr_debug("sep - doing des ecb decrypt\n");
3673
3674 /* Clear out task context */
3675 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3676
3677 ta_ctx->sep_used = sep_dev;
3678 ta_ctx->current_request = DES_ECB;
3679 ta_ctx->current_hash_req = NULL;
3680 ta_ctx->current_cypher_req = req;
3681 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3682 ta_ctx->des_opmode = SEP_DES_ECB;
3683 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3684 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3685
3686 /* lock necessary so that only one entity touches the queues */
3687 spin_lock_irq(&queue_lock);
3688 error = crypto_enqueue_request(&sep_queue, &req->base);
3689
3690 if ((error != 0) && (error != -EINPROGRESS))
3691 pr_debug(" sep - crypto enqueue failed: %x\n",
3692 error);
3693 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3694 sep_dequeuer, (void *)&sep_queue);
3695 if (error1)
3696 pr_debug(" sep - workqueue submit failed: %x\n",
3697 error1);
3698 spin_unlock_irq(&queue_lock);
3699 /* We return result of crypto enqueue */
3700 return error;
3701 }
3702
3703 static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
3704 {
3705 int error;
3706 int error1;
3707 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3708
3709 pr_debug("sep - doing des cbc encrypt\n");
3710
3711 /* Clear out task context */
3712 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3713
3714 ta_ctx->sep_used = sep_dev;
3715 ta_ctx->current_request = DES_CBC;
3716 ta_ctx->current_hash_req = NULL;
3717 ta_ctx->current_cypher_req = req;
3718 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3719 ta_ctx->des_opmode = SEP_DES_CBC;
3720 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3721 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3722
3723 /* lock necessary so that only one entity touches the queues */
3724 spin_lock_irq(&queue_lock);
3725 error = crypto_enqueue_request(&sep_queue, &req->base);
3726
3727 if ((error != 0) && (error != -EINPROGRESS))
3728 pr_debug(" sep - crypto enqueue failed: %x\n",
3729 error);
3730 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3731 sep_dequeuer, (void *)&sep_queue);
3732 if (error1)
3733 pr_debug(" sep - workqueue submit failed: %x\n",
3734 error1);
3735 spin_unlock_irq(&queue_lock);
3736 /* We return result of crypto enqueue */
3737 return error;
3738 }
3739
3740 static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
3741 {
3742 int error;
3743 int error1;
3744 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3745
3746 pr_debug("sep - doing des ecb decrypt\n");
3747
3748 /* Clear out task context */
3749 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3750
3751 ta_ctx->sep_used = sep_dev;
3752 ta_ctx->current_request = DES_CBC;
3753 ta_ctx->current_hash_req = NULL;
3754 ta_ctx->current_cypher_req = req;
3755 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3756 ta_ctx->des_opmode = SEP_DES_CBC;
3757 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3758 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3759
3760 /* lock necessary so that only one entity touches the queues */
3761 spin_lock_irq(&queue_lock);
3762 error = crypto_enqueue_request(&sep_queue, &req->base);
3763
3764 if ((error != 0) && (error != -EINPROGRESS))
3765 pr_debug(" sep - crypto enqueue failed: %x\n",
3766 error);
3767 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3768 sep_dequeuer, (void *)&sep_queue);
3769 if (error1)
3770 pr_debug(" sep - workqueue submit failed: %x\n",
3771 error1);
3772 spin_unlock_irq(&queue_lock);
3773 /* We return result of crypto enqueue */
3774 return error;
3775 }
3776
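/*
 * Asynchronous hash algorithms (SHA-1, MD5, SHA-224, SHA-256) offloaded
 * to the SEP; registered with the kernel crypto API in sep_crypto_setup().
 */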
3777 static struct ahash_alg hash_algs[] = {
3778 {
3779 .init = sep_sha1_init,
3780 .update = sep_sha1_update,
3781 .final = sep_sha1_final,
3782 .digest = sep_sha1_digest,
3783 .finup = sep_sha1_finup,
3784 .halg = {
3785 .digestsize = SHA1_DIGEST_SIZE,
3786 .base = {
3787 .cra_name = "sha1",
3788 .cra_driver_name = "sha1-sep",
3789 .cra_priority = 100,
3790 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3791 CRYPTO_ALG_ASYNC,
3792 .cra_blocksize = SHA1_BLOCK_SIZE,
3793 .cra_ctxsize = sizeof(struct sep_system_ctx),
3794 .cra_alignmask = 0,
3795 .cra_module = THIS_MODULE,
3796 .cra_init = sep_hash_cra_init,
3797 .cra_exit = sep_hash_cra_exit,
3798 }
3799 }
3800 },
3801 {
3802 .init = sep_md5_init,
3803 .update = sep_md5_update,
3804 .final = sep_md5_final,
3805 .digest = sep_md5_digest,
3806 .finup = sep_md5_finup,
3807 .halg = {
3808 .digestsize = MD5_DIGEST_SIZE,
3809 .base = {
3810 .cra_name = "md5",
3811 .cra_driver_name = "md5-sep",
3812 .cra_priority = 100,
3813 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3814 CRYPTO_ALG_ASYNC,
3815 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
3816 .cra_ctxsize = sizeof(struct sep_system_ctx),
3817 .cra_alignmask = 0,
3818 .cra_module = THIS_MODULE,
3819 .cra_init = sep_hash_cra_init,
3820 .cra_exit = sep_hash_cra_exit,
3821 }
3822 }
3823 },
3824 {
3825 .init = sep_sha224_init,
3826 .update = sep_sha224_update,
3827 .final = sep_sha224_final,
3828 .digest = sep_sha224_digest,
3829 .finup = sep_sha224_finup,
3830 .halg = {
3831 .digestsize = SHA224_DIGEST_SIZE,
3832 .base = {
3833 .cra_name = "sha224",
3834 .cra_driver_name = "sha224-sep",
3835 .cra_priority = 100,
3836 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3837 CRYPTO_ALG_ASYNC,
3838 .cra_blocksize = SHA224_BLOCK_SIZE,
3839 .cra_ctxsize = sizeof(struct sep_system_ctx),
3840 .cra_alignmask = 0,
3841 .cra_module = THIS_MODULE,
3842 .cra_init = sep_hash_cra_init,
3843 .cra_exit = sep_hash_cra_exit,
3844 }
3845 }
3846 },
3847 {
3848 .init = sep_sha256_init,
3849 .update = sep_sha256_update,
3850 .final = sep_sha256_final,
3851 .digest = sep_sha256_digest,
3852 .finup = sep_sha256_finup,
3853 .halg = {
3854 .digestsize = SHA256_DIGEST_SIZE,
3855 .base = {
3856 .cra_name = "sha256",
3857 .cra_driver_name = "sha256-sep",
3858 .cra_priority = 100,
3859 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3860 CRYPTO_ALG_ASYNC,
3861 .cra_blocksize = SHA256_BLOCK_SIZE,
3862 .cra_ctxsize = sizeof(struct sep_system_ctx),
3863 .cra_alignmask = 0,
3864 .cra_module = THIS_MODULE,
3865 .cra_init = sep_hash_cra_init,
3866 .cra_exit = sep_hash_cra_exit,
3867 }
3868 }
3869 }
3870 };
3871
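/*
 * Asynchronous block ciphers (AES and DES/3DES in ECB and CBC modes)
 * offloaded to the SEP; registered with the kernel crypto API in
 * sep_crypto_setup().
 */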
3872 static struct crypto_alg crypto_algs[] = {
3873 {
3874 .cra_name = "ecb(aes)",
3875 .cra_driver_name = "ecb-aes-sep",
3876 .cra_priority = 100,
3877 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3878 .cra_blocksize = AES_BLOCK_SIZE,
3879 .cra_ctxsize = sizeof(struct sep_system_ctx),
3880 .cra_alignmask = 0,
3881 .cra_type = &crypto_ablkcipher_type,
3882 .cra_module = THIS_MODULE,
3883 .cra_init = sep_crypto_init,
3884 .cra_exit = sep_crypto_exit,
3885 .cra_u.ablkcipher = {
3886 .min_keysize = AES_MIN_KEY_SIZE,
3887 .max_keysize = AES_MAX_KEY_SIZE,
3888 .setkey = sep_aes_setkey,
3889 .encrypt = sep_aes_ecb_encrypt,
3890 .decrypt = sep_aes_ecb_decrypt,
3891 }
3892 },
3893 {
3894 .cra_name = "cbc(aes)",
3895 .cra_driver_name = "cbc-aes-sep",
3896 .cra_priority = 100,
3897 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3898 .cra_blocksize = AES_BLOCK_SIZE,
3899 .cra_ctxsize = sizeof(struct sep_system_ctx),
3900 .cra_alignmask = 0,
3901 .cra_type = &crypto_ablkcipher_type,
3902 .cra_module = THIS_MODULE,
3903 .cra_init = sep_crypto_init,
3904 .cra_exit = sep_crypto_exit,
3905 .cra_u.ablkcipher = {
3906 .min_keysize = AES_MIN_KEY_SIZE,
3907 .max_keysize = AES_MAX_KEY_SIZE,
3908 .setkey = sep_aes_setkey,
3909 .encrypt = sep_aes_cbc_encrypt,
3910 .ivsize = AES_BLOCK_SIZE,
3911 .decrypt = sep_aes_cbc_decrypt,
3912 }
3913 },
3914 {
3915 .cra_name = "ecb(des)",
3916 .cra_driver_name = "ecb-des-sep",
3917 .cra_priority = 100,
3918 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3919 .cra_blocksize = DES_BLOCK_SIZE,
3920 .cra_ctxsize = sizeof(struct sep_system_ctx),
3921 .cra_alignmask = 0,
3922 .cra_type = &crypto_ablkcipher_type,
3923 .cra_module = THIS_MODULE,
3924 .cra_init = sep_crypto_init,
3925 .cra_exit = sep_crypto_exit,
3926 .cra_u.ablkcipher = {
3927 .min_keysize = DES_KEY_SIZE,
3928 .max_keysize = DES_KEY_SIZE,
3929 .setkey = sep_des_setkey,
3930 .encrypt = sep_des_ebc_encrypt,
3931 .decrypt = sep_des_ebc_decrypt,
3932 }
3933 },
3934 {
3935 .cra_name = "cbc(des)",
3936 .cra_driver_name = "cbc-des-sep",
3937 .cra_priority = 100,
3938 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3939 .cra_blocksize = DES_BLOCK_SIZE,
3940 .cra_ctxsize = sizeof(struct sep_system_ctx),
3941 .cra_alignmask = 0,
3942 .cra_type = &crypto_ablkcipher_type,
3943 .cra_module = THIS_MODULE,
3944 .cra_init = sep_crypto_init,
3945 .cra_exit = sep_crypto_exit,
3946 .cra_u.ablkcipher = {
3947 .min_keysize = DES_KEY_SIZE,
3948 .max_keysize = DES_KEY_SIZE,
3949 .setkey = sep_des_setkey,
3950 .encrypt = sep_des_cbc_encrypt,
3951 .ivsize = DES_BLOCK_SIZE,
3952 .decrypt = sep_des_cbc_decrypt,
3953 }
3954 },
3955 {
3956 .cra_name = "ecb(des3_ede)",
3957 .cra_driver_name = "ecb-des3-ede-sep",
3958 .cra_priority = 100,
3959 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3960 .cra_blocksize = DES_BLOCK_SIZE,
3961 .cra_ctxsize = sizeof(struct sep_system_ctx),
3962 .cra_alignmask = 0,
3963 .cra_type = &crypto_ablkcipher_type,
3964 .cra_module = THIS_MODULE,
3965 .cra_init = sep_crypto_init,
3966 .cra_exit = sep_crypto_exit,
3967 .cra_u.ablkcipher = {
3968 .min_keysize = DES3_EDE_KEY_SIZE,
3969 .max_keysize = DES3_EDE_KEY_SIZE,
3970 .setkey = sep_des_setkey,
3971 .encrypt = sep_des_ebc_encrypt,
3972 .decrypt = sep_des_ebc_decrypt,
3973 }
3974 },
3975 {
3976 .cra_name = "cbc(des3_ede)",
3977 .cra_driver_name = "cbc-des3-ede-sep",
3978 .cra_priority = 100,
3979 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3980 .cra_blocksize = DES_BLOCK_SIZE,
3981 .cra_ctxsize = sizeof(struct sep_system_ctx),
3982 .cra_alignmask = 0,
3983 .cra_type = &crypto_ablkcipher_type,
3984 .cra_module = THIS_MODULE,
3985 .cra_init = sep_crypto_init,
3986 .cra_exit = sep_crypto_exit,
3987 .cra_u.ablkcipher = {
3988 .min_keysize = DES3_EDE_KEY_SIZE,
3989 .max_keysize = DES3_EDE_KEY_SIZE,
3990 .setkey = sep_des_setkey,
3991 .encrypt = sep_des_cbc_encrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
3992 .decrypt = sep_des_cbc_decrypt,
3993 }
3994 }
3995 };
3996
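/**
 * sep_crypto_setup - register the SEP algorithms with the kernel crypto API
 * Sets up the finish tasklet, request queue and workqueue, then registers
 * the hash and block cipher tables above. If any registration fails, every
 * algorithm registered so far is unregistered before returning the error.
 */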
3997 int sep_crypto_setup(void)
3998 {
3999 int err, i, j, k;
4000 tasklet_init(&sep_dev->finish_tasklet, sep_finish,
4001 (unsigned long)sep_dev);
4002
4003 crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);
4004
4005 sep_dev->workqueue = create_singlethread_workqueue(
4006 "sep_crypto_workqueue");
4007 if (!sep_dev->workqueue) {
4008 dev_warn(&sep_dev->pdev->dev, "can't create workqueue\n");
4009 return -ENOMEM;
4010 }
4011
4012 i = 0;
4013 j = 0;
4014
4015 spin_lock_init(&queue_lock);
4016
4017 err = 0;
4018
4019 for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
4020 err = crypto_register_ahash(&hash_algs[i]);
4021 if (err)
4022 goto err_algs;
4023 }
4024
4025 err = 0;
4026 for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
4027 err = crypto_register_alg(&crypto_algs[j]);
4028 if (err)
4029 goto err_crypto_algs;
4030 }
4031
4032 return err;
4033
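/*
 * Unwind partial registration: err_crypto_algs drops the ciphers registered
 * so far, then jumps to err_algs to drop the hashes as well.
 */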
4034 err_algs:
4035 for (k = 0; k < i; k++)
4036 crypto_unregister_ahash(&hash_algs[k]);
4037 return err;
4038
4039 err_crypto_algs:
4040 for (k = 0; k < j; k++)
4041 crypto_unregister_alg(&crypto_algs[k]);
4042 goto err_algs;
4043 }
4044
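/**
 * sep_crypto_takedown - unregister the SEP algorithms and kill the tasklet
 */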
4045 void sep_crypto_takedown(void)
4046 {
4048 int i;
4049
4050 for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
4051 crypto_unregister_ahash(&hash_algs[i]);
4052 for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
4053 crypto_unregister_alg(&crypto_algs[i]);
4054
4055 tasklet_kill(&sep_dev->finish_tasklet);
4056 }
4057
4058 #endif