/*
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from the omap-sham.c driver.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>

#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include "atmel-sha-regs.h"
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_SHA1			BIT(18)
#define SHA_FLAGS_SHA256		BIT(19)
#define SHA_FLAGS_ERROR			BIT(20)
#define SHA_FLAGS_PAD			BIT(21)

#define SHA_FLAGS_DUALBUFF		BIT(24)

#define SHA_OP_UPDATE	1
#define SHA_OP_FINAL	2

#define SHA_BUFFER_LEN		PAGE_SIZE

#define ATMEL_SHA_DMA_THRESHOLD		56
struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8	digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t	digcnt;
	size_t	bufcnt;
	size_t	buflen;
	dma_addr_t	dma_addr;

	struct scatterlist	*sg;
	unsigned int	offset;	/* offset in current sg */
	unsigned int	total;	/* total request */

	u8	buffer[0] __aligned(sizeof(u32));
};
struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;

	struct crypto_shash	*fallback;
};

#define ATMEL_SHA_QUEUE_LENGTH	1
struct atmel_sha_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;
	struct tasklet_struct	done_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
};

struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};
static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};
static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}
static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd)
{
	atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF);

	if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF)
		dd->flags |= SHA_FLAGS_DUALBUFF;
}
static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0)
			break;

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}
/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits. The bit "1" is appended at the end of the message,
 * followed by "padlen-1" zero bits. Then a 64-bit block equal to the
 * message length in bits is appended.
 *
 * padlen is calculated as follows:
 *  - if message length < 56 bytes then padlen = 56 - message length
 *  - else padlen = 64 + 56 - message length
 */
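/*
 * Worked example (illustrative only, not taken from the datasheet): with
 * 20 bytes pending in the buffer, index = 20, so padlen = 56 - 20 = 36 and
 * the padded data is 20 + 36 + 8 = 64 bytes, i.e. one 512-bit block.  With
 * 60 bytes pending, index = 60, so padlen = (64 + 56) - 60 = 60 and the
 * padded data is 60 + 60 + 8 = 128 bytes, i.e. two blocks.
 */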
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits, size;

	bits = (ctx->bufcnt + ctx->digcnt + length) << 3;
	size = cpu_to_be64(bits);

	index = ctx->bufcnt & 0x3f;
	padlen = (index < 56) ? (56 - index) : ((64+56) - index);
	*(ctx->buffer + ctx->bufcnt) = 0x80;
	memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
	memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8);
	ctx->bufcnt += padlen + 8;
	ctx->flags |= SHA_FLAGS_PAD;
}
static int atmel_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = NULL;
	struct atmel_sha_dev *tmp;

	spin_lock_bh(&atmel_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&atmel_sha.lock);

	ctx->dd = dd;
	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= SHA_FLAGS_SHA1;
	else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE)
		ctx->flags |= SHA_FLAGS_SHA256;

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = SHA_BUFFER_LEN;

	return 0;
}
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;

	if (likely(dma)) {
		atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->flags & SHA_FLAGS_DUALBUFF)
			valmr = SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	if (ctx->flags & SHA_FLAGS_SHA256)
		valmr |= SHA_MR_ALGO_SHA256;

	/* Setting CR_FIRST only for the first iteration */
	if (!ctx->digcnt)
		valcr = SHA_CR_FIRST;

	atmel_sha_write(dd, SHA_CR, valcr);
	atmel_sha_write(dd, SHA_MR, valmr);
}
static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
				size_t length, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
		ctx->digcnt, length, final);

	atmel_sha_write_ctrl(dd, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dd->flags |= SHA_FLAGS_CPU;

	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}
static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n",
		ctx->digcnt, length1, final);

	len32 = DIV_ROUND_UP(length1, sizeof(u32));
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	len32 = DIV_ROUND_UP(length2, sizeof(u32));
	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length1;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

	return -EINPROGRESS;
}
static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);

	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}
static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
					struct atmel_sha_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
				ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
				SHA1_BLOCK_SIZE);
		return -EINVAL;
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final);
}
static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	atmel_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
		 ctx->bufcnt, ctx->digcnt, final);

	if (final)
		atmel_sha_fill_padding(ctx, 0);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}
static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	unsigned int count;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
		ctx->digcnt, ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return atmel_sha_update_dma_slow(dd);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE))
		/* size is not SHA1_BLOCK_SIZE aligned */
		return atmel_sha_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be SHA1_BLOCK_SIZE aligned */
			tail = length & (SHA1_BLOCK_SIZE - 1);
			length -= tail;
			if (length == 0) {
				/* offset where to start slow */
				ctx->offset = length;
				return atmel_sha_update_dma_slow(dd);
			}
		}
	}

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	if (final) {
		tail = length & (SHA1_BLOCK_SIZE - 1);
		length -= tail;
		ctx->total += tail;
		ctx->offset = length; /* offset where to start slow */

		sg = ctx->sg;
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
			dev_err(dd->dev, "dma %u bytes error\n",
				ctx->buflen + SHA1_BLOCK_SIZE);
			return -EINVAL;
		}

		if (length == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0,
					0, final);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(dd->dev, ctx->sg, 1,
					DMA_TO_DEVICE)) {
				dev_err(dd->dev, "dma_map_sg error\n");
				return -EINVAL;
			}

			ctx->flags |= SHA_FLAGS_SG;

			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg),
					length, ctx->dma_addr, count, final);
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0,
			0, final);
}
static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD)
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
	}

	return 0;
}
static int atmel_sha_update_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0);

	if (ctx->flags & SHA_FLAGS_CPU)
		err = atmel_sha_update_cpu(dd);
	else
		err = atmel_sha_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n",
			err, ctx->digcnt);

	return err;
}
static int atmel_sha_final_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, count;

	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
	}
	/* faster to handle last block with cpu */
	else {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
	}

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	if (likely(ctx->flags & SHA_FLAGS_SHA1))
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	else
		for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
}
static void atmel_sha_copy_ready_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	if (likely(ctx->flags & SHA_FLAGS_SHA1))
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
	else
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
}
static int atmel_sha_finish(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt)
		atmel_sha_copy_ready_hash(req);

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt,
		ctx->bufcnt);

	return err;
}
static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

	clk_disable_unprepare(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->done_task);
}
static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	clk_prepare_enable(dd->iclk);

	if (!(SHA_FLAGS_INIT & dd->flags)) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		atmel_sha_dualbuff_test(dd);
		dd->flags |= SHA_FLAGS_INIT;
	}

	return 0;
}
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) {
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
		}
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}
static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	ctx->op = op;

	return atmel_sha_handle_queue(dd, req);
}
static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}
static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;
	int err = 0;

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->bufcnt) {
		return atmel_sha_enqueue(req, SHA_OP_FINAL);
	} else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
		err = atmel_sha_hw_init(dd);
		if (err)
			goto err1;

		dd->flags |= SHA_FLAGS_BUSY;
		err = atmel_sha_final_req(dd);
	} else {
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	return err;
}
static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	return err1 ?: err2;
}
static int atmel_sha_digest(struct ahash_request *req)
{
	return atmel_sha_init(req) ?: atmel_sha_finup(req);
}
static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n",
				alg_name);
		return PTR_ERR(tctx->fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
	return atmel_sha_cra_init_alg(tfm, NULL);
}

static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;
}
static struct ahash_alg sha_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
			.cra_exit		= atmel_sha_cra_exit,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_flags		= CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
			.cra_exit		= atmel_sha_cra_exit,
		}
	}
},
};
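/*
 * Consumer-side sketch (illustrative only, not part of this driver): once
 * the algorithms above are registered, a kernel user reaches them through
 * the generic ahash API, for example:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, cb, NULL);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);		(may return -EINPROGRESS)
 *
 * Here "cb", "sg", "digest", "nbytes" and "err" are caller-provided names
 * used only for illustration; error handling and freeing are omitted.
 */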
static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
	int err = 0;

	if (!(SHA_FLAGS_BUSY & dd->flags)) {
		atmel_sha_handle_queue(dd, NULL);
		return;
	}

	if (SHA_FLAGS_CPU & dd->flags) {
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return;

finish:
	/* finish current request */
	atmel_sha_finish_req(dd->req, err);
}
static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
		crypto_unregister_ahash(&sha_algs[i]);
}

static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		err = crypto_register_ahash(&sha_algs[i]);
		if (err)
			goto err_sha_algs;
	}

	return 0;

err_sha_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_algs[j]);

	return err;
}
static int atmel_sha_probe(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;
	struct device *dev = &pdev->dev;
	struct resource *sha_res;
	unsigned long sha_phys_size;
	int err;

	sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
	if (sha_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto sha_dd_err;
	}

	sha_dd->dev = dev;

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);

	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
					(unsigned long)sha_dd);

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	sha_dd->irq = -1;

	/* Get the base address */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	sha_dd->phys_base = sha_res->start;
	sha_phys_size = resource_size(sha_res);

	/* Get the IRQ */
	sha_dd->irq = platform_get_irq(pdev, 0);
	if (sha_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = sha_dd->irq;
		goto res_err;
	}

	err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha",
						sha_dd);
	if (err) {
		dev_err(dev, "unable to request sha irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	sha_dd->iclk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);
		goto clk_err;
	}

	sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size);
	if (!sha_dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto sha_io_err;
	}

	spin_lock(&atmel_sha.lock);
	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel SHA1/SHA256\n");

	return 0;

err_algs:
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);
	iounmap(sha_dd->io_base);
sha_io_err:
	clk_put(sha_dd->iclk);
clk_err:
	free_irq(sha_dd->irq, sha_dd);
res_err:
	tasklet_kill(&sha_dd->done_task);
	kfree(sha_dd);
sha_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int atmel_sha_remove(struct platform_device *pdev)
{
	static struct atmel_sha_dev *sha_dd;

	sha_dd = platform_get_drvdata(pdev);
	if (!sha_dd)
		return -ENODEV;

	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);

	atmel_sha_unregister_algs(sha_dd);

	tasklet_kill(&sha_dd->done_task);

	iounmap(sha_dd->io_base);

	clk_put(sha_dd->iclk);

	if (sha_dd->irq >= 0)
		free_irq(sha_dd->irq, sha_dd);

	kfree(sha_dd);

	return 0;
}
static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(atmel_sha_driver);
MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");