/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>

#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE

#define DCP_ALIGNMENT	64

/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];
	uint8_t			sha_digest[SHA256_DIGEST_SIZE];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

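/*
 * Note: keeping every DMA-visible buffer in one struct lets the driver do a
 * single allocation and align it once (see DCP_ALIGNMENT above and the
 * PTR_ALIGN() in mxs_dcp_probe()). The 64-byte figure is assumed here to
 * match the alignment the DCP hardware requires for its descriptors and
 * bounce buffers.
 */
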
struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	struct mutex			mutex[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_ablkcipher	*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

/*
 * There can be only one instance of the MXS DCP due to the
 * design of the Linux Crypto API.
 */
static struct dcp *global_sdcp;
static DEFINE_MUTEX(global_mutex);

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

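/*
 * Each DCP channel has its own CMDPTR/SEMA/STAT register bank, spaced
 * 0x40 apart. For example, for channel 2 the macros above expand to:
 *
 *	MXS_DCP_CH_N_CMDPTR(2) == 0x100 + 2 * 0x40 == 0x180
 *	MXS_DCP_CH_N_SEMA(2)   == 0x110 + 2 * 0x40 == 0x190
 *	MXS_DCP_CH_N_STAT(2)   == 0x120 + 2 * 0x40 == 0x1a0
 */
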
/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

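/*
 * Submit the descriptor prepared in the coherent block to the channel
 * assigned to @actx and sleep until the interrupt handler signals
 * completion. The kick sequence is: clear the channel status, write the
 * physical descriptor address to CMDPTR, then increment the channel
 * semaphore, which starts the transfer.
 */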
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	int ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));

	/* Unmap the descriptor before the error checks so it is not leaked. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		return -ETIMEDOUT;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		return -EINVAL;
	}

	return 0;
}

/*
 * Encryption (AES128)
 */
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct ablkcipher_request *req, int init)
{
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					     2 * AES_KEYSIZE_128,
					     DMA_TO_DEVICE);
	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
					     DCP_BUF_SZ, DMA_FROM_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	/* Payload contains the key. */
	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
			 DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);

	return ret;
}

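/*
 * The cipher payload is the aes_key bounce buffer, which holds the 16-byte
 * key followed by 16 bytes of IV (or zeroes for ECB); with PAYLOAD_KEY set,
 * the engine fetches the key material from there. See
 * mxs_dcp_aes_block_crypt() below for how that buffer is filled.
 */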
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	const int nents = sg_nents(req->src);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
	uint32_t dst_off = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	int split = 0;
	unsigned int i, len, clen, rem = 0;
	int init = 0;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src)) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				out_tmp = out_buf;
				while (dst && actx->fill) {
					if (!split) {
						dst_buf = sg_virt(dst);
						dst_off = 0;
					}

					rem = min(sg_dma_len(dst) - dst_off,
						  actx->fill);

					memcpy(dst_buf + dst_off, out_tmp, rem);
					out_tmp += rem;
					dst_off += rem;
					actx->fill -= rem;

					if (dst_off == sg_dma_len(dst)) {
						dst = sg_next(dst);
						split = 0;
					} else {
						split = 1;
					}
				}
			}
		} while (len);
	}

	return ret;
}

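/*
 * The DCP cannot walk scatterlists on its own, so the function above bounces
 * the data through the coherent aes_in_buf/aes_out_buf pair: source SG
 * entries are copied in until the buffer is full (or the last SG entry is
 * reached), one hardware operation is run, and the result is copied back out
 * into the destination scatterlist.
 */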
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

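/*
 * One kthread per DCP channel drains the corresponding crypto_queue.
 * Requests are completed from thread context, so sleeping in
 * mxs_dcp_start_dma() (mutexes, wait_for_completion) is safe here.
 */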
static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int ret;

	ablkcipher_request_set_tfm(req, ctx->fallback);

	if (enc)
		ret = crypto_ablkcipher_encrypt(req);
	else
		ret = crypto_ablkcipher_decrypt(req);

	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

	return ret;
}

static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	mutex_lock(&sdcp->mutex[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return -EINPROGRESS;
}

static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

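/*
 * The four wrappers above are what the crypto API dispatches to. A minimal
 * sketch of a kernel consumer (illustrative only; error handling, IV and
 * scatterlist setup are elided):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	...build an ablkcipher_request for tfm, then...
 *	crypto_ablkcipher_encrypt(req);	 (ends up in mxs_dcp_aes_cbc_encrypt)
 */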
static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
	unsigned int ret;

	/*
	 * AES 128 is supported by the hardware, store key into temporary
	 * buffer and exit. We must use the temporary buffer here, since
	 * there can still be an operation in progress.
	 */
	actx->key_len = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/* Check if the key size is supported by kernel at all. */
	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by in-kernel software implementation, we use
	 * the software fallback.
	 */
	actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	actx->fallback->base.crt_flags |=
		tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;

	ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
	if (!ret)
		return 0;

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |=
		actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;

	return ret;
}

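/*
 * The fallback handling above follows the usual pattern for
 * CRYPTO_ALG_NEED_FALLBACK drivers: request flags (CRYPTO_TFM_REQ_MASK) are
 * forwarded to the software tfm before setkey, and result flags
 * (CRYPTO_TFM_RES_MASK) are copied back on failure.
 */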
static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *blk;

	blk = crypto_alloc_ablkcipher(name, 0, flags);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
	return 0;
}

static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(actx->fallback);
	actx->fallback = NULL;
}

/*
 * Hashing (SHA1/SHA256)
 */
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);

	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t digest_phys = dma_map_single(sdcp->dev,
						sdcp->coh->sha_digest,
						SHA256_DIGEST_SIZE,
						DMA_FROM_DEVICE);

	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/* Set HASH_TERM bit for the last transfer block. */
	if (rctx->fini) {
		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	dma_unmap_single(sdcp->dev, digest_phys, SHA256_DIGEST_SIZE,
			 DMA_FROM_DEVICE);
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

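/*
 * The digest bounce buffer is passed as the payload only together with
 * HASH_TERM, i.e. the hardware writes the digest out once, on the final
 * block; intermediate blocks only update the internal hash state.
 */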
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
	const int nents = sg_nents(req->src);

	uint8_t *digest = sdcp->coh->sha_digest;
	uint8_t *in_buf = sdcp->coh->sha_in_buf;

	uint8_t *src_buf;

	struct scatterlist *src;

	unsigned int i, len, clen;
	int ret;

	int fin = rctx->fini;
	if (fin)
		rctx->fini = 0;

	for_each_sg(req->src, src, nents, i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);

		do {
			if (actx->fill + len > DCP_BUF_SZ)
				clen = DCP_BUF_SZ - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer and still have some
			 * more data, submit the buffer.
			 */
			if (len && actx->fill == DCP_BUF_SZ) {
				ret = mxs_dcp_run_sha(req);
				if (ret)
					return ret;
				actx->fill = 0;
				rctx->init = 0;
			}
		} while (len);
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		ret = mxs_dcp_run_sha(req);
		if (ret || !req->result)
			return -EINVAL;

		actx->fill = 0;

		/* The hardware returns the digest byte-swapped, so flip it. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = digest[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	struct dcp_sha_req_ctx *rctx;

	struct ahash_request *req;
	int ret, fini;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (arq) {
			req = ahash_request_cast(arq);
			rctx = ahash_request_ctx(req);

			ret = dcp_sha_req_to_buf(arq);
			fini = rctx->fini;
			arq->complete(arq, ret);
			if (!fini)
				continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	mutex_lock(&sdcp->mutex[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return -EINPROGRESS;
}

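/*
 * The first update of a hashing session (actx->hot == 0) marks the request
 * with rctx->init, which makes mxs_dcp_run_sha() set HASH_INIT; every later
 * update continues the running hash instead.
 */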
static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct crypto_alg dcp_aes_algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-dcp",
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u = {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_ecb_encrypt,
				.decrypt	= mxs_dcp_aes_ecb_decrypt
			},
		},
	}, {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-dcp",
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.cra_init		= mxs_dcp_aes_fallback_init,
		.cra_exit		= mxs_dcp_aes_fallback_exit,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_u = {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= mxs_dcp_aes_setkey,
				.encrypt	= mxs_dcp_aes_cbc_encrypt,
				.decrypt	= mxs_dcp_aes_cbc_decrypt,
				.ivsize		= AES_BLOCK_SIZE,
			},
		},
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

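/*
 * Both the VMI and the regular DCP interrupt lines are routed here (see
 * mxs_dcp_probe() below); completing sdcp->completion[i] wakes the waiter
 * in mxs_dcp_start_dma().
 */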
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;

	struct resource *iores;
	int dcp_vmi_irq, dcp_irq;

	mutex_lock(&global_mutex);
	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		ret = -ENODEV;
		goto err_mutex;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0) {
		ret = dcp_vmi_irq;
		goto err_mutex;
	}

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0) {
		ret = dcp_irq;
		goto err_mutex;
	}

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp) {
		ret = -ENOMEM;
		goto err_mutex;
	}

	sdcp->dev = dev;
	sdcp->base = devm_ioremap_resource(dev, iores);
	if (IS_ERR(sdcp->base)) {
		ret = PTR_ERR(sdcp->base);
		goto err_mutex;
	}

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		goto err_mutex;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		goto err_mutex;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh) {
		ret = -ENOMEM;
		goto err_mutex;
	}

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret)
		goto err_mutex;

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		mutex_init(&sdcp->mutex[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		goto err_mutex;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_algs(dcp_aes_algs,
					   ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	ret = 0;
	goto err_mutex;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

err_mutex:
	mutex_unlock(&global_mutex);
	return ret;
}

static int mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	mutex_lock(&global_mutex);
	global_sdcp = NULL;
	mutex_unlock(&global_mutex);

	return 0;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

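/*
 * A device tree node matching the table above might look as follows. This
 * is only a sketch: the unit address, register window and interrupt numbers
 * are SoC-specific assumptions, not taken from this driver.
 *
 *	dcp: dcp@80028000 {
 *		compatible = "fsl,imx28-dcp";
 *		reg = <0x80028000 0x2000>;
 *		interrupts = <52 53>;
 *	};
 */
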
static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove	= mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.owner		= THIS_MODULE,
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");