/*
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)
#define FLAGS_BUSY		4	/* bit number used with test_and_set_bit()/clear_bit() */

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define SAHARA_CMD_RESET		(1 << 0)
#define SAHARA_CMD_CLEAR_INT		(1 << 8)
#define SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define SAHARA_CMD_MODE_BATCH		(1 << 16)
#define SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define SAHARA_STATE_IDLE		0
#define SAHARA_STATE_BUSY		1
#define SAHARA_STATE_ERR		2
#define SAHARA_STATE_FAULT		3
#define SAHARA_STATE_COMPLETE		4
#define SAHARA_STATE_COMP_FLAG		(1 << 2)
#define SAHARA_STATUS_DAR_FULL		(1 << 3)
#define SAHARA_STATUS_ERROR		(1 << 4)
#define SAHARA_STATUS_SECURE		(1 << 5)
#define SAHARA_STATUS_FAIL		(1 << 6)
#define SAHARA_STATUS_INIT		(1 << 7)
#define SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA		14
#define SAHARA_ERRSOURCE_DMA		15
#define SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20

/*
 * Hardware descriptor and link layouts. The field set follows their use in
 * sahara_hw_descriptor_create() and the dump helpers below; the pointer
 * fields carry 32-bit DMA addresses as seen by the engine.
 */
struct sahara_hw_desc {
        u32     hdr;
        u32     len1;
        u32     p1;
        u32     len2;
        u32     p2;
        u32     next;
};

struct sahara_hw_link {
        u32     len;
        u32     p;
        u32     next;
};

struct sahara_ctx {
        struct sahara_dev *dev;
        unsigned long flags;
        int keylen;
        u8 key[AES_KEYSIZE_128];
        struct crypto_ablkcipher *fallback;
};

struct sahara_aes_reqctx {
        unsigned long mode;
};

struct sahara_dev {
        struct device           *device;
        unsigned int            version;
        void __iomem            *regs_base;
        struct clk              *clk_ipg;
        struct clk              *clk_ahb;

        struct sahara_ctx       *ctx;
        spinlock_t              lock;
        struct crypto_queue     queue;
        unsigned long           flags;

        struct tasklet_struct   done_task;
        struct tasklet_struct   queue_task;

        struct sahara_hw_desc   *hw_desc[SAHARA_MAX_HW_DESC];
        dma_addr_t              hw_phys_desc[SAHARA_MAX_HW_DESC];

        u8                      *key_base;
        dma_addr_t              key_phys_base;

        u8                      *iv_base;
        dma_addr_t              iv_phys_base;

        struct sahara_hw_link   *hw_link[SAHARA_MAX_HW_LINK];
        dma_addr_t              hw_phys_link[SAHARA_MAX_HW_LINK];

        unsigned int            total;
        struct ablkcipher_request *req;
        struct scatterlist      *in_sg;
        unsigned int            nb_in_sg;
        struct scatterlist      *out_sg;
        unsigned int            nb_out_sg;

        int                     error;
        struct timer_list       watchdog;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
        writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
        return readl(dev->regs_base + reg);
}
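
/*
 * Build the header word for the key/IV descriptor. Each mode bit that gets
 * set (CBC, encrypt) also toggles SAHARA_HDR_PARITY_BIT so the header's
 * parity stays consistent.
 */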
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
        u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
                  SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
                  SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

        if (dev->flags & FLAGS_CBC) {
                hdr |= SAHARA_HDR_SKHA_MODE_CBC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        if (dev->flags & FLAGS_ENCRYPT) {
                hdr |= SAHARA_HDR_SKHA_OP_ENC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
        return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
               SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}
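
/*
 * Count how many scatterlist entries are needed to cover 'total' bytes.
 */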
static int sahara_sg_length(struct scatterlist *sg,
                            unsigned int total)
{
        int sg_nb = 0;
        unsigned int len;
        struct scatterlist *sg_list = sg;

        while (total) {
                len = min(sg_list->length, total);

                sg_nb++;
                total -= len;

                sg_list = sg_next(sg_list);
                if (!sg_list)
                        break;
        }

        return sg_nb;
}
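
/*
 * Human-readable strings for the fields of the error status register, used
 * by sahara_decode_error(). Only a subset of the possible values has a
 * string here; the remaining slots stay NULL.
 */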
static char *sahara_err_src[16] = {
        [2]  = "Descriptor length error",
        [3]  = "Descriptor length or pointer error",
        [5]  = "Link pointer error",
        [6]  = "Input buffer error",
        [7]  = "Output buffer error",
        [8]  = "Output buffer starvation",
        [9]  = "Internal state fault",
        [10] = "General descriptor problem",
        [12] = "Descriptor address error",
        [13] = "Link address error",
};

static char *sahara_err_dmasize[4] = {
        [1] = "Half-word transfer",
};

static char *sahara_err_dmasrc[8] = {
        [2] = "Internal IP bus error",
        [4] = "DMA crosses 256 byte boundary",
};

static char *sahara_cha_errsrc[12] = {
        [0]  = "Input buffer non-empty",
        [5]  = "Write during processing",
        [6]  = "CTX read during processing",
        [8]  = "Input buffer disabled/underflow",
        [9]  = "Output buffer disabled/overflow",
        [10] = "DES key parity error",
};

static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
        u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
        u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

        dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

        dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

        if (source == SAHARA_ERRSOURCE_DMA) {
                if (error & SAHARA_ERRSTATUS_DMA_DIR)
                        dev_err(dev->device, " * DMA read.\n");
                else
                        dev_err(dev->device, " * DMA write.\n");

                dev_err(dev->device, " * %s.\n",
                        sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
                dev_err(dev->device, " * %s.\n",
                        sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
        } else if (source == SAHARA_ERRSOURCE_CHA) {
                dev_err(dev->device, " * %s.\n",
                        sahara_cha_errsrc[chasrc]);
                dev_err(dev->device, " * %s.\n",
                        sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
        }
        dev_err(dev->device, "\n");
}

static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
        u8 state;

        if (!IS_ENABLED(DEBUG))
                return;

        state = SAHARA_STATUS_GET_STATE(status);

        dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
                __func__, status);

        dev_dbg(dev->device, " - State = %d:\n", state);
        if (state & SAHARA_STATE_COMP_FLAG)
                dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

        dev_dbg(dev->device, " * %s.\n",
                sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

        if (status & SAHARA_STATUS_DAR_FULL)
                dev_dbg(dev->device, " - DAR Full.\n");
        if (status & SAHARA_STATUS_ERROR)
                dev_dbg(dev->device, " - Error.\n");
        if (status & SAHARA_STATUS_SECURE)
                dev_dbg(dev->device, " - Secure.\n");
        if (status & SAHARA_STATUS_FAIL)
                dev_dbg(dev->device, " - Fail.\n");
        if (status & SAHARA_STATUS_RNG_RESEED)
                dev_dbg(dev->device, " - RNG Reseed Request.\n");
        if (status & SAHARA_STATUS_ACTIVE_RNG)
                dev_dbg(dev->device, " - RNG Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_MDHA)
                dev_dbg(dev->device, " - MDHA Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_SKHA)
                dev_dbg(dev->device, " - SKHA Active.\n");

        if (status & SAHARA_STATUS_MODE_BATCH)
                dev_dbg(dev->device, " - Batch Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEDICATED)
                dev_dbg(dev->device, " - Dedicated Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEBUG)
                dev_dbg(dev->device, " - Debug Mode.\n");

        dev_dbg(dev->device, " - Internal state = 0x%02x\n",
                SAHARA_STATUS_GET_ISTATE(status));

        dev_dbg(dev->device, "Current DAR: 0x%08x\n",
                sahara_read(dev, SAHARA_REG_CDAR));
        dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
                sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
        int i;

        if (!IS_ENABLED(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
                dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
                        i, dev->hw_phys_desc[i]);
                dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
                dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
                dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
                dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
                dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_desc[i]->next);
        }
        dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
        int i;

        if (!IS_ENABLED(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
                dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
                        i, dev->hw_phys_link[i]);
                dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
                dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_link[i]->next);
        }
        dev_dbg(dev->device, "\n");
}
static void sahara_aes_done_task(unsigned long data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;

        dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                     DMA_FROM_DEVICE);
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                     DMA_TO_DEVICE);

        spin_lock(&dev->lock);
        clear_bit(FLAGS_BUSY, &dev->flags);
        spin_unlock(&dev->lock);

        dev->req->base.complete(&dev->req->base, dev->error);
}
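
/*
 * Watchdog timer: fires if the completion interrupt never arrives. Dump the
 * status and error registers and fail the request with -ETIMEDOUT.
 */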
static void sahara_watchdog(unsigned long data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
        unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);

        sahara_decode_status(dev, stat);
        sahara_decode_error(dev, err);
        dev->error = -ETIMEDOUT;
        sahara_aes_done_task(data);
}
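
/*
 * Build the two-descriptor chain for one request: descriptor 0 loads the key
 * (and the IV for CBC), descriptor 1 points at the input and output link
 * lists. Writing the first descriptor's physical address to the DAR register
 * starts the operation; the watchdog covers the case of a missing IRQ.
 */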
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
        struct sahara_ctx *ctx = dev->ctx;
        struct scatterlist *sg;
        int ret;
        int i, j;

        /* Copy new key if necessary */
        if (ctx->flags & FLAGS_NEW_KEY) {
                memcpy(dev->key_base, ctx->key, ctx->keylen);
                ctx->flags &= ~FLAGS_NEW_KEY;
        }

        if (dev->flags & FLAGS_CBC) {
                dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
                dev->hw_desc[0]->p1 = dev->iv_phys_base;
        } else {
                dev->hw_desc[0]->len1 = 0;
                dev->hw_desc[0]->p1 = 0;
        }
        dev->hw_desc[0]->len2 = ctx->keylen;
        dev->hw_desc[0]->p2 = dev->key_phys_base;
        dev->hw_desc[0]->next = dev->hw_phys_desc[1];

        dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);

        dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
        dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
        if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
                dev_err(dev->device, "not enough hw links (%d)\n",
                        dev->nb_in_sg + dev->nb_out_sg);
                return -EINVAL;
        }

        ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                         DMA_TO_DEVICE);
        if (ret != dev->nb_in_sg) {
                dev_err(dev->device, "couldn't map in sg\n");
                goto unmap_in;
        }
        ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                         DMA_FROM_DEVICE);
        if (ret != dev->nb_out_sg) {
                dev_err(dev->device, "couldn't map out sg\n");
                goto unmap_out;
        }

        /* Create input links */
        dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
        sg = dev->in_sg;
        for (i = 0; i < dev->nb_in_sg; i++) {
                dev->hw_link[i]->len = sg->length;
                dev->hw_link[i]->p = sg->dma_address;
                if (i == (dev->nb_in_sg - 1)) {
                        dev->hw_link[i]->next = 0;
                } else {
                        dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                        sg = sg_next(sg);
                }
        }

        /* Create output links */
        dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
        sg = dev->out_sg;
        for (j = i; j < dev->nb_out_sg + i; j++) {
                dev->hw_link[j]->len = sg->length;
                dev->hw_link[j]->p = sg->dma_address;
                if (j == (dev->nb_out_sg + i - 1)) {
                        dev->hw_link[j]->next = 0;
                } else {
                        dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
                        sg = sg_next(sg);
                }
        }

        /* Fill remaining fields of hw_desc[1] */
        dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
        dev->hw_desc[1]->len1 = dev->total;
        dev->hw_desc[1]->len2 = dev->total;
        dev->hw_desc[1]->next = 0;

        sahara_dump_descriptors(dev);
        sahara_dump_links(dev);

        /* Start processing descriptor chain. */
        mod_timer(&dev->watchdog,
                  jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
        sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

        return 0;

unmap_out:
        dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                     DMA_FROM_DEVICE);
unmap_in:
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                     DMA_TO_DEVICE);

        return -EINVAL;
}
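
/*
 * Queue tasklet: pull the next request off the crypto queue, program the
 * device context and start the hardware on it.
 */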
static void sahara_aes_queue_task(unsigned long data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        struct crypto_async_request *async_req, *backlog;
        struct sahara_ctx *ctx;
        struct sahara_aes_reqctx *rctx;
        struct ablkcipher_request *req;
        int ret;

        spin_lock(&dev->lock);
        backlog = crypto_get_backlog(&dev->queue);
        async_req = crypto_dequeue_request(&dev->queue);
        if (!async_req)
                clear_bit(FLAGS_BUSY, &dev->flags);
        spin_unlock(&dev->lock);

        if (!async_req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ablkcipher_request_cast(async_req);

        /* Request is ready to be dispatched by the device */
        dev_dbg(dev->device,
                "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
                req->nbytes, req->src, req->dst);

        /* assign new request to device */
        dev->req = req;
        dev->total = req->nbytes;
        dev->in_sg = req->src;
        dev->out_sg = req->dst;

        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
        dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        if ((dev->flags & FLAGS_CBC) && req->info)
                memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

        /* assign new context to device */
        ctx->dev = dev;
        dev->ctx = ctx;

        ret = sahara_hw_descriptor_create(dev);
        if (ret < 0) {
                spin_lock(&dev->lock);
                clear_bit(FLAGS_BUSY, &dev->flags);
                spin_unlock(&dev->lock);
                dev->req->base.complete(&dev->req->base, ret);
        }
}
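
/*
 * Only 128-bit keys are handled by the hardware; 192/256-bit keys are
 * forwarded to the software fallback transform.
 */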
static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        int ret;

        ctx->keylen = keylen;

        /* SAHARA only supports 128bit keys */
        if (keylen == AES_KEYSIZE_128) {
                memcpy(ctx->key, key, keylen);
                ctx->flags |= FLAGS_NEW_KEY;
                return 0;
        }

        if (keylen != AES_KEYSIZE_128 &&
            keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, do a fallback.
         */
        ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        ctx->fallback->base.crt_flags |=
                (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
        if (ret) {
                struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

                tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm_aux->crt_flags |=
                        (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
        }

        return ret;
}
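
/*
 * Common entry point for all four operations: check block alignment, record
 * the requested mode, enqueue the request and kick the queue tasklet if the
 * device is not already busy.
 */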
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct sahara_dev *dev = dev_ptr;
        int err = 0;
        int busy;

        dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
                req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
                dev_err(dev->device,
                        "request size is not exact amount of AES blocks\n");
                return -EINVAL;
        }

        ctx->dev = dev;

        rctx->mode = mode;

        spin_lock_bh(&dev->lock);
        err = ablkcipher_enqueue_request(&dev->queue, req);
        busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
        spin_unlock_bh(&dev->lock);

        if (!busy)
                tasklet_schedule(&dev->queue_task);

        return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_encrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_decrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_encrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                ablkcipher_request_set_tfm(req, ctx->fallback);
                err = crypto_ablkcipher_decrypt(req);
                ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_CBC);
}
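
/*
 * Allocate (and later free) the software fallback cipher used for key sizes
 * the hardware cannot handle.
 */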
static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->fallback = crypto_alloc_ablkcipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("Error allocating fallback algo %s\n", name);
                return PTR_ERR(ctx->fallback);
        }

        tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

        return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->fallback)
                crypto_free_ablkcipher(ctx->fallback);
        ctx->fallback = NULL;
}

static struct crypto_alg aes_algs[] = {
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "sahara-ecb-aes",
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct sahara_ctx),
        .cra_alignmask          = 0x0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = sahara_aes_cra_init,
        .cra_exit               = sahara_aes_cra_exit,
        .cra_u.ablkcipher       = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = sahara_aes_setkey,
                .encrypt        = sahara_aes_ecb_encrypt,
                .decrypt        = sahara_aes_ecb_decrypt,
        }
}, {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "sahara-cbc-aes",
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct sahara_ctx),
        .cra_alignmask          = 0x0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = sahara_aes_cra_init,
        .cra_exit               = sahara_aes_cra_exit,
        .cra_u.ablkcipher       = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = sahara_aes_setkey,
                .encrypt        = sahara_aes_cbc_encrypt,
                .decrypt        = sahara_aes_cbc_decrypt,
        }
}
};
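
/*
 * Interrupt handler: acknowledge and clear the interrupt, decode the status,
 * record success or failure and schedule the completion tasklet.
 */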
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
        unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

        del_timer(&dev->watchdog);

        sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
                     SAHARA_REG_CMD);

        sahara_decode_status(dev, stat);

        if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
                return IRQ_NONE;
        } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
                dev->error = 0;
        } else {
                sahara_decode_error(dev, err);
                dev->error = -EINVAL;
        }

        tasklet_schedule(&dev->done_task);

        return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
        int err, i, j;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                INIT_LIST_HEAD(&aes_algs[i].cra_list);
                err = crypto_register_alg(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        return 0;

err_aes_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&aes_algs[j]);

        return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_alg(&aes_algs[i]);
}

static struct platform_device_id sahara_platform_ids[] = {
        { .name = "sahara-imx27" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static struct of_device_id sahara_dt_ids[] = {
        { .compatible = "fsl,imx53-sahara" },
        { .compatible = "fsl,imx27-sahara" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
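
/*
 * Probe: map the registers, grab the IRQ and clocks, allocate the coherent
 * buffers for descriptors, links, key and IV, check the hardware version and
 * register the AES algorithms.
 */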
static int sahara_probe(struct platform_device *pdev)
{
        struct sahara_dev *dev;
        struct resource *res;
        u32 version;
        int irq;
        int err;
        int i;

        dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
        if (dev == NULL) {
                dev_err(&pdev->dev, "unable to alloc data struct.\n");
                return -ENOMEM;
        }

        dev->device = &pdev->dev;
        platform_set_drvdata(pdev, dev);

        /* Get the base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(dev->regs_base))
                return PTR_ERR(dev->regs_base);

        /* Get the IRQ */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get irq resource\n");
                return irq;
        }

        err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
                               0, dev_name(&pdev->dev), dev);
        if (err) {
                dev_err(&pdev->dev, "failed to request irq\n");
                return err;
        }

        dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(dev->clk_ipg)) {
                dev_err(&pdev->dev, "Could not get ipg clock\n");
                return PTR_ERR(dev->clk_ipg);
        }

        dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
        if (IS_ERR(dev->clk_ahb)) {
                dev_err(&pdev->dev, "Could not get ahb clock\n");
                return PTR_ERR(dev->clk_ahb);
        }

        /* Allocate HW descriptors */
        dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                        &dev->hw_phys_desc[0], GFP_KERNEL);
        if (!dev->hw_desc[0]) {
                dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
                return -ENOMEM;
        }
        dev->hw_desc[1] = dev->hw_desc[0] + 1;
        dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
                               sizeof(struct sahara_hw_desc);

        /* Allocate space for iv and key */
        dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
                                           &dev->key_phys_base, GFP_KERNEL);
        if (!dev->key_base) {
                dev_err(&pdev->dev, "Could not allocate memory for key\n");
                err = -ENOMEM;
                goto err_key;
        }
        dev->iv_base = dev->key_base + AES_KEYSIZE_128;
        dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

        /* Allocate space for HW links */
        dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                        &dev->hw_phys_link[0], GFP_KERNEL);
        if (!dev->hw_link[0]) {
                dev_err(&pdev->dev, "Could not allocate hw links\n");
                err = -ENOMEM;
                goto err_link;
        }
        for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
                dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
                                       sizeof(struct sahara_hw_link);
                dev->hw_link[i] = dev->hw_link[i - 1] + 1;
        }

        crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

        spin_lock_init(&dev->lock);

        dev_ptr = dev;

        tasklet_init(&dev->queue_task, sahara_aes_queue_task,
                     (unsigned long)dev);
        tasklet_init(&dev->done_task, sahara_aes_done_task,
                     (unsigned long)dev);

        init_timer(&dev->watchdog);
        dev->watchdog.function = &sahara_watchdog;
        dev->watchdog.data = (unsigned long)dev;

        clk_prepare_enable(dev->clk_ipg);
        clk_prepare_enable(dev->clk_ahb);

        version = sahara_read(dev, SAHARA_REG_VERSION);
        if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
                if (version != SAHARA_VERSION_3)
                        err = -ENODEV;
        } else if (of_device_is_compatible(pdev->dev.of_node,
                        "fsl,imx53-sahara")) {
                if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
                        err = -ENODEV;
                version = (version >> 8) & 0xff;
        }
        if (err == -ENODEV) {
                dev_err(&pdev->dev, "SAHARA version %d not supported\n",
                        version);
                goto err_algs;
        }

        dev->version = version;

        sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
                     SAHARA_REG_CMD);
        sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
                          SAHARA_CONTROL_SET_MAXBURST(8) |
                          SAHARA_CONTROL_RNG_AUTORSD |
                          SAHARA_CONTROL_ENABLE_INT,
                          SAHARA_REG_CONTROL);

        err = sahara_register_algs(dev);
        if (err)
                goto err_algs;

        dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

        return 0;

err_algs:
        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                          dev->hw_link[0], dev->hw_phys_link[0]);
        clk_disable_unprepare(dev->clk_ipg);
        clk_disable_unprepare(dev->clk_ahb);
err_link:
        dma_free_coherent(&pdev->dev,
                          2 * AES_KEYSIZE_128,
                          dev->key_base, dev->key_phys_base);
err_key:
        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                          dev->hw_desc[0], dev->hw_phys_desc[0]);

        return err;
}

static int sahara_remove(struct platform_device *pdev)
{
        struct sahara_dev *dev = platform_get_drvdata(pdev);

        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                          dev->hw_link[0], dev->hw_phys_link[0]);
        dma_free_coherent(&pdev->dev,
                          2 * AES_KEYSIZE_128,
                          dev->key_base, dev->key_phys_base);
        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                          dev->hw_desc[0], dev->hw_phys_desc[0]);

        tasklet_kill(&dev->done_task);
        tasklet_kill(&dev->queue_task);

        sahara_unregister_algs(dev);

        clk_disable_unprepare(dev->clk_ipg);
        clk_disable_unprepare(dev->clk_ahb);

        dev_ptr = NULL;

        return 0;
}

static struct platform_driver sahara_driver = {
        .probe          = sahara_probe,
        .remove         = sahara_remove,
        .driver         = {
                .name   = SAHARA_NAME,
                .owner  = THIS_MODULE,
                .of_match_table = sahara_dt_ids,
        },
        .id_table       = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");