drivers/crypto/sahara.c
1 /*
2 * Cryptographic API.
3 *
4 * Support for SAHARA cryptographic accelerator.
5 *
6 * Copyright (c) 2013 Vista Silicon S.L.
7 * Author: Javier Martin <javier.martin@vista-silicon.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Based on omap-aes.c and tegra-aes.c
14 */
15
16 #include <crypto/algapi.h>
17 #include <crypto/aes.h>
18
19 #include <linux/clk.h>
20 #include <linux/crypto.h>
21 #include <linux/interrupt.h>
22 #include <linux/io.h>
23 #include <linux/irq.h>
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/of.h>
27 #include <linux/of_device.h>
28 #include <linux/platform_device.h>
29
30 #define SAHARA_NAME "sahara"
31 #define SAHARA_VERSION_3 3
32 #define SAHARA_VERSION_4 4
33 #define SAHARA_TIMEOUT_MS 1000
34 #define SAHARA_MAX_HW_DESC 2
35 #define SAHARA_MAX_HW_LINK 20
36
37 #define FLAGS_MODE_MASK 0x000f
38 #define FLAGS_ENCRYPT BIT(0)
39 #define FLAGS_CBC BIT(1)
40 #define FLAGS_NEW_KEY BIT(3)
41 #define FLAGS_BUSY 4
42
43 #define SAHARA_HDR_BASE 0x00800000
44 #define SAHARA_HDR_SKHA_ALG_AES 0
45 #define SAHARA_HDR_SKHA_OP_ENC (1 << 2)
46 #define SAHARA_HDR_SKHA_MODE_ECB (0 << 3)
47 #define SAHARA_HDR_SKHA_MODE_CBC (1 << 3)
48 #define SAHARA_HDR_FORM_DATA (5 << 16)
49 #define SAHARA_HDR_FORM_KEY (8 << 16)
50 #define SAHARA_HDR_LLO (1 << 24)
51 #define SAHARA_HDR_CHA_SKHA (1 << 28)
52 #define SAHARA_HDR_CHA_MDHA (2 << 28)
53 #define SAHARA_HDR_PARITY_BIT (1 << 31)
54
55 /* SAHARA can only process one request at a time */
56 #define SAHARA_QUEUE_LENGTH 1
57
58 #define SAHARA_REG_VERSION 0x00
59 #define SAHARA_REG_DAR 0x04
60 #define SAHARA_REG_CONTROL 0x08
61 #define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
62 #define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
63 #define SAHARA_CONTROL_RNG_AUTORSD (1 << 7)
64 #define SAHARA_CONTROL_ENABLE_INT (1 << 4)
65 #define SAHARA_REG_CMD 0x0C
66 #define SAHARA_CMD_RESET (1 << 0)
67 #define SAHARA_CMD_CLEAR_INT (1 << 8)
68 #define SAHARA_CMD_CLEAR_ERR (1 << 9)
69 #define SAHARA_CMD_SINGLE_STEP (1 << 10)
70 #define SAHARA_CMD_MODE_BATCH (1 << 16)
71 #define SAHARA_CMD_MODE_DEBUG (1 << 18)
72 #define SAHARA_REG_STATUS 0x10
73 #define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
74 #define SAHARA_STATE_IDLE 0
75 #define SAHARA_STATE_BUSY 1
76 #define SAHARA_STATE_ERR 2
77 #define SAHARA_STATE_FAULT 3
78 #define SAHARA_STATE_COMPLETE 4
79 #define SAHARA_STATE_COMP_FLAG (1 << 2)
80 #define SAHARA_STATUS_DAR_FULL (1 << 3)
81 #define SAHARA_STATUS_ERROR (1 << 4)
82 #define SAHARA_STATUS_SECURE (1 << 5)
83 #define SAHARA_STATUS_FAIL (1 << 6)
84 #define SAHARA_STATUS_INIT (1 << 7)
85 #define SAHARA_STATUS_RNG_RESEED (1 << 8)
86 #define SAHARA_STATUS_ACTIVE_RNG (1 << 9)
87 #define SAHARA_STATUS_ACTIVE_MDHA (1 << 10)
88 #define SAHARA_STATUS_ACTIVE_SKHA (1 << 11)
89 #define SAHARA_STATUS_MODE_BATCH (1 << 16)
90 #define SAHARA_STATUS_MODE_DEDICATED (1 << 17)
91 #define SAHARA_STATUS_MODE_DEBUG (1 << 18)
92 #define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
93 #define SAHARA_REG_ERRSTATUS 0x14
94 #define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
95 #define SAHARA_ERRSOURCE_CHA 14
96 #define SAHARA_ERRSOURCE_DMA 15
97 #define SAHARA_ERRSTATUS_DMA_DIR (1 << 8)
98 #define SAHARA_ERRSTATUS_GET_DMASZ(x) (((x) >> 9) & 0x3)
99 #define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
100 #define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
101 #define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
102 #define SAHARA_REG_FADDR 0x18
103 #define SAHARA_REG_CDAR 0x1C
104 #define SAHARA_REG_IDAR 0x20
105
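/*
 * Layout of the descriptors and link entries fetched by the SAHARA DMA
 * engine. Each descriptor carries two pointer/length pairs (p1/len1 and
 * p2/len2) plus a pointer to the next descriptor; a data pointer may refer
 * to a chain of sahara_hw_link entries when a scatterlist is used.
 */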
106 struct sahara_hw_desc {
107 u32 hdr;
108 u32 len1;
109 dma_addr_t p1;
110 u32 len2;
111 dma_addr_t p2;
112 dma_addr_t next;
113 };
114
115 struct sahara_hw_link {
116 u32 len;
117 dma_addr_t p;
118 dma_addr_t next;
119 };
120
121 struct sahara_ctx {
122 struct sahara_dev *dev;
123 unsigned long flags;
124 int keylen;
125 u8 key[AES_KEYSIZE_128];
126 struct crypto_ablkcipher *fallback;
127 };
128
129 struct sahara_aes_reqctx {
130 unsigned long mode;
131 };
132
133 struct sahara_dev {
134 struct device *device;
135 unsigned int version;
136 void __iomem *regs_base;
137 struct clk *clk_ipg;
138 struct clk *clk_ahb;
139
140 struct sahara_ctx *ctx;
141 spinlock_t lock;
142 struct crypto_queue queue;
143 unsigned long flags;
144
145 struct tasklet_struct done_task;
146 struct tasklet_struct queue_task;
147
148 struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
149 dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];
150
151 u8 *key_base;
152 dma_addr_t key_phys_base;
153
154 u8 *iv_base;
155 dma_addr_t iv_phys_base;
156
157 struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
158 dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];
159
160 struct ablkcipher_request *req;
161 size_t total;
162 struct scatterlist *in_sg;
163 unsigned int nb_in_sg;
164 struct scatterlist *out_sg;
165 unsigned int nb_out_sg;
166
167 u32 error;
168 struct timer_list watchdog;
169 };
170
171 static struct sahara_dev *dev_ptr;
172
173 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
174 {
175 writel(data, dev->regs_base + reg);
176 }
177
178 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
179 {
180 return readl(dev->regs_base + reg);
181 }
182
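/*
 * Note: the header parity bit is toggled each time another header bit is
 * set below, which suggests the hardware expects the 32-bit header word to
 * keep a fixed (odd) parity.
 */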
183 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
184 {
185 u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
186 SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
187 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
188
189 if (dev->flags & FLAGS_CBC) {
190 hdr |= SAHARA_HDR_SKHA_MODE_CBC;
191 hdr ^= SAHARA_HDR_PARITY_BIT;
192 }
193
194 if (dev->flags & FLAGS_ENCRYPT) {
195 hdr |= SAHARA_HDR_SKHA_OP_ENC;
196 hdr ^= SAHARA_HDR_PARITY_BIT;
197 }
198
199 return hdr;
200 }
201
202 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
203 {
204 return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
205 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
206 }
207
208 static int sahara_sg_length(struct scatterlist *sg,
209 unsigned int total)
210 {
211 int sg_nb;
212 unsigned int len;
213 struct scatterlist *sg_list;
214
215 sg_nb = 0;
216 sg_list = sg;
217
218 while (total) {
219 len = min(sg_list->length, total);
220
221 sg_nb++;
222 total -= len;
223
224 sg_list = sg_next(sg_list);
225 if (!sg_list)
226 total = 0;
227 }
228
229 return sg_nb;
230 }
231
232 static char *sahara_err_src[16] = {
233 "No error",
234 "Header error",
235 "Descriptor length error",
236 "Descriptor length or pointer error",
237 "Link length error",
238 "Link pointer error",
239 "Input buffer error",
240 "Output buffer error",
241 "Output buffer starvation",
242 "Internal state fault",
243 "General descriptor problem",
244 "Reserved",
245 "Descriptor address error",
246 "Link address error",
247 "CHA error",
248 "DMA error"
249 };
250
251 static char *sahara_err_dmasize[4] = {
252 "Byte transfer",
253 "Half-word transfer",
254 "Word transfer",
255 "Reserved"
256 };
257
258 static char *sahara_err_dmasrc[8] = {
259 "No error",
260 "AHB bus error",
261 "Internal IP bus error",
262 "Parity error",
263 "DMA crosses 256 byte boundary",
264 "DMA is busy",
265 "Reserved",
266 "DMA HW error"
267 };
268
269 static char *sahara_cha_errsrc[12] = {
270 "Input buffer non-empty",
271 "Illegal address",
272 "Illegal mode",
273 "Illegal data size",
274 "Illegal key size",
275 "Write during processing",
276 "CTX read during processing",
277 "HW error",
278 "Input buffer disabled/underflow",
279 "Output buffer disabled/overflow",
280 "DES key parity error",
281 "Reserved"
282 };
283
284 static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
285
286 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
287 {
288 u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
289 u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
290
291 dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
292
293 dev_err(dev->device, " - %s.\n", sahara_err_src[source]);
294
295 if (source == SAHARA_ERRSOURCE_DMA) {
296 if (error & SAHARA_ERRSTATUS_DMA_DIR)
297 dev_err(dev->device, " * DMA read.\n");
298 else
299 dev_err(dev->device, " * DMA write.\n");
300
301 dev_err(dev->device, " * %s.\n",
302 sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
303 dev_err(dev->device, " * %s.\n",
304 sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
305 } else if (source == SAHARA_ERRSOURCE_CHA) {
306 dev_err(dev->device, " * %s.\n",
307 sahara_cha_errsrc[chasrc]);
308 dev_err(dev->device, " * %s.\n",
309 sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
310 }
311 dev_err(dev->device, "\n");
312 }
313
314 static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
315
316 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
317 {
318 u8 state;
319
320 if (!IS_ENABLED(DEBUG))
321 return;
322
323 state = SAHARA_STATUS_GET_STATE(status);
324
325 dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
326 __func__, status);
327
328 dev_dbg(dev->device, " - State = %d:\n", state);
329 if (state & SAHARA_STATE_COMP_FLAG)
330 dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");
331
332 dev_dbg(dev->device, " * %s.\n",
333 sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
334
335 if (status & SAHARA_STATUS_DAR_FULL)
336 dev_dbg(dev->device, " - DAR Full.\n");
337 if (status & SAHARA_STATUS_ERROR)
338 dev_dbg(dev->device, " - Error.\n");
339 if (status & SAHARA_STATUS_SECURE)
340 dev_dbg(dev->device, " - Secure.\n");
341 if (status & SAHARA_STATUS_FAIL)
342 dev_dbg(dev->device, " - Fail.\n");
343 if (status & SAHARA_STATUS_RNG_RESEED)
344 dev_dbg(dev->device, " - RNG Reseed Request.\n");
345 if (status & SAHARA_STATUS_ACTIVE_RNG)
346 dev_dbg(dev->device, " - RNG Active.\n");
347 if (status & SAHARA_STATUS_ACTIVE_MDHA)
348 dev_dbg(dev->device, " - MDHA Active.\n");
349 if (status & SAHARA_STATUS_ACTIVE_SKHA)
350 dev_dbg(dev->device, " - SKHA Active.\n");
351
352 if (status & SAHARA_STATUS_MODE_BATCH)
353 dev_dbg(dev->device, " - Batch Mode.\n");
354 else if (status & SAHARA_STATUS_MODE_DEDICATED)
355 dev_dbg(dev->device, " - Dedicated Mode.\n");
356 else if (status & SAHARA_STATUS_MODE_DEBUG)
357 dev_dbg(dev->device, " - Debug Mode.\n");
358
359 dev_dbg(dev->device, " - Internal state = 0x%02x\n",
360 SAHARA_STATUS_GET_ISTATE(status));
361
362 dev_dbg(dev->device, "Current DAR: 0x%08x\n",
363 sahara_read(dev, SAHARA_REG_CDAR));
364 dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
365 sahara_read(dev, SAHARA_REG_IDAR));
366 }
367
368 static void sahara_dump_descriptors(struct sahara_dev *dev)
369 {
370 int i;
371
372 if (!IS_ENABLED(DEBUG))
373 return;
374
375 for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
376 dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
377 i, dev->hw_phys_desc[i]);
378 dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
379 dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
380 dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
381 dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
382 dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
383 dev_dbg(dev->device, "\tnext = 0x%08x\n",
384 dev->hw_desc[i]->next);
385 }
386 dev_dbg(dev->device, "\n");
387 }
388
389 static void sahara_dump_links(struct sahara_dev *dev)
390 {
391 int i;
392
393 if (!IS_ENABLED(DEBUG))
394 return;
395
396 for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
397 dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
398 i, dev->hw_phys_link[i]);
399 dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
400 dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
401 dev_dbg(dev->device, "\tnext = 0x%08x\n",
402 dev->hw_link[i]->next);
403 }
404 dev_dbg(dev->device, "\n");
405 }
406
407 static void sahara_aes_done_task(unsigned long data)
408 {
409 struct sahara_dev *dev = (struct sahara_dev *)data;
410
411 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
412 DMA_FROM_DEVICE);
413 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
414 DMA_TO_DEVICE);
415
416 spin_lock(&dev->lock);
417 clear_bit(FLAGS_BUSY, &dev->flags);
418 spin_unlock(&dev->lock);
419
420 dev->req->base.complete(&dev->req->base, dev->error);
421 }
422
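/*
 * Watchdog timer callback: if the hardware has not signalled completion
 * within SAHARA_TIMEOUT_MS, dump the status/error registers and complete
 * the current request with -ETIMEDOUT.
 */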
423 static void sahara_watchdog(unsigned long data)
424 {
425 struct sahara_dev *dev = (struct sahara_dev *)data;
426 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
427 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
428
429 sahara_decode_status(dev, stat);
430 sahara_decode_error(dev, err);
431 dev->error = -ETIMEDOUT;
432 sahara_aes_done_task(data);
433 }
434
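/*
 * Build the two-descriptor chain for an AES request: hw_desc[0] loads the
 * key (and the IV in CBC mode), hw_desc[1] describes the input and output
 * data through link lists. Writing the physical address of hw_desc[0] to
 * the DAR register starts processing.
 */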
435 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
436 {
437 struct sahara_ctx *ctx = dev->ctx;
438 struct scatterlist *sg;
439 int ret;
440 int i, j;
441
442 /* Copy new key if necessary */
443 if (ctx->flags & FLAGS_NEW_KEY) {
444 memcpy(dev->key_base, ctx->key, ctx->keylen);
445 ctx->flags &= ~FLAGS_NEW_KEY;
446
447 if (dev->flags & FLAGS_CBC) {
448 dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
449 dev->hw_desc[0]->p1 = dev->iv_phys_base;
450 } else {
451 dev->hw_desc[0]->len1 = 0;
452 dev->hw_desc[0]->p1 = 0;
453 }
454 dev->hw_desc[0]->len2 = ctx->keylen;
455 dev->hw_desc[0]->p2 = dev->key_phys_base;
456 dev->hw_desc[0]->next = dev->hw_phys_desc[1];
457 }
458 dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);
459
460 dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
461 dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
462 if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
463 dev_err(dev->device, "not enough hw links (%d)\n",
464 dev->nb_in_sg + dev->nb_out_sg);
465 return -EINVAL;
466 }
467
468 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
469 DMA_TO_DEVICE);
470 if (ret != dev->nb_in_sg) {
471 dev_err(dev->device, "couldn't map in sg\n");
472 return -EINVAL;
473 }
474 ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
475 DMA_FROM_DEVICE);
476 if (ret != dev->nb_out_sg) {
477 dev_err(dev->device, "couldn't map out sg\n");
478 goto unmap_in;
479 }
480
481 /* Create input links */
482 dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
483 sg = dev->in_sg;
484 for (i = 0; i < dev->nb_in_sg; i++) {
485 dev->hw_link[i]->len = sg->length;
486 dev->hw_link[i]->p = sg->dma_address;
487 if (i == (dev->nb_in_sg - 1)) {
488 dev->hw_link[i]->next = 0;
489 } else {
490 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
491 sg = sg_next(sg);
492 }
493 }
494
495 /* Create output links */
496 dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
497 sg = dev->out_sg;
498 for (j = i; j < dev->nb_out_sg + i; j++) {
499 dev->hw_link[j]->len = sg->length;
500 dev->hw_link[j]->p = sg->dma_address;
501 if (j == (dev->nb_out_sg + i - 1)) {
502 dev->hw_link[j]->next = 0;
503 } else {
504 dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
505 sg = sg_next(sg);
506 }
507 }
508
509 /* Fill remaining fields of hw_desc[1] */
510 dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
511 dev->hw_desc[1]->len1 = dev->total;
512 dev->hw_desc[1]->len2 = dev->total;
513 dev->hw_desc[1]->next = 0;
514
515 sahara_dump_descriptors(dev);
516 sahara_dump_links(dev);
517
518 /* Start processing descriptor chain. */
519 mod_timer(&dev->watchdog,
520 jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
521 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
522
523 return 0;
524
525 unmap_in:
526 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
527 DMA_TO_DEVICE);
531
532 return -EINVAL;
533 }
534
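/*
 * Tasklet that dispatches the next queued request to the hardware. Only one
 * request is processed at a time: FLAGS_BUSY is set when a request is
 * enqueued and cleared again from the done/error paths.
 */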
535 static void sahara_aes_queue_task(unsigned long data)
536 {
537 struct sahara_dev *dev = (struct sahara_dev *)data;
538 struct crypto_async_request *async_req, *backlog;
539 struct sahara_ctx *ctx;
540 struct sahara_aes_reqctx *rctx;
541 struct ablkcipher_request *req;
542 int ret;
543
544 spin_lock(&dev->lock);
545 backlog = crypto_get_backlog(&dev->queue);
546 async_req = crypto_dequeue_request(&dev->queue);
547 if (!async_req)
548 clear_bit(FLAGS_BUSY, &dev->flags);
549 spin_unlock(&dev->lock);
550
551 if (!async_req)
552 return;
553
554 if (backlog)
555 backlog->complete(backlog, -EINPROGRESS);
556
557 req = ablkcipher_request_cast(async_req);
558
559 /* Request is ready to be dispatched by the device */
560 dev_dbg(dev->device,
561 "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
562 req->nbytes, req->src, req->dst);
563
564 /* assign new request to device */
565 dev->req = req;
566 dev->total = req->nbytes;
567 dev->in_sg = req->src;
568 dev->out_sg = req->dst;
569
570 rctx = ablkcipher_request_ctx(req);
571 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
572 rctx->mode &= FLAGS_MODE_MASK;
573 dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
574
575 if ((dev->flags & FLAGS_CBC) && req->info)
576 memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);
577
578 /* assign new context to device */
579 ctx->dev = dev;
580 dev->ctx = ctx;
581
582 ret = sahara_hw_descriptor_create(dev);
583 if (ret < 0) {
584 spin_lock(&dev->lock);
585 clear_bit(FLAGS_BUSY, &dev->flags);
586 spin_unlock(&dev->lock);
587 dev->req->base.complete(&dev->req->base, ret);
588 }
589 }
590
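/*
 * The hardware only handles 128-bit AES keys; 192- and 256-bit keys are
 * handed to the software fallback tfm allocated in sahara_aes_cra_init().
 */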
591 static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
592 unsigned int keylen)
593 {
594 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
595 int ret;
596
597 ctx->keylen = keylen;
598
599 /* SAHARA only supports 128bit keys */
600 if (keylen == AES_KEYSIZE_128) {
601 memcpy(ctx->key, key, keylen);
602 ctx->flags |= FLAGS_NEW_KEY;
603 return 0;
604 }
605
606 if (keylen != AES_KEYSIZE_128 &&
607 keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
608 return -EINVAL;
609
610 /*
611 * The requested key size is not supported by HW, do a fallback.
612 */
613 ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
614 ctx->fallback->base.crt_flags |=
615 (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
616
617 ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
618 if (ret) {
619 struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
620
621 tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
622 tfm_aux->crt_flags |=
623 (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
624 }
625 return ret;
626 }
627
628 static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
629 {
630 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
631 crypto_ablkcipher_reqtfm(req));
632 struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
633 struct sahara_dev *dev = dev_ptr;
634 int err = 0;
635 int busy;
636
637 dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
638 req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
639
640 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
641 dev_err(dev->device,
642 "request size is not exact amount of AES blocks\n");
643 return -EINVAL;
644 }
645
646 ctx->dev = dev;
647
648 rctx->mode = mode;
649 spin_lock_bh(&dev->lock);
650 err = ablkcipher_enqueue_request(&dev->queue, req);
651 busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
652 spin_unlock_bh(&dev->lock);
653
654 if (!busy)
655 tasklet_schedule(&dev->queue_task);
656
657 return err;
658 }
659
660 static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
661 {
662 struct crypto_tfm *tfm =
663 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
664 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
665 crypto_ablkcipher_reqtfm(req));
666 int err;
667
668 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
669 ablkcipher_request_set_tfm(req, ctx->fallback);
670 err = crypto_ablkcipher_encrypt(req);
671 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
672 return err;
673 }
674
675 return sahara_aes_crypt(req, FLAGS_ENCRYPT);
676 }
677
678 static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
679 {
680 struct crypto_tfm *tfm =
681 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
682 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
683 crypto_ablkcipher_reqtfm(req));
684 int err;
685
686 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
687 ablkcipher_request_set_tfm(req, ctx->fallback);
688 err = crypto_ablkcipher_decrypt(req);
689 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
690 return err;
691 }
692
693 return sahara_aes_crypt(req, 0);
694 }
695
696 static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
697 {
698 struct crypto_tfm *tfm =
699 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
700 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
701 crypto_ablkcipher_reqtfm(req));
702 int err;
703
704 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
705 ablkcipher_request_set_tfm(req, ctx->fallback);
706 err = crypto_ablkcipher_encrypt(req);
707 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
708 return err;
709 }
710
711 return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
712 }
713
714 static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
715 {
716 struct crypto_tfm *tfm =
717 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
718 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
719 crypto_ablkcipher_reqtfm(req));
720 int err;
721
722 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
723 ablkcipher_request_set_tfm(req, ctx->fallback);
724 err = crypto_ablkcipher_decrypt(req);
725 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
726 return err;
727 }
728
729 return sahara_aes_crypt(req, FLAGS_CBC);
730 }
731
732 static int sahara_aes_cra_init(struct crypto_tfm *tfm)
733 {
734 const char *name = crypto_tfm_alg_name(tfm);
735 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
736
737 ctx->fallback = crypto_alloc_ablkcipher(name, 0,
738 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
739 if (IS_ERR(ctx->fallback)) {
740 pr_err("Error allocating fallback algo %s\n", name);
741 return PTR_ERR(ctx->fallback);
742 }
743
744 tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
745
746 return 0;
747 }
748
749 static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
750 {
751 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
752
753 if (ctx->fallback)
754 crypto_free_ablkcipher(ctx->fallback);
755 ctx->fallback = NULL;
756 }
757
758 static struct crypto_alg aes_algs[] = {
759 {
760 .cra_name = "ecb(aes)",
761 .cra_driver_name = "sahara-ecb-aes",
762 .cra_priority = 300,
763 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
764 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
765 .cra_blocksize = AES_BLOCK_SIZE,
766 .cra_ctxsize = sizeof(struct sahara_ctx),
767 .cra_alignmask = 0x0,
768 .cra_type = &crypto_ablkcipher_type,
769 .cra_module = THIS_MODULE,
770 .cra_init = sahara_aes_cra_init,
771 .cra_exit = sahara_aes_cra_exit,
772 .cra_u.ablkcipher = {
773 .min_keysize = AES_MIN_KEY_SIZE,
774 .max_keysize = AES_MAX_KEY_SIZE,
775 .setkey = sahara_aes_setkey,
776 .encrypt = sahara_aes_ecb_encrypt,
777 .decrypt = sahara_aes_ecb_decrypt,
778 }
779 }, {
780 .cra_name = "cbc(aes)",
781 .cra_driver_name = "sahara-cbc-aes",
782 .cra_priority = 300,
783 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
784 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
785 .cra_blocksize = AES_BLOCK_SIZE,
786 .cra_ctxsize = sizeof(struct sahara_ctx),
787 .cra_alignmask = 0x0,
788 .cra_type = &crypto_ablkcipher_type,
789 .cra_module = THIS_MODULE,
790 .cra_init = sahara_aes_cra_init,
791 .cra_exit = sahara_aes_cra_exit,
792 .cra_u.ablkcipher = {
793 .min_keysize = AES_MIN_KEY_SIZE,
794 .max_keysize = AES_MAX_KEY_SIZE,
795 .ivsize = AES_BLOCK_SIZE,
796 .setkey = sahara_aes_setkey,
797 .encrypt = sahara_aes_cbc_encrypt,
798 .decrypt = sahara_aes_cbc_decrypt,
799 }
800 }
801 };
802
803 static irqreturn_t sahara_irq_handler(int irq, void *data)
804 {
805 struct sahara_dev *dev = (struct sahara_dev *)data;
806 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
807 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
808
809 del_timer(&dev->watchdog);
810
811 sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
812 SAHARA_REG_CMD);
813
814 sahara_decode_status(dev, stat);
815
816 if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
817 return IRQ_NONE;
818 } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
819 dev->error = 0;
820 } else {
821 sahara_decode_error(dev, err);
822 dev->error = -EINVAL;
823 }
824
825 tasklet_schedule(&dev->done_task);
826
827 return IRQ_HANDLED;
828 }
829
830
831 static int sahara_register_algs(struct sahara_dev *dev)
832 {
833 int err, i, j;
834
835 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
836 INIT_LIST_HEAD(&aes_algs[i].cra_list);
837 err = crypto_register_alg(&aes_algs[i]);
838 if (err)
839 goto err_aes_algs;
840 }
841
842 return 0;
843
844 err_aes_algs:
845 for (j = 0; j < i; j++)
846 crypto_unregister_alg(&aes_algs[j]);
847
848 return err;
849 }
850
851 static void sahara_unregister_algs(struct sahara_dev *dev)
852 {
853 int i;
854
855 for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
856 crypto_unregister_alg(&aes_algs[i]);
857 }
858
859 static struct platform_device_id sahara_platform_ids[] = {
860 { .name = "sahara-imx27" },
861 { /* sentinel */ }
862 };
863 MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
864
865 static struct of_device_id sahara_dt_ids[] = {
866 { .compatible = "fsl,imx53-sahara" },
867 { .compatible = "fsl,imx27-sahara" },
868 { /* sentinel */ }
869 };
870 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
871
872 static int sahara_probe(struct platform_device *pdev)
873 {
874 struct sahara_dev *dev;
875 struct resource *res;
876 u32 version;
877 int irq;
878 int err;
879 int i;
880
881 dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
882 if (dev == NULL) {
883 dev_err(&pdev->dev, "unable to alloc data struct.\n");
884 return -ENOMEM;
885 }
886
887 dev->device = &pdev->dev;
888 platform_set_drvdata(pdev, dev);
889
890 /* Get the base address */
891 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
892 dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
893 if (IS_ERR(dev->regs_base))
894 return PTR_ERR(dev->regs_base);
895
896 /* Get the IRQ */
897 irq = platform_get_irq(pdev, 0);
898 if (irq < 0) {
899 dev_err(&pdev->dev, "failed to get irq resource\n");
900 return irq;
901 }
902
903 err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
904 0, dev_name(&pdev->dev), dev);
905 if (err) {
906 dev_err(&pdev->dev, "failed to request irq\n");
907 return err;
908 }
909
910 /* clocks */
911 dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
912 if (IS_ERR(dev->clk_ipg)) {
913 dev_err(&pdev->dev, "Could not get ipg clock\n");
914 return PTR_ERR(dev->clk_ipg);
915 }
916
917 dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
918 if (IS_ERR(dev->clk_ahb)) {
919 dev_err(&pdev->dev, "Could not get ahb clock\n");
920 return PTR_ERR(dev->clk_ahb);
921 }
922
923 /* Allocate HW descriptors */
924 dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
925 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
926 &dev->hw_phys_desc[0], GFP_KERNEL);
927 if (!dev->hw_desc[0]) {
928 dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
929 return -ENOMEM;
930 }
931 dev->hw_desc[1] = dev->hw_desc[0] + 1;
932 dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
933 sizeof(struct sahara_hw_desc);
934
935 /* Allocate space for iv and key */
936 dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
937 &dev->key_phys_base, GFP_KERNEL);
938 if (!dev->key_base) {
939 dev_err(&pdev->dev, "Could not allocate memory for key\n");
940 err = -ENOMEM;
941 goto err_key;
942 }
943 dev->iv_base = dev->key_base + AES_KEYSIZE_128;
944 dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
945
946 /* Allocate space for HW links */
947 dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
948 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
949 &dev->hw_phys_link[0], GFP_KERNEL);
950 if (!dev->hw_link[0]) {
951 dev_err(&pdev->dev, "Could not allocate hw links\n");
952 err = -ENOMEM;
953 goto err_link;
954 }
955 for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
956 dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
957 sizeof(struct sahara_hw_link);
958 dev->hw_link[i] = dev->hw_link[i - 1] + 1;
959 }
960
961 crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
962
963 spin_lock_init(&dev->lock);
964
965 dev_ptr = dev;
966
967 tasklet_init(&dev->queue_task, sahara_aes_queue_task,
968 (unsigned long)dev);
969 tasklet_init(&dev->done_task, sahara_aes_done_task,
970 (unsigned long)dev);
971
972 init_timer(&dev->watchdog);
973 dev->watchdog.function = &sahara_watchdog;
974 dev->watchdog.data = (unsigned long)dev;
975
976 clk_prepare_enable(dev->clk_ipg);
977 clk_prepare_enable(dev->clk_ahb);
978
979 version = sahara_read(dev, SAHARA_REG_VERSION);
980 if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
981 if (version != SAHARA_VERSION_3)
982 err = -ENODEV;
983 } else if (of_device_is_compatible(pdev->dev.of_node,
984 "fsl,imx53-sahara")) {
985 if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
986 err = -ENODEV;
987 version = (version >> 8) & 0xff;
988 }
989 if (err == -ENODEV) {
990 dev_err(&pdev->dev, "SAHARA version %d not supported\n",
991 version);
992 goto err_algs;
993 }
994
995 dev->version = version;
996
997 sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
998 SAHARA_REG_CMD);
999 sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1000 SAHARA_CONTROL_SET_MAXBURST(8) |
1001 SAHARA_CONTROL_RNG_AUTORSD |
1002 SAHARA_CONTROL_ENABLE_INT,
1003 SAHARA_REG_CONTROL);
1004
1005 err = sahara_register_algs(dev);
1006 if (err)
1007 goto err_algs;
1008
1009 dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1010
1011 return 0;
1012
1013 err_algs:
1014 dma_free_coherent(&pdev->dev,
1015 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1016 dev->hw_link[0], dev->hw_phys_link[0]);
1017 clk_disable_unprepare(dev->clk_ipg);
1018 clk_disable_unprepare(dev->clk_ahb);
1019 dev_ptr = NULL;
1020 err_link:
1021 dma_free_coherent(&pdev->dev,
1022 2 * AES_KEYSIZE_128,
1023 dev->key_base, dev->key_phys_base);
1024 err_key:
1025 dma_free_coherent(&pdev->dev,
1026 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1027 dev->hw_desc[0], dev->hw_phys_desc[0]);
1028
1029 return err;
1030 }
1031
1032 static int sahara_remove(struct platform_device *pdev)
1033 {
1034 struct sahara_dev *dev = platform_get_drvdata(pdev);
1035
1036 dma_free_coherent(&pdev->dev,
1037 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1038 dev->hw_link[0], dev->hw_phys_link[0]);
1039 dma_free_coherent(&pdev->dev,
1040 2 * AES_KEYSIZE_128,
1041 dev->key_base, dev->key_phys_base);
1042 dma_free_coherent(&pdev->dev,
1043 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1044 dev->hw_desc[0], dev->hw_phys_desc[0]);
1045
1046 tasklet_kill(&dev->done_task);
1047 tasklet_kill(&dev->queue_task);
1048
1049 sahara_unregister_algs(dev);
1050
1051 clk_disable_unprepare(dev->clk_ipg);
1052 clk_disable_unprepare(dev->clk_ahb);
1053
1054 dev_ptr = NULL;
1055
1056 return 0;
1057 }
1058
1059 static struct platform_driver sahara_driver = {
1060 .probe = sahara_probe,
1061 .remove = sahara_remove,
1062 .driver = {
1063 .name = SAHARA_NAME,
1064 .owner = THIS_MODULE,
1065 .of_match_table = sahara_dt_ids,
1066 },
1067 .id_table = sahara_platform_ids,
1068 };
1069
1070 module_platform_driver(sahara_driver);
1071
1072 MODULE_LICENSE("GPL");
1073 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1074 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");