crypto: omap-aes - Convert to dma_request_slave_channel_compat()
drivers/crypto/omap-aes.c
/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
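/*
 * Worked examples: FLD_MASK(7, 0) == 0xFF, and FLD_VAL(0x3, 4, 3) == 0x18,
 * i.e. the value 3 shifted into bits 4:3 and clipped to that field.
 */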

#define AES_REG_KEY(x)		(0x1C - ((x ^ 0x01) * 0x04))
#define AES_REG_IV(x)		(0x20 + ((x) * 0x04))

#define AES_REG_CTRL		0x30
#define AES_REG_CTRL_CTR_WIDTH	(1 << 7)
#define AES_REG_CTRL_CTR	(1 << 6)
#define AES_REG_CTRL_CBC	(1 << 5)
#define AES_REG_CTRL_KEY_SIZE	(3 << 3)
#define AES_REG_CTRL_DIRECTION	(1 << 2)
#define AES_REG_CTRL_INPUT_READY	(1 << 1)
#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)

#define AES_REG_DATA		0x34
#define AES_REG_DATA_N(x)	(0x34 + ((x) * 0x04))

#define AES_REG_REV		0x44
#define AES_REG_REV_MAJOR	0xF0
#define AES_REG_REV_MINOR	0x0F

#define AES_REG_MASK		0x48
#define AES_REG_MASK_SIDLE	(1 << 6)
#define AES_REG_MASK_START	(1 << 5)
#define AES_REG_MASK_DMA_OUT_EN	(1 << 3)
#define AES_REG_MASK_DMA_IN_EN	(1 << 2)
#define AES_REG_MASK_SOFTRESET	(1 << 1)
#define AES_REG_AUTOIDLE	(1 << 0)

#define AES_REG_SYSSTATUS	0x4C
#define AES_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT		(5*HZ)

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)

#define FLAGS_INIT		BIT(4)
#define FLAGS_FAST		BIT(5)
#define FLAGS_BUSY		BIT(6)

struct omap_aes_ctx {
	struct omap_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long	flags;
};

struct omap_aes_reqctx {
	unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0

struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t			total;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sgl;
	size_t			in_offset;
	struct scatterlist	*out_sg;
	struct scatterlist	out_sgl;
	size_t			out_offset;

	size_t			buflen;
	void			*buf_in;
	size_t			dma_size;
	int			dma_in;
	struct dma_chan		*dma_lch_in;
	dma_addr_t		dma_addr_in;
	void			*buf_out;
	int			dma_out;
	struct dma_chan		*dma_lch_out;
	dma_addr_t		dma_addr_out;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	/*
	 * Clocks are enabled when a request starts and disabled when it
	 * finishes. There may be long delays between requests, and the
	 * device might go to off mode to save power.
	 */
	pm_runtime_get_sync(dd->dev);

	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val, mask;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	val = 0;
	if (dd->dma_lch_out != NULL)
		val |= AES_REG_MASK_DMA_OUT_EN;
	if (dd->dma_lch_in != NULL)
		val |= AES_REG_MASK_DMA_IN_EN;

	mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;

	omap_aes_write_mask(dd, AES_REG_MASK, val, mask);

	key32 = dd->ctx->keylen / sizeof(u32);

	/* it seems a key should always be set even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(i),
			__le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & FLAGS_CBC) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);

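	/*
	 * (keylen >> 3) - 1 encodes the key length into the two-bit KEY_SIZE
	 * field at bits 4:3: 16-byte keys map to 1, 24-byte keys to 2 and
	 * 32-byte keys to 3.
	 */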
	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
	       AES_REG_CTRL_KEY_SIZE;

	omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);

	return 0;
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take first available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
					 DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

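	/*
	 * dma_request_slave_channel_compat() first tries to look the channel
	 * up by name ("rx"/"tx") via the device's DT node and, failing that,
	 * falls back to the legacy filter-function lookup using the DMA
	 * resource numbers collected in omap_aes_get_res_pdev().
	 */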
	dd->dma_lch_in = dma_request_slave_channel_compat(mask,
						omap_dma_filter_fn,
						&dd->dma_in,
						dd->dev, "rx");
	if (!dd->dma_lch_in) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		goto err_dma_in;
	}

	dd->dma_lch_out = dma_request_slave_channel_compat(mask,
						omap_dma_filter_fn,
						&dd->dma_out,
						dd->dev, "tx");
	if (!dd->dma_lch_out) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
		   size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		/*
		 * buflen and total are AES_BLOCK_SIZE size aligned,
		 * so count should be also aligned
		 */

		sg_copy_buf(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

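/*
 * Queue one IN (memory-to-device) and one OUT (device-to-memory) transfer
 * against the single AES_REG_DATA port. Only the OUT descriptor gets a
 * completion callback: once the result has been drained, the whole block
 * transfer is done and the done_task can run.
 */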
static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	dma_addr_t dma_addr_in = sg_dma_address(in_sg);
	int ret, length = sg_dma_len(in_sg);

	pr_debug("len: %d\n", length);

	dd->dma_size = length;

	if (!(dd->flags & FLAGS_FAST))
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA;
	cfg.dst_addr = dd->phys_base + AES_REG_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA or disable idle mode */
	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
			    AES_REG_MASK_START);

	return 0;
}

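/*
 * Two submission paths: "fast" maps the caller's scatterlists directly when
 * both are single-entry and word-aligned; otherwise data is bounced through
 * the pre-mapped buffers set up in omap_aes_dma_init().
 */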
static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;
	int len32;

	pr_debug("total: %d\n", dd->total);

	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

		fast = in && out;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		if (count != dd->total) {
			pr_err("request length != buffer length\n");
			return -EINVAL;
		}

		pr_debug("fast\n");

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		in_sg = dd->in_sg;
		out_sg = dd->out_sg;

		dd->flags |= FLAGS_FAST;

	} else {
		/* use cache buffers */
		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
				dd->buflen, dd->total, 0);

		len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;

		/*
		 * The data going into the AES module has been copied
		 * to a local buffer and the data coming out will go
		 * into a local buffer so set up local SG entries for
		 * both.
		 */
		sg_init_table(&dd->in_sgl, 1);
		dd->in_sgl.offset = dd->in_offset;
		sg_dma_len(&dd->in_sgl) = len32;
		sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;

		sg_init_table(&dd->out_sgl, 1);
		dd->out_sgl.offset = dd->out_offset;
		sg_dma_len(&dd->out_sgl) = len32;
		sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;

		in_sg = &dd->in_sgl;
		out_sg = &dd->out_sgl;

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~FLAGS_FAST;

	}

	dd->total -= count;

	err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
	if (err) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	pm_runtime_put_sync(dd->dev);
	dd->flags &= ~FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;
	size_t count;

	pr_debug("total: %d\n", dd->total);

	omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);

	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	if (dd->flags & FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
				dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %u\n", count);
		}
	}

	return err;
}

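/*
 * Single point of serialisation: requests are enqueued under dd->lock and
 * only one owns the hardware at a time (FLAGS_BUSY). On completion the
 * tasklets call back in with req == NULL to start the next queued request.
 */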
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);
	if (err) {
		/* aes_task will not finish it, so do it here */
		omap_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret; /* return ret, which is enqueue return value */
}

static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	int err;

	pr_debug("enter\n");

	err = omap_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		err = omap_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	omap_aes_finish_req(dd, err);
	omap_aes_handle_queue(dd, NULL);

	pr_debug("exit\n");
}

static void omap_aes_queue_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	omap_aes_handle_queue(dd, NULL);
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		  !!(mode & FLAGS_ENCRYPT),
		  !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		pr_err("request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");

	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
}
};

#ifdef CONFIG_OF
static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->dma_out = -1; /* Dummy value that's unused */
	dd->dma_in = -1; /* Dummy value that's unused */

err:
	return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the DMA out channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA out resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_out = r->start;

	/* Get the DMA in channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r) {
		dev_err(dev, "no DMA in resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_in = r->start;

err:
	return err;
}

static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct resource res;
	int err = -ENOMEM, i, j;
	u32 reg;

	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_request_and_ioremap(dev, &res);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	reg = omap_aes_read(dd, AES_REG_REV);
	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);

	pm_runtime_put_sync(dev);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err)
		goto err_dma;

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		pr_debug("i: %d\n", i);
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;
err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
	omap_aes_dma_cleanup(dd);
err_dma:
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	pm_runtime_disable(dev);
err_res:
	kfree(dd);
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	kfree(dd);
	dd = NULL;

	return 0;
}

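/*
 * omap_aes_suspend() drops a runtime-PM reference and omap_aes_resume()
 * takes it back, keeping the reference count balanced across system sleep.
 */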
#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static const struct dev_pm_ops omap_aes_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
};

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.owner	= THIS_MODULE,
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

static int __init omap_aes_mod_init(void)
{
	return platform_driver_register(&omap_aes_driver);
}

static void __exit omap_aes_mod_exit(void)
{
	platform_driver_unregister(&omap_aes_driver);
}

module_init(omap_aes_mod_init);
module_exit(omap_aes_mod_exit);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");

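A minimal, hypothetical usage sketch (not part of the driver above): a kernel
module of the same era could exercise the "cbc(aes)" ablkcipher this driver
registers roughly as follows. The module and function names are invented for
illustration and error handling is condensed; the asynchronous wait follows
the standard crypto-API completion pattern.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/aes.h>

/* Called when an asynchronously processed request finishes. */
static void demo_done(struct crypto_async_request *req, int err)
{
	if (err == -EINPROGRESS)	/* left the backlog; keep waiting */
		return;
	complete(req->data);
}

static int __init aes_demo_init(void)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* all-zero demo key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };
	u8 *buf;
	int err;

	/* Binds the highest-priority "cbc(aes)" provider, possibly the
	 * cbc-aes-omap implementation registered above. */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);	/* DMA-safe buffer */
	if (!buf) {
		err = -ENOMEM;
		goto out_tfm;
	}

	err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_buf;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_buf;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					demo_done, &done);
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	/* One block, in place; nbytes must be AES_BLOCK_SIZE aligned. */
	ablkcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	err = crypto_ablkcipher_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&done);
		err = 0;
	}

	ablkcipher_request_free(req);
out_buf:
	kfree(buf);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}

static void __exit aes_demo_exit(void)
{
}

module_init(aes_demo_init);
module_exit(aes_demo_exit);
MODULE_LICENSE("GPL");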