Commit | Line | Data |
---|---|---|
bd3c7b5c NR |
1 | /* |
2 | * Cryptographic API. | |
3 | * | |
4 | * Support for ATMEL AES HW acceleration. | |
5 | * | |
6 | * Copyright (c) 2012 Eukréa Electromatique - ATMEL | |
7 | * Author: Nicolas Royer <nicolas@eukrea.com> | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as published | |
11 | * by the Free Software Foundation. | |
12 | * | |
13 | * Some ideas are from omap-aes.c driver. | |
14 | */ | |
15 | ||
16 | ||
17 | #include <linux/kernel.h> | |
18 | #include <linux/module.h> | |
19 | #include <linux/slab.h> | |
20 | #include <linux/err.h> | |
21 | #include <linux/clk.h> | |
22 | #include <linux/io.h> | |
23 | #include <linux/hw_random.h> | |
24 | #include <linux/platform_device.h> | |
25 | ||
26 | #include <linux/device.h> | |
bd3c7b5c NR |
27 | #include <linux/init.h> |
28 | #include <linux/errno.h> | |
29 | #include <linux/interrupt.h> | |
bd3c7b5c | 30 | #include <linux/irq.h> |
bd3c7b5c NR |
31 | #include <linux/scatterlist.h> |
32 | #include <linux/dma-mapping.h> | |
be943c7d | 33 | #include <linux/of_device.h> |
bd3c7b5c NR |
34 | #include <linux/delay.h> |
35 | #include <linux/crypto.h> | |
bd3c7b5c NR |
36 | #include <crypto/scatterwalk.h> |
37 | #include <crypto/algapi.h> | |
38 | #include <crypto/aes.h> | |
cadc4ab8 | 39 | #include <linux/platform_data/crypto-atmel.h> |
be943c7d | 40 | #include <dt-bindings/dma/at91.h> |
bd3c7b5c NR |
41 | #include "atmel-aes-regs.h" |
42 | ||
/* Crypto API priority used when registering the algorithms. */
#define ATMEL_AES_PRIORITY	300

/* Order and size of the internal bounce buffer used for unaligned data. */
#define ATMEL_AES_BUFFER_ORDER	2
#define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)

/* CFB sub-block sizes, in bytes. */
#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8

/* Convert a byte count into a count of 32-bit words. */
#define SIZE_IN_WORDS(x)	((x) >> 2)

/* AES flags */
/* Reserve bits [18:16] [14:12] [0] for mode (same as for AES_MR) */
#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
#define AES_FLAGS_OFB		AES_MR_OPMOD_OFB
#define AES_FLAGS_CFB128	(AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
#define AES_FLAGS_CFB64		(AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
#define AES_FLAGS_CFB32		(AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
#define AES_FLAGS_CFB16		(AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
#define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR

/* All bits that encode the cipher mode/direction of the current request. */
#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK | \
				 AES_FLAGS_ENCRYPT)

/* Device state flags (not part of the AES_MR layout above). */
#define AES_FLAGS_INIT		BIT(2)
#define AES_FLAGS_BUSY		BIT(3)

/* Flags that survive across requests (see atmel_aes_set_mode()). */
#define AES_FLAGS_PERSISTENT	(AES_FLAGS_INIT | AES_FLAGS_BUSY)

#define ATMEL_AES_QUEUE_LENGTH	50

/* Requests shorter than this are processed by the CPU, not by DMA. */
#define ATMEL_AES_DMA_THRESHOLD	256
81 | ||
/* Hardware capabilities, filled from the IP version (see probe code). */
struct atmel_aes_caps {
	bool	has_dualbuff;	/* dual input buffer for DMA (AES_MR_DUALBUFF) */
	bool	has_cfb64;	/* CFB with 64-bit sub-blocks supported */
	bool	has_ctr32;	/* hardware 32-bit CTR counter; 16-bit otherwise */
	u32	max_burst_size;	/* max DMA burst for 16-byte block transfers */
};

struct atmel_aes_dev;

/* State-machine step: each step returns 0, -EINPROGRESS or an error. */
typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);

/* Transform state common to all modes. */
struct atmel_aes_base_ctx {
	struct atmel_aes_dev	*dd;	/* device bound on first use */
	atmel_aes_fn_t		start;	/* mode-specific entry point */
	int			keylen;	/* key length in bytes */
	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
	u16			block_size; /* DMA granularity for this mode */
};

struct atmel_aes_ctx {
	struct atmel_aes_base_ctx	base;
};

/* CTR mode needs extra per-transform state to split at counter overflow. */
struct atmel_aes_ctr_ctx {
	struct atmel_aes_base_ctx	base;

	u32			iv[AES_BLOCK_SIZE / sizeof(u32)]; /* working IV/counter */
	size_t			offset;	/* bytes of the request already done */
	struct scatterlist	src[2];	/* scratch for scatterwalk_ffwd() */
	struct scatterlist	dst[2];
};

/* Per-request state: mode/direction flags passed to atmel_aes_set_mode(). */
struct atmel_aes_reqctx {
	unsigned long	mode;
};

/* One direction of a DMA transfer. */
struct atmel_aes_dma {
	struct dma_chan		*chan;
	struct scatterlist	*sg;
	int			nents;
	unsigned int		remainder; /* bytes clipped from the last sg entry */
	unsigned int		sg_len;	   /* entries returned by dma_map_sg() */
};
127 | ||
/* Per-device state. */
struct atmel_aes_dev {
	struct list_head	list;		/* link in atmel_aes.dev_list */
	unsigned long		phys_base;	/* for DMA FIFO addresses */
	void __iomem		*io_base;

	struct crypto_async_request	*areq;	/* request being processed */
	struct atmel_aes_base_ctx	*ctx;

	bool			is_async;	/* completion must call areq->complete() */
	atmel_aes_fn_t		resume;		/* next state-machine step */
	atmel_aes_fn_t		cpu_transfer_complete;

	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;		/* AES_FLAGS_* */

	spinlock_t		lock;		/* protects queue and flags */
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	size_t			total;		/* bytes in the current transfer */
	size_t			datalen;	/* bytes left in the CPU transfer */
	u32			*data;		/* CPU-transfer cursor into buf */

	struct atmel_aes_dma	src;
	struct atmel_aes_dma	dst;

	size_t			buflen;
	void			*buf;		/* bounce buffer for unaligned data */
	struct scatterlist	aligned_sg;	/* single-entry sg over buf */
	struct scatterlist	*real_dst;	/* caller's dst, for copy-back */

	struct atmel_aes_caps	caps;

	u32			hw_version;
};

/* Driver-global device list. */
struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;		/* protects dev_list */
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};
178 | ||
e37a7e55 | 179 | /* Shared functions */ |
cadc4ab8 | 180 | |
bd3c7b5c NR |
/* Register accessors; relaxed MMIO is sufficient, ordering against DMA is
 * handled by the dmaengine API and by the explicit ISR polling below. */
static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
				   u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

/* Read @count consecutive 32-bit registers starting at @offset. */
static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

/* Write @count consecutive 32-bit registers starting at @offset. */
static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
			      const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

/* Read/write one 16-byte AES block (4 words) at @offset. */
static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
					u32 *value)
{
	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}

static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
					 const u32 *value)
{
	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}
217 | ||
/*
 * Run @resume immediately if the engine already has output ready, otherwise
 * arm the DATRDY interrupt and let the IRQ path call dd->resume later.
 * Returns @resume's result, or -EINPROGRESS when deferred to the interrupt.
 */
static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
						atmel_aes_fn_t resume)
{
	u32 isr = atmel_aes_read(dd, AES_ISR);

	if (unlikely(isr & AES_INT_DATARDY))
		return resume(dd);

	/* dd->resume must be set before unmasking the interrupt. */
	dd->resume = resume;
	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	return -EINPROGRESS;
}
230 | ||
/*
 * Number of bytes needed to pad @len up to the next multiple of
 * @block_size, which must be a power of two.  Returns 0 when @len is
 * already a multiple of @block_size.
 */
static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
{
	size_t remainder = len & (block_size - 1);

	return (block_size - remainder) & (block_size - 1);
}
236 | ||
/*
 * Bind @ctx to an AES device.  On first use the first registered device is
 * picked and cached in ctx->dd so subsequent requests from the same
 * transform always hit the same hardware.  Returns NULL if no device has
 * been probed yet.
 */
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		/* Take the first device on the list (there is usually one). */
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}

	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}
257 | ||
/*
 * Enable the peripheral clock and, on first use, soft-reset the IP.
 * The clock stays enabled on success; atmel_aes_complete() disables it.
 * Returns 0 or a clk_prepare_enable() error.
 */
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		/* NOTE(review): 0xE is written to the CKEY field after reset;
		 * presumably the datasheet "unlock" value — confirm against
		 * the AES_MR register description. */
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
	}

	return 0;
}
274 | ||
cadc4ab8 NR |
/* The IP version lives in the low 12 bits of AES_HW_VERSION. */
static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

/*
 * Probe-time helper: briefly power up the IP to latch its hardware version
 * into dd->hw_version, then power it back down.  Returns 0 or the
 * atmel_aes_hw_init() error.
 */
static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	int err;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	/* Balance the clk_prepare_enable() done by atmel_aes_hw_init(). */
	clk_disable_unprepare(dd->iclk);
	return 0;
}
295 | ||
77dacf5f CP |
/*
 * Load the request's mode/direction bits into dd->flags while keeping the
 * device-lifetime bits (INIT, BUSY) intact.
 */
static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
				      const struct atmel_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
}
302 | ||
/*
 * Finish the current request: release the clock and the BUSY flag, notify
 * the crypto layer when the request completed asynchronously, and kick the
 * queue tasklet so the next queued request gets started.  Returns @err so
 * callers can simply "return atmel_aes_complete(dd, err);".
 */
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	/* Only async completions go through the crypto completion callback;
	 * synchronous ones report @err via the return value instead. */
	if (dd->is_async)
		dd->areq->complete(dd->areq, err);

	tasklet_schedule(&dd->queue_task);

	return err;
}
315 | ||
e37a7e55 CP |
/*
 * Program the engine for the current request: mode register, key and
 * (except for ECB) the IV.  @use_dma selects DMA start mode vs. auto start
 * for CPU-driven transfers.
 */
static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
				 const u32 *iv)
{
	u32 valmr = 0;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	/* dd->flags already carries opmode + direction (see set_mode()). */
	valmr |= dd->flags & AES_FLAGS_MODE_MASK;

	if (use_dma) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
			  SIZE_IN_WORDS(dd->ctx->keylen));

	/* ECB has no IV; all other modes load it after MR, as required. */
	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
		atmel_aes_write_block(dd, AES_IVR(0), iv);
}
347 | ||
bbe628ed CP |
348 | |
349 | /* CPU transfer */ | |
350 | ||
/*
 * PIO state-machine step: the caller (or the previous iteration) has
 * already fed one input block, so each loop iteration first drains the
 * output block into dd->buf, then feeds the next input block.  When the
 * engine is not yet ready for the next block, re-arm DATRDY and resume
 * here from the interrupt.  On completion, copy the bounce buffer back to
 * the real destination and run dd->cpu_transfer_complete.
 */
static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
{
	int err = 0;
	u32 isr;

	for (;;) {
		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		if (dd->datalen < AES_BLOCK_SIZE)
			break;

		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_cpu_transfer;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	/* Only dd->total bytes are meaningful; padding stays in the buffer. */
	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				 dd->buf, dd->total))
		err = -EINVAL;

	if (err)
		return atmel_aes_complete(dd, err);

	return dd->cpu_transfer_complete(dd);
}
383 | ||
bbe628ed CP |
384 | static int atmel_aes_cpu_start(struct atmel_aes_dev *dd, |
385 | struct scatterlist *src, | |
386 | struct scatterlist *dst, | |
387 | size_t len, | |
388 | atmel_aes_fn_t resume) | |
bd3c7b5c | 389 | { |
bbe628ed | 390 | size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE); |
77dacf5f | 391 | |
bbe628ed CP |
392 | if (unlikely(len == 0)) |
393 | return -EINVAL; | |
77dacf5f | 394 | |
bbe628ed | 395 | sg_copy_to_buffer(src, sg_nents(src), dd->buf, len); |
77dacf5f | 396 | |
bbe628ed CP |
397 | dd->total = len; |
398 | dd->real_dst = dst; | |
399 | dd->cpu_transfer_complete = resume; | |
400 | dd->datalen = len + padlen; | |
401 | dd->data = (u32 *)dd->buf; | |
402 | atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); | |
403 | return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer); | |
404 | } | |
77dacf5f | 405 | |
77dacf5f | 406 | |
bbe628ed CP |
407 | /* DMA transfer */ |
408 | ||
static void atmel_aes_dma_callback(void *data);

/*
 * Check whether @len bytes of @sg are usable for DMA as-is: every entry
 * word-aligned, every entry but the last a multiple of the block size.
 * On success, fills @dma and *truncates the last sg entry* to end exactly
 * at @len, remembering the clipped bytes in dma->remainder so
 * atmel_aes_restore_sg() can undo the change after the transfer.
 */
static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
				    struct scatterlist *sg,
				    size_t len,
				    struct atmel_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, dd->ctx->block_size))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, dd->ctx->block_size))
				return false;

			dma->nents = nents+1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		/* Intermediate entries must be whole blocks. */
		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
			return false;

		len -= sg->length;
	}

	/* Scatterlist shorter than @len. */
	return false;
}
bd3c7b5c | 443 | |
bbe628ed CP |
/*
 * Undo the last-entry truncation performed by atmel_aes_check_aligned():
 * walk to the final mapped entry and give it back its clipped bytes.
 */
static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}
bd3c7b5c | 460 | |
bbe628ed CP |
/*
 * Prepare src/dst scatterlists for DMA.  Unaligned lists are bounced
 * through dd->buf (via dd->aligned_sg); in-place requests are mapped
 * bidirectionally with a single dma_map_sg() call.  Returns 0, -ENOMEM
 * when the bounce buffer is too small, or -EFAULT on mapping failure.
 */
static int atmel_aes_map(struct atmel_aes_dev *dd,
			 struct scatterlist *src,
			 struct scatterlist *dst,
			 size_t len)
{
	bool src_aligned, dst_aligned;
	size_t padlen;

	dd->total = len;
	dd->src.sg = src;
	dd->dst.sg = dst;
	dd->real_dst = dst;

	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
	if (!src_aligned || !dst_aligned) {
		padlen = atmel_aes_padlen(len, dd->ctx->block_size);

		if (dd->buflen < len + padlen)
			return -ENOMEM;

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
			dd->src.sg = &dd->aligned_sg;
			dd->src.nents = 1;
			dd->src.remainder = 0;
		}

		if (!dst_aligned) {
			dd->dst.sg = &dd->aligned_sg;
			dd->dst.nents = 1;
			dd->dst.remainder = 0;
		}

		sg_init_table(&dd->aligned_sg, 1);
		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
	}

	if (dd->src.sg == dd->dst.sg) {
		/* In-place (or both bounced): one bidirectional mapping. */
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_BIDIRECTIONAL);
		dd->dst.sg_len = dd->src.sg_len;
		if (!dd->src.sg_len)
			return -EFAULT;
	} else {
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_TO_DEVICE);
		if (!dd->src.sg_len)
			return -EFAULT;

		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
					    DMA_FROM_DEVICE);
		if (!dd->dst.sg_len) {
			/* Unwind the successful src mapping. */
			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
				     DMA_TO_DEVICE);
			return -EFAULT;
		}
	}

	return 0;
}
525 | ||
/*
 * Reverse atmel_aes_map(): unmap the DMA mappings, restore any truncated
 * scatterlist entries, and copy bounced output back to the caller's
 * destination when dd->aligned_sg was used for dst.
 */
static void atmel_aes_unmap(struct atmel_aes_dev *dd)
{
	if (dd->src.sg == dd->dst.sg) {
		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_BIDIRECTIONAL);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	} else {
		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
			     DMA_FROM_DEVICE);

		if (dd->dst.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->dst);

		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_TO_DEVICE);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	}

	/* Output went through the bounce buffer: copy the real payload out. */
	if (dd->dst.sg == &dd->aligned_sg)
		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				    dd->buf, dd->total);
}
bd3c7b5c | 552 | |
bbe628ed CP |
/*
 * Configure and submit one dmaengine transfer in direction @dir
 * (DMA_MEM_TO_DEV feeds IDATAR, DMA_DEV_TO_MEM drains ODATAR).  Only the
 * output channel gets a completion callback: when it fires, the whole
 * crypto operation is done.  Returns 0 or a negative error.
 */
static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
					enum dma_slave_buswidth addr_width,
					enum dma_transfer_direction dir,
					u32 maxburst)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config;
	dma_async_tx_callback callback;
	struct atmel_aes_dma *dma;
	int err;

	memset(&config, 0, sizeof(config));
	config.direction = dir;
	config.src_addr_width = addr_width;
	config.dst_addr_width = addr_width;
	config.src_maxburst = maxburst;
	config.dst_maxburst = maxburst;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma = &dd->src;
		callback = NULL;
		config.dst_addr = dd->phys_base + AES_IDATAR(0);
		break;

	case DMA_DEV_TO_MEM:
		dma = &dd->dst;
		callback = atmel_aes_dma_callback;
		config.src_addr = dd->phys_base + AES_ODATAR(0);
		break;

	default:
		return -EINVAL;
	}

	err = dmaengine_slave_config(dma->chan, &config);
	if (err)
		return err;

	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = callback;
	desc->callback_param = dd;
	dmaengine_submit(desc);
	dma_async_issue_pending(dma->chan);

	return 0;
}
10f12c1b | 604 | |
bbe628ed CP |
/* Abort the in-flight transfer on the channel matching @dir, if any. */
static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
					enum dma_transfer_direction dir)
{
	struct atmel_aes_dma *dma;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma = &dd->src;
		break;

	case DMA_DEV_TO_MEM:
		dma = &dd->dst;
		break;

	default:
		return;
	}

	dmaengine_terminate_all(dma->chan);
}
cadc4ab8 | 625 | |
bbe628ed CP |
/*
 * Start a DMA transfer of @len bytes from @src to @dst.  The bus width and
 * burst size are chosen from the mode's block size (narrow CFB sub-block
 * modes need narrow accesses).  The output channel is started before the
 * input channel so no data can be produced before we are ready to receive
 * it.  Returns -EINPROGRESS on success; on error, completes the request
 * via atmel_aes_complete() after unwinding.
 */
static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       size_t len,
			       atmel_aes_fn_t resume)
{
	enum dma_slave_buswidth addr_width;
	u32 maxburst;
	int err;

	switch (dd->ctx->block_size) {
	case CFB8_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		maxburst = 1;
		break;

	case CFB16_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		maxburst = 1;
		break;

	case CFB32_BLOCK_SIZE:
	case CFB64_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		maxburst = 1;
		break;

	case AES_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		maxburst = dd->caps.max_burst_size;
		break;

	default:
		err = -EINVAL;
		goto exit;
	}

	err = atmel_aes_map(dd, src, dst, len);
	if (err)
		goto exit;

	dd->resume = resume;

	/* Set output DMA transfer first */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
					   maxburst);
	if (err)
		goto unmap;

	/* Then set input DMA transfer */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
					   maxburst);
	if (err)
		goto output_transfer_stop;

	return -EINPROGRESS;

output_transfer_stop:
	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
unmap:
	atmel_aes_unmap(dd);
exit:
	return atmel_aes_complete(dd, err);
}
bd3c7b5c | 690 | |
bbe628ed CP |
/* Stop both DMA channels and release the scatterlist mappings. */
static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
{
	atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
	atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
	atmel_aes_unmap(dd);
}
697 | ||
/*
 * dmaengine completion callback for the output channel: tear down the
 * transfer and advance the state machine.  Runs in callback context, so
 * completion is asynchronous from the submitter's point of view.
 */
static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	atmel_aes_dma_stop(dd);
	dd->is_async = true;
	(void)dd->resume(dd);
}
706 | ||
/*
 * Enqueue @new_areq (may be NULL when called from the queue tasklet) and,
 * if the device is idle, dequeue and start the next request.  dd->is_async
 * records whether the started request is the one just submitted: if not,
 * its submitter already returned and completion must go through
 * areq->complete().  Returns the enqueue status for the new request, or
 * the start status when the new request was started synchronously.
 */
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
				  struct crypto_async_request *new_areq)
{
	struct crypto_async_request *areq, *backlog;
	struct atmel_aes_base_ctx *ctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&dd->queue, new_areq);
	if (dd->flags & AES_FLAGS_BUSY) {
		/* A request is in flight; it will re-run the queue later. */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	areq = crypto_dequeue_request(&dd->queue);
	if (areq)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	dd->areq = areq;
	dd->ctx = ctx;
	dd->is_async = (areq != new_areq);

	err = ctx->start(dd);
	return (dd->is_async) ? ret : err;
}
743 | ||
e37a7e55 CP |
744 | |
745 | /* AES async block ciphers */ | |
746 | ||
bbe628ed CP |
/* Terminal state-machine step: report success for the whole request. */
static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
{
	return atmel_aes_complete(dd, 0);
}
751 | ||
ccbf7298 CP |
/*
 * ctx->start handler for all non-CTR modes: initialize the hardware,
 * program mode/key/IV, then run the whole request either by DMA (large
 * requests, or narrow CFB modes whose block size is not AES_BLOCK_SIZE)
 * or by CPU PIO.
 */
static int atmel_aes_start(struct atmel_aes_dev *dd)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
			dd->ctx->block_size != AES_BLOCK_SIZE);
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	atmel_aes_write_ctrl(dd, use_dma, req->info);
	if (use_dma)
		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
					   atmel_aes_transfer_complete);

	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
				   atmel_aes_transfer_complete);
}
774 | ||
fcac8365 CP |
/* Downcast the common transform context to the CTR-specific one. */
static inline struct atmel_aes_ctr_ctx *
atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
}
780 | ||
/*
 * CTR state-machine step, used both to start and to resume a request.
 * The hardware counter is only 32 bits (or 16 bits on IPs without
 * has_ctr32), so a request whose block counter would wrap is split into
 * fragments: each fragment runs up to the wrap point, then the IV is
 * incremented in software and this function resumes with the remainder.
 */
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct scatterlist *src, *dst;
	u32 ctr, blocks;
	size_t datalen;
	bool use_dma, fragmented = false;

	/* Check for transfer completion. */
	ctx->offset += dd->total;
	if (ctx->offset >= req->nbytes)
		return atmel_aes_transfer_complete(dd);

	/* Compute data length. */
	datalen = req->nbytes - ctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(ctx->iv[3]);
	if (dd->caps.has_ctr32) {
		/* Check 32bit counter overflow. */
		u32 start = ctr;
		u32 end = start + blocks - 1;

		if (end < start) {
			ctr |= 0xffffffff;
			/* -start in u32 arithmetic == blocks until the wrap. */
			datalen = AES_BLOCK_SIZE * -start;
			fragmented = true;
		}
	} else {
		/* Check 16bit counter overflow. */
		u16 start = ctr & 0xffff;
		u16 end = start + (u16)blocks - 1;

		if (blocks >> 16 || end < start) {
			ctr |= 0xffff;
			datalen = AES_BLOCK_SIZE * (0x10000-start);
			fragmented = true;
		}
	}
	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);

	/* Jump to offset. */
	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));

	/* Configure hardware. */
	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		ctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
	}

	/* Resume here after each fragment until the request is done. */
	if (use_dma)
		return atmel_aes_dma_start(dd, src, dst, datalen,
					   atmel_aes_ctr_transfer);

	return atmel_aes_cpu_start(dd, src, dst, datalen,
				   atmel_aes_ctr_transfer);
}
845 | ||
/*
 * ctx->start handler for CTR mode: snapshot the request IV into the
 * per-transform working copy and kick off the (possibly fragmented)
 * transfer from offset 0.
 */
static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
	ctx->offset = 0;
	dd->total = 0;	/* first ctr_transfer() call advances offset by 0 */
	return atmel_aes_ctr_transfer(dd);
}
864 | ||
bd3c7b5c NR |
/*
 * Common entry point for all mode wrappers below: record the DMA block
 * size implied by the opmode, bind a device, stash the mode flags in the
 * request context and queue the request.  Returns the queueing status
 * (typically -EINPROGRESS) or -ENODEV when no device is available.
 */
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_base_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	struct atmel_aes_dev *dd;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	switch (mode & AES_FLAGS_OPMODE_MASK) {
	case AES_FLAGS_CFB8:
		ctx->block_size = CFB8_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB16:
		ctx->block_size = CFB16_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB32:
		ctx->block_size = CFB32_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB64:
		ctx->block_size = CFB64_BLOCK_SIZE;
		break;

	default:
		/* ECB/CBC/OFB/CFB128/CTR all work on full AES blocks. */
		ctx->block_size = AES_BLOCK_SIZE;
		break;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, &req->base);
}
903 | ||
bd3c7b5c NR |
/*
 * Store the AES key in the transform context.  Only 128/192/256-bit keys
 * are accepted; the key is written to hardware later, per request, by
 * atmel_aes_write_ctrl().
 */
static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
921 | ||
922 | static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req) | |
923 | { | |
77dacf5f | 924 | return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT); |
bd3c7b5c NR |
925 | } |
926 | ||
927 | static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req) | |
928 | { | |
77dacf5f | 929 | return atmel_aes_crypt(req, AES_FLAGS_ECB); |
bd3c7b5c NR |
930 | } |
931 | ||
932 | static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req) | |
933 | { | |
afbac17e | 934 | return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT); |
bd3c7b5c NR |
935 | } |
936 | ||
937 | static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req) | |
938 | { | |
afbac17e | 939 | return atmel_aes_crypt(req, AES_FLAGS_CBC); |
bd3c7b5c NR |
940 | } |
941 | ||
/* OFB mode entry points. */
static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB);
}
951 | ||
/* CFB128 mode entry points ("cfb(aes)" algorithm). */
static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
}
961 | ||
/* CFB64 mode entry points (only registered when dd->caps.has_cfb64). */
static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
}
971 | ||
/* CFB32 mode entry points. */
static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
}
981 | ||
/* CFB16 mode entry points. */
static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
}
991 | ||
/* CFB8 mode entry points. */
static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
}
1001 | ||
/* CTR mode entry points. */
static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR);
}
1011 | ||
/*
 * Per-tfm init for the plain block-cipher modes: reserve the request
 * context and select atmel_aes_start as the request entry point.
 */
static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
	ctx->base.start = atmel_aes_start;

	return 0;
}
1021 | ||
fcac8365 CP |
/*
 * Per-tfm init for "ctr(aes)": same as atmel_aes_cra_init() but requests
 * are driven by atmel_aes_ctr_start (counter handling).
 */
static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
	ctx->base.start = atmel_aes_ctr_start;

	return 0;
}
1031 | ||
bd3c7b5c NR |
/* Per-tfm teardown: nothing to release, contexts hold no resources. */
static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}
1035 | ||
/*
 * Algorithms registered unconditionally in atmel_aes_register_algs().
 * Alignment masks follow each mode's block size (0xf for 16-byte modes,
 * smaller for the narrow CFB variants); ECB carries no IV.
 */
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "atmel-ecb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ecb_encrypt,
		.decrypt	= atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "atmel-cbc-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cbc_encrypt,
		.decrypt	= atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "atmel-ofb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ofb_encrypt,
		.decrypt	= atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "atmel-cfb-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb_encrypt,
		.decrypt	= atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name		= "cfb32(aes)",
	.cra_driver_name	= "atmel-cfb32-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB32_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x3,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb32_encrypt,
		.decrypt	= atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name		= "cfb16(aes)",
	.cra_driver_name	= "atmel-cfb16-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB16_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x1,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb16_encrypt,
		.decrypt	= atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name		= "cfb8(aes)",
	.cra_driver_name	= "atmel-cfb8-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB8_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb8_encrypt,
		.decrypt	= atmel_aes_cfb8_decrypt,
	}
},
{
	/* CTR is a stream mode: blocksize 1, larger ctx for counter state. */
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "atmel-ctr-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctr_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_ctr_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ctr_encrypt,
		.decrypt	= atmel_aes_ctr_decrypt,
	}
},
};
1205 | ||
/* Registered separately, only when the hardware has CFB64 support. */
static struct crypto_alg aes_cfb64_alg = {
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
};
1227 | ||
e37a7e55 CP |
1228 | |
1229 | /* Probe functions */ | |
1230 | ||
/*
 * Allocate the driver's internal data buffer (2^ATMEL_AES_BUFFER_ORDER
 * pages).  The usable length is rounded down to a multiple of
 * AES_BLOCK_SIZE.  Returns 0 or -ENOMEM.
 */
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
	dd->buflen = ATMEL_AES_BUFFER_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		return -ENOMEM;
	}

	return 0;
}
1244 | ||
1245 | static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) | |
1246 | { | |
1247 | free_page((unsigned long)dd->buf); | |
1248 | } | |
1249 | ||
1250 | static bool atmel_aes_filter(struct dma_chan *chan, void *slave) | |
1251 | { | |
1252 | struct at_dma_slave *sl = slave; | |
1253 | ||
1254 | if (sl && sl->dma_dev == chan->device->dev) { | |
1255 | chan->private = sl; | |
1256 | return true; | |
1257 | } else { | |
1258 | return false; | |
1259 | } | |
1260 | } | |
1261 | ||
/*
 * Acquire the two DMA channels (memory->AES and AES->memory), trying the
 * device tree "tx"/"rx" names first and falling back to the platform-data
 * slave configuration via atmel_aes_filter().
 *
 * NOTE(review): the src channel is requested with the "tx" DT name but the
 * rxdata slave config (and vice versa for dst) — verify this pairing
 * against the platform data layout before changing it.
 *
 * Returns 0, or -ENOMEM if either channel is unavailable.
 */
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
			      struct crypto_platform_data *pdata)
{
	struct at_dma_slave *slave;
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	slave = &pdata->dma_slave->rxdata;
	dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
							slave, dd->dev, "tx");
	if (!dd->src.chan)
		goto err_dma_in;

	slave = &pdata->dma_slave->txdata;
	dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
							slave, dd->dev, "rx");
	if (!dd->dst.chan)
		goto err_dma_out;

	return 0;

err_dma_out:
	dma_release_channel(dd->src.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
}
1293 | ||
/* Release both DMA channels, in reverse order of acquisition. */
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dst.chan);
	dma_release_channel(dd->src.chan);
}
1299 | ||
bd3c7b5c NR |
/* Tasklet body: kick the request queue without enqueuing a new request. */
static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}
1306 | ||
/*
 * Tasklet body scheduled from the IRQ handler: mark the completion as
 * asynchronous and resume the state machine at the saved resume point.
 */
static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}
bd3c7b5c | 1314 | |
bd3c7b5c NR |
/*
 * Interrupt handler: acknowledge/disable any enabled pending interrupts
 * (ISR masked with IMR, written back to IDR) and defer the real work to
 * the done tasklet.  An interrupt with no request in flight is a bug and
 * is only logged.
 */
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1332 | ||
/* Unregister all algorithms, optional CFB64 first, then the base set. */
static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}
1343 | ||
/*
 * Register the base algorithm set, plus CFB64 if the hardware supports
 * it.  On failure, unregister everything registered so far: note that
 * err_aes_cfb64_alg sets i = ARRAY_SIZE(aes_algs) first so the cleanup
 * loop covers the whole base set.
 */
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	return 0;

err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}
1370 | ||
cadc4ab8 NR |
1371 | static void atmel_aes_get_cap(struct atmel_aes_dev *dd) |
1372 | { | |
1373 | dd->caps.has_dualbuff = 0; | |
1374 | dd->caps.has_cfb64 = 0; | |
fcac8365 | 1375 | dd->caps.has_ctr32 = 0; |
cadc4ab8 NR |
1376 | dd->caps.max_burst_size = 1; |
1377 | ||
1378 | /* keep only major version number */ | |
1379 | switch (dd->hw_version & 0xff0) { | |
973e209d LZ |
1380 | case 0x500: |
1381 | dd->caps.has_dualbuff = 1; | |
1382 | dd->caps.has_cfb64 = 1; | |
fcac8365 | 1383 | dd->caps.has_ctr32 = 1; |
973e209d LZ |
1384 | dd->caps.max_burst_size = 4; |
1385 | break; | |
cf1f0d12 LZ |
1386 | case 0x200: |
1387 | dd->caps.has_dualbuff = 1; | |
1388 | dd->caps.has_cfb64 = 1; | |
fcac8365 | 1389 | dd->caps.has_ctr32 = 1; |
cf1f0d12 LZ |
1390 | dd->caps.max_burst_size = 4; |
1391 | break; | |
cadc4ab8 NR |
1392 | case 0x130: |
1393 | dd->caps.has_dualbuff = 1; | |
1394 | dd->caps.has_cfb64 = 1; | |
1395 | dd->caps.max_burst_size = 4; | |
1396 | break; | |
1397 | case 0x120: | |
1398 | break; | |
1399 | default: | |
1400 | dev_warn(dd->dev, | |
1401 | "Unmanaged aes version, set minimum capabilities\n"); | |
1402 | break; | |
1403 | } | |
1404 | } | |
1405 | ||
be943c7d NF |
#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

/*
 * Build crypto_platform_data for a DT-probed device.  All memory is
 * devm-managed; returns an ERR_PTR() on failure.
 */
static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		/* devm would free this anyway; released early on purpose. */
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
/* Without CONFIG_OF there is no DT data to convert. */
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif
1446 | ||
49cfe4db | 1447 | static int atmel_aes_probe(struct platform_device *pdev) |
bd3c7b5c NR |
1448 | { |
1449 | struct atmel_aes_dev *aes_dd; | |
cadc4ab8 | 1450 | struct crypto_platform_data *pdata; |
bd3c7b5c NR |
1451 | struct device *dev = &pdev->dev; |
1452 | struct resource *aes_res; | |
bd3c7b5c NR |
1453 | int err; |
1454 | ||
1455 | pdata = pdev->dev.platform_data; | |
1456 | if (!pdata) { | |
be943c7d NF |
1457 | pdata = atmel_aes_of_init(pdev); |
1458 | if (IS_ERR(pdata)) { | |
1459 | err = PTR_ERR(pdata); | |
1460 | goto aes_dd_err; | |
1461 | } | |
1462 | } | |
1463 | ||
1464 | if (!pdata->dma_slave) { | |
bd3c7b5c NR |
1465 | err = -ENXIO; |
1466 | goto aes_dd_err; | |
1467 | } | |
1468 | ||
b0e8b341 | 1469 | aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL); |
bd3c7b5c NR |
1470 | if (aes_dd == NULL) { |
1471 | dev_err(dev, "unable to alloc data struct.\n"); | |
1472 | err = -ENOMEM; | |
1473 | goto aes_dd_err; | |
1474 | } | |
1475 | ||
1476 | aes_dd->dev = dev; | |
1477 | ||
1478 | platform_set_drvdata(pdev, aes_dd); | |
1479 | ||
1480 | INIT_LIST_HEAD(&aes_dd->list); | |
8a10eb8d | 1481 | spin_lock_init(&aes_dd->lock); |
bd3c7b5c NR |
1482 | |
1483 | tasklet_init(&aes_dd->done_task, atmel_aes_done_task, | |
1484 | (unsigned long)aes_dd); | |
1485 | tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task, | |
1486 | (unsigned long)aes_dd); | |
1487 | ||
1488 | crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH); | |
1489 | ||
1490 | aes_dd->irq = -1; | |
1491 | ||
1492 | /* Get the base address */ | |
1493 | aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1494 | if (!aes_res) { | |
1495 | dev_err(dev, "no MEM resource info\n"); | |
1496 | err = -ENODEV; | |
1497 | goto res_err; | |
1498 | } | |
1499 | aes_dd->phys_base = aes_res->start; | |
bd3c7b5c NR |
1500 | |
1501 | /* Get the IRQ */ | |
1502 | aes_dd->irq = platform_get_irq(pdev, 0); | |
1503 | if (aes_dd->irq < 0) { | |
1504 | dev_err(dev, "no IRQ resource info\n"); | |
1505 | err = aes_dd->irq; | |
b0e8b341 | 1506 | goto res_err; |
bd3c7b5c NR |
1507 | } |
1508 | ||
b0e8b341 LC |
1509 | err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq, |
1510 | IRQF_SHARED, "atmel-aes", aes_dd); | |
bd3c7b5c NR |
1511 | if (err) { |
1512 | dev_err(dev, "unable to request aes irq.\n"); | |
b0e8b341 | 1513 | goto res_err; |
bd3c7b5c NR |
1514 | } |
1515 | ||
1516 | /* Initializing the clock */ | |
b0e8b341 | 1517 | aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk"); |
bd3c7b5c | 1518 | if (IS_ERR(aes_dd->iclk)) { |
be208356 | 1519 | dev_err(dev, "clock initialization failed.\n"); |
bd3c7b5c | 1520 | err = PTR_ERR(aes_dd->iclk); |
b0e8b341 | 1521 | goto res_err; |
bd3c7b5c NR |
1522 | } |
1523 | ||
b0e8b341 | 1524 | aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res); |
bd3c7b5c NR |
1525 | if (!aes_dd->io_base) { |
1526 | dev_err(dev, "can't ioremap\n"); | |
1527 | err = -ENOMEM; | |
b0e8b341 | 1528 | goto res_err; |
bd3c7b5c NR |
1529 | } |
1530 | ||
aab0a39b CP |
1531 | err = atmel_aes_hw_version_init(aes_dd); |
1532 | if (err) | |
1533 | goto res_err; | |
cadc4ab8 NR |
1534 | |
1535 | atmel_aes_get_cap(aes_dd); | |
1536 | ||
1537 | err = atmel_aes_buff_init(aes_dd); | |
1538 | if (err) | |
1539 | goto err_aes_buff; | |
1540 | ||
1541 | err = atmel_aes_dma_init(aes_dd, pdata); | |
bd3c7b5c NR |
1542 | if (err) |
1543 | goto err_aes_dma; | |
1544 | ||
1545 | spin_lock(&atmel_aes.lock); | |
1546 | list_add_tail(&aes_dd->list, &atmel_aes.dev_list); | |
1547 | spin_unlock(&atmel_aes.lock); | |
1548 | ||
1549 | err = atmel_aes_register_algs(aes_dd); | |
1550 | if (err) | |
1551 | goto err_algs; | |
1552 | ||
be943c7d | 1553 | dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n", |
bbe628ed CP |
1554 | dma_chan_name(aes_dd->src.chan), |
1555 | dma_chan_name(aes_dd->dst.chan)); | |
bd3c7b5c NR |
1556 | |
1557 | return 0; | |
1558 | ||
1559 | err_algs: | |
1560 | spin_lock(&atmel_aes.lock); | |
1561 | list_del(&aes_dd->list); | |
1562 | spin_unlock(&atmel_aes.lock); | |
1563 | atmel_aes_dma_cleanup(aes_dd); | |
1564 | err_aes_dma: | |
cadc4ab8 NR |
1565 | atmel_aes_buff_cleanup(aes_dd); |
1566 | err_aes_buff: | |
bd3c7b5c NR |
1567 | res_err: |
1568 | tasklet_kill(&aes_dd->done_task); | |
1569 | tasklet_kill(&aes_dd->queue_task); | |
bd3c7b5c NR |
1570 | aes_dd_err: |
1571 | dev_err(dev, "initialization failed.\n"); | |
1572 | ||
1573 | return err; | |
1574 | } | |
1575 | ||
49cfe4db | 1576 | static int atmel_aes_remove(struct platform_device *pdev) |
bd3c7b5c NR |
1577 | { |
1578 | static struct atmel_aes_dev *aes_dd; | |
1579 | ||
1580 | aes_dd = platform_get_drvdata(pdev); | |
1581 | if (!aes_dd) | |
1582 | return -ENODEV; | |
1583 | spin_lock(&atmel_aes.lock); | |
1584 | list_del(&aes_dd->list); | |
1585 | spin_unlock(&atmel_aes.lock); | |
1586 | ||
1587 | atmel_aes_unregister_algs(aes_dd); | |
1588 | ||
1589 | tasklet_kill(&aes_dd->done_task); | |
1590 | tasklet_kill(&aes_dd->queue_task); | |
1591 | ||
1592 | atmel_aes_dma_cleanup(aes_dd); | |
2a377828 | 1593 | atmel_aes_buff_cleanup(aes_dd); |
bd3c7b5c | 1594 | |
bd3c7b5c NR |
1595 | return 0; |
1596 | } | |
1597 | ||
/* Platform driver glue; DT matching is compiled out without CONFIG_OF. */
static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");