/*
 * Cryptographic API.
 *
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from the omap-sham.c driver.
 */
15 | ||
16 | ||
17 | #include <linux/kernel.h> | |
18 | #include <linux/module.h> | |
19 | #include <linux/slab.h> | |
20 | #include <linux/err.h> | |
21 | #include <linux/clk.h> | |
22 | #include <linux/io.h> | |
23 | #include <linux/hw_random.h> | |
24 | #include <linux/platform_device.h> | |
25 | ||
26 | #include <linux/device.h> | |
ebc82efa NR |
27 | #include <linux/init.h> |
28 | #include <linux/errno.h> | |
29 | #include <linux/interrupt.h> | |
ebc82efa | 30 | #include <linux/irq.h> |
ebc82efa NR |
31 | #include <linux/scatterlist.h> |
32 | #include <linux/dma-mapping.h> | |
abfe7ae4 | 33 | #include <linux/of_device.h> |
ebc82efa NR |
34 | #include <linux/delay.h> |
35 | #include <linux/crypto.h> | |
36 | #include <linux/cryptohash.h> | |
37 | #include <crypto/scatterwalk.h> | |
38 | #include <crypto/algapi.h> | |
39 | #include <crypto/sha.h> | |
40 | #include <crypto/hash.h> | |
41 | #include <crypto/internal/hash.h> | |
d4905b38 | 42 | #include <linux/platform_data/crypto-atmel.h> |
ebc82efa NR |
43 | #include "atmel-sha-regs.h" |
44 | ||

/*
 * SHA flags: bits 0..6 mostly track device state (dd->flags), bits 16..24
 * track per-request state (ctx->flags); SHA_FLAGS_CPU is used in both.
 */
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_SHA1			BIT(18)
#define SHA_FLAGS_SHA224		BIT(19)
#define SHA_FLAGS_SHA256		BIT(20)
#define SHA_FLAGS_SHA384		BIT(21)
#define SHA_FLAGS_SHA512		BIT(22)
#define SHA_FLAGS_ERROR			BIT(23)
#define SHA_FLAGS_PAD			BIT(24)

#define SHA_OP_UPDATE	1
#define SHA_OP_FINAL	2

#define SHA_BUFFER_LEN		PAGE_SIZE

#define ATMEL_SHA_DMA_THRESHOLD		56

struct atmel_sha_caps {
	bool	has_dma;
	bool	has_dualbuff;
	bool	has_sha224;
	bool	has_sha_384_512;
};

struct atmel_sha_dev;

struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
	u64	digcnt[2];
	size_t	bufcnt;
	size_t	buflen;
	dma_addr_t	dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	size_t	block_size;

	u8	buffer[0] __aligned(sizeof(u32));
};

struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;

	unsigned long		flags;
};

#define ATMEL_SHA_QUEUE_LENGTH	50

struct atmel_sha_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
};

struct atmel_sha_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct atmel_sha_dma	dma_lch_in;

	struct atmel_sha_caps	caps;

	u32	hw_version;
};

struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};

static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}
157 | ||
ebc82efa NR |
158 | static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx) |
159 | { | |
160 | size_t count; | |
161 | ||
162 | while ((ctx->bufcnt < ctx->buflen) && ctx->total) { | |
163 | count = min(ctx->sg->length - ctx->offset, ctx->total); | |
164 | count = min(count, ctx->buflen - ctx->bufcnt); | |
165 | ||
166 | if (count <= 0) | |
167 | break; | |
168 | ||
169 | scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, | |
170 | ctx->offset, count, 0); | |
171 | ||
172 | ctx->bufcnt += count; | |
173 | ctx->offset += count; | |
174 | ctx->total -= count; | |
175 | ||
176 | if (ctx->offset == ctx->sg->length) { | |
177 | ctx->sg = sg_next(ctx->sg); | |
178 | if (ctx->sg) | |
179 | ctx->offset = 0; | |
180 | else | |
181 | ctx->total = 0; | |
182 | } | |
183 | } | |
184 | ||
185 | return 0; | |
186 | } | |

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message, followed by
 * "padlen-1" zero bits. Then a 64 bit block (SHA1/SHA224/SHA256) or a
 * 128 bit block (SHA384/SHA512) containing the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
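/*
 * Worked example (SHA1 case): for a 3 byte message, index = 3 < 56, so
 * padlen = 56 - 3 = 53 and the padded message is 3 + 53 + 8 = 64 bytes,
 * i.e. exactly one 512 bit block. For a 60 byte message, index = 60 >= 56,
 * so padlen = (64 + 56) - 60 = 60 and the result is 60 + 60 + 8 = 128
 * bytes, i.e. two blocks.
 */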
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits[2];
	u64 size[2];

	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)
		size[1]++;

	size[0] += length;
	if (size[0] < length)
		size[1]++;

	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
	} else {
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
	}
}

static int atmel_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = NULL;
	struct atmel_sha_dev *tmp;

	spin_lock_bh(&atmel_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}

	spin_unlock_bh(&atmel_sha.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->block_size = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->block_size = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->block_size = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->block_size = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt[0] = 0;
	ctx->digcnt[1] = 0;
	ctx->buflen = SHA_BUFFER_LEN;

	return 0;
}
304 | ||
305 | static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma) | |
306 | { | |
307 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | |
308 | u32 valcr = 0, valmr = SHA_MR_MODE_AUTO; | |
309 | ||
310 | if (likely(dma)) { | |
d4905b38 NR |
311 | if (!dd->caps.has_dma) |
312 | atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE); | |
ebc82efa | 313 | valmr = SHA_MR_MODE_PDC; |
d4905b38 NR |
314 | if (dd->caps.has_dualbuff) |
315 | valmr |= SHA_MR_DUALBUFF; | |
ebc82efa NR |
316 | } else { |
317 | atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); | |
318 | } | |
319 | ||
d4905b38 NR |
320 | if (ctx->flags & SHA_FLAGS_SHA1) |
321 | valmr |= SHA_MR_ALGO_SHA1; | |
322 | else if (ctx->flags & SHA_FLAGS_SHA224) | |
323 | valmr |= SHA_MR_ALGO_SHA224; | |
324 | else if (ctx->flags & SHA_FLAGS_SHA256) | |
ebc82efa | 325 | valmr |= SHA_MR_ALGO_SHA256; |
d4905b38 NR |
326 | else if (ctx->flags & SHA_FLAGS_SHA384) |
327 | valmr |= SHA_MR_ALGO_SHA384; | |
328 | else if (ctx->flags & SHA_FLAGS_SHA512) | |
329 | valmr |= SHA_MR_ALGO_SHA512; | |
ebc82efa NR |
330 | |
331 | /* Setting CR_FIRST only for the first iteration */ | |
d4905b38 | 332 | if (!(ctx->digcnt[0] || ctx->digcnt[1])) |
ebc82efa NR |
333 | valcr = SHA_CR_FIRST; |
334 | ||
335 | atmel_sha_write(dd, SHA_CR, valcr); | |
336 | atmel_sha_write(dd, SHA_MR, valmr); | |
337 | } | |
338 | ||
339 | static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf, | |
340 | size_t length, int final) | |
341 | { | |
342 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | |
343 | int count, len32; | |
344 | const u32 *buffer = (const u32 *)buf; | |
345 | ||
d4905b38 NR |
346 | dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n", |
347 | ctx->digcnt[1], ctx->digcnt[0], length, final); | |
ebc82efa NR |
348 | |
349 | atmel_sha_write_ctrl(dd, 0); | |
350 | ||
351 | /* should be non-zero before next lines to disable clocks later */ | |
d4905b38 NR |
352 | ctx->digcnt[0] += length; |
353 | if (ctx->digcnt[0] < length) | |
354 | ctx->digcnt[1]++; | |
ebc82efa NR |
355 | |
356 | if (final) | |
357 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ | |
358 | ||
359 | len32 = DIV_ROUND_UP(length, sizeof(u32)); | |
360 | ||
361 | dd->flags |= SHA_FLAGS_CPU; | |
362 | ||
363 | for (count = 0; count < len32; count++) | |
364 | atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]); | |
365 | ||
366 | return -EINPROGRESS; | |
367 | } | |
368 | ||
369 | static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, | |
370 | size_t length1, dma_addr_t dma_addr2, size_t length2, int final) | |
371 | { | |
372 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | |
373 | int len32; | |
374 | ||
d4905b38 NR |
375 | dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n", |
376 | ctx->digcnt[1], ctx->digcnt[0], length1, final); | |
ebc82efa NR |
377 | |
378 | len32 = DIV_ROUND_UP(length1, sizeof(u32)); | |
379 | atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS); | |
380 | atmel_sha_write(dd, SHA_TPR, dma_addr1); | |
381 | atmel_sha_write(dd, SHA_TCR, len32); | |
382 | ||
383 | len32 = DIV_ROUND_UP(length2, sizeof(u32)); | |
384 | atmel_sha_write(dd, SHA_TNPR, dma_addr2); | |
385 | atmel_sha_write(dd, SHA_TNCR, len32); | |
386 | ||
387 | atmel_sha_write_ctrl(dd, 1); | |
388 | ||
389 | /* should be non-zero before next lines to disable clocks later */ | |
d4905b38 NR |
390 | ctx->digcnt[0] += length1; |
391 | if (ctx->digcnt[0] < length1) | |
392 | ctx->digcnt[1]++; | |
ebc82efa NR |
393 | |
394 | if (final) | |
395 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ | |
396 | ||
397 | dd->flags |= SHA_FLAGS_DMA_ACTIVE; | |
398 | ||
399 | /* Start DMA transfer */ | |
400 | atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN); | |
401 | ||
402 | return -EINPROGRESS; | |
403 | } | |
404 | ||
d4905b38 NR |
405 | static void atmel_sha_dma_callback(void *data) |
406 | { | |
407 | struct atmel_sha_dev *dd = data; | |
408 | ||
409 | /* dma_lch_in - completed - wait DATRDY */ | |
410 | atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); | |
411 | } | |
412 | ||
413 | static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, | |
414 | size_t length1, dma_addr_t dma_addr2, size_t length2, int final) | |
415 | { | |
416 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | |
417 | struct dma_async_tx_descriptor *in_desc; | |
418 | struct scatterlist sg[2]; | |
419 | ||
420 | dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n", | |
421 | ctx->digcnt[1], ctx->digcnt[0], length1, final); | |
422 | ||
423 | if (ctx->flags & (SHA_FLAGS_SHA1 | SHA_FLAGS_SHA224 | | |
424 | SHA_FLAGS_SHA256)) { | |
425 | dd->dma_lch_in.dma_conf.src_maxburst = 16; | |
426 | dd->dma_lch_in.dma_conf.dst_maxburst = 16; | |
427 | } else { | |
428 | dd->dma_lch_in.dma_conf.src_maxburst = 32; | |
429 | dd->dma_lch_in.dma_conf.dst_maxburst = 32; | |
430 | } | |
431 | ||
432 | dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); | |
433 | ||
434 | if (length2) { | |
435 | sg_init_table(sg, 2); | |
436 | sg_dma_address(&sg[0]) = dma_addr1; | |
437 | sg_dma_len(&sg[0]) = length1; | |
438 | sg_dma_address(&sg[1]) = dma_addr2; | |
439 | sg_dma_len(&sg[1]) = length2; | |
440 | in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2, | |
441 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | |
442 | } else { | |
443 | sg_init_table(sg, 1); | |
444 | sg_dma_address(&sg[0]) = dma_addr1; | |
445 | sg_dma_len(&sg[0]) = length1; | |
446 | in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1, | |
447 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | |
448 | } | |
449 | if (!in_desc) | |
450 | return -EINVAL; | |
451 | ||
452 | in_desc->callback = atmel_sha_dma_callback; | |
453 | in_desc->callback_param = dd; | |
454 | ||
455 | atmel_sha_write_ctrl(dd, 1); | |
456 | ||
457 | /* should be non-zero before next lines to disable clocks later */ | |
458 | ctx->digcnt[0] += length1; | |
459 | if (ctx->digcnt[0] < length1) | |
460 | ctx->digcnt[1]++; | |
461 | ||
462 | if (final) | |
463 | dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ | |
464 | ||
465 | dd->flags |= SHA_FLAGS_DMA_ACTIVE; | |
466 | ||
467 | /* Start DMA transfer */ | |
468 | dmaengine_submit(in_desc); | |
469 | dma_async_issue_pending(dd->dma_lch_in.chan); | |
470 | ||
471 | return -EINPROGRESS; | |
472 | } | |
473 | ||
474 | static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, | |
475 | size_t length1, dma_addr_t dma_addr2, size_t length2, int final) | |
476 | { | |
477 | if (dd->caps.has_dma) | |
478 | return atmel_sha_xmit_dma(dd, dma_addr1, length1, | |
479 | dma_addr2, length2, final); | |
480 | else | |
481 | return atmel_sha_xmit_pdc(dd, dma_addr1, length1, | |
482 | dma_addr2, length2, final); | |
483 | } | |
484 | ||
ebc82efa NR |
485 | static int atmel_sha_update_cpu(struct atmel_sha_dev *dd) |
486 | { | |
487 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | |
488 | int bufcnt; | |
489 | ||
490 | atmel_sha_append_sg(ctx); | |
491 | atmel_sha_fill_padding(ctx, 0); | |
ebc82efa NR |
492 | bufcnt = ctx->bufcnt; |
493 | ctx->bufcnt = 0; | |
494 | ||
495 | return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1); | |
496 | } | |
497 | ||
498 | static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd, | |
499 | struct atmel_sha_reqctx *ctx, | |
500 | size_t length, int final) | |
501 | { | |
502 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, | |
d4905b38 | 503 | ctx->buflen + ctx->block_size, DMA_TO_DEVICE); |
ebc82efa NR |
504 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { |
505 | dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen + | |
d4905b38 | 506 | ctx->block_size); |
ebc82efa NR |
507 | return -EINVAL; |
508 | } | |
509 | ||
510 | ctx->flags &= ~SHA_FLAGS_SG; | |
511 | ||
512 | /* next call does not fail... so no unmap in the case of error */ | |
d4905b38 | 513 | return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final); |
ebc82efa NR |
514 | } |
515 | ||
516 | static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd) | |
517 | { | |
518 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | |
519 | unsigned int final; | |
520 | size_t count; | |
521 | ||
522 | atmel_sha_append_sg(ctx); | |
523 | ||
524 | final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; | |
525 | ||
d4905b38 NR |
526 | dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n", |
527 | ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final); | |
ebc82efa NR |
528 | |
529 | if (final) | |
530 | atmel_sha_fill_padding(ctx, 0); | |
531 | ||
532 | if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { | |
533 | count = ctx->bufcnt; | |
534 | ctx->bufcnt = 0; | |
535 | return atmel_sha_xmit_dma_map(dd, ctx, count, final); | |
536 | } | |
537 | ||
538 | return 0; | |
539 | } | |
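
/*
 * Fast path: hand suitably aligned scatterlist entries straight to the
 * DMA engine; fall back to atmel_sha_update_dma_slow() (copy through the
 * internal buffer) whenever offset or block-size alignment constraints
 * are not met.
 */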
540 | ||
541 | static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) | |
542 | { | |
543 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | |
544 | unsigned int length, final, tail; | |
545 | struct scatterlist *sg; | |
546 | unsigned int count; | |
547 | ||
548 | if (!ctx->total) | |
549 | return 0; | |
550 | ||
551 | if (ctx->bufcnt || ctx->offset) | |
552 | return atmel_sha_update_dma_slow(dd); | |
553 | ||
d4905b38 NR |
554 | dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n", |
555 | ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total); | |
ebc82efa NR |
556 | |
557 | sg = ctx->sg; | |
558 | ||
559 | if (!IS_ALIGNED(sg->offset, sizeof(u32))) | |
560 | return atmel_sha_update_dma_slow(dd); | |
561 | ||
d4905b38 NR |
562 | if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size)) |
563 | /* size is not ctx->block_size aligned */ | |
ebc82efa NR |
564 | return atmel_sha_update_dma_slow(dd); |
565 | ||
566 | length = min(ctx->total, sg->length); | |
567 | ||
568 | if (sg_is_last(sg)) { | |
569 | if (!(ctx->flags & SHA_FLAGS_FINUP)) { | |
d4905b38 NR |
570 | /* not last sg must be ctx->block_size aligned */ |
571 | tail = length & (ctx->block_size - 1); | |
ebc82efa | 572 | length -= tail; |
ebc82efa NR |
573 | } |
574 | } | |
575 | ||
576 | ctx->total -= length; | |
577 | ctx->offset = length; /* offset where to start slow */ | |
578 | ||
579 | final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; | |
580 | ||
581 | /* Add padding */ | |
582 | if (final) { | |
d4905b38 | 583 | tail = length & (ctx->block_size - 1); |
ebc82efa NR |
584 | length -= tail; |
585 | ctx->total += tail; | |
586 | ctx->offset = length; /* offset where to start slow */ | |
587 | ||
588 | sg = ctx->sg; | |
589 | atmel_sha_append_sg(ctx); | |
590 | ||
591 | atmel_sha_fill_padding(ctx, length); | |
592 | ||
593 | ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, | |
d4905b38 | 594 | ctx->buflen + ctx->block_size, DMA_TO_DEVICE); |
ebc82efa NR |
595 | if (dma_mapping_error(dd->dev, ctx->dma_addr)) { |
596 | dev_err(dd->dev, "dma %u bytes error\n", | |
d4905b38 | 597 | ctx->buflen + ctx->block_size); |
ebc82efa NR |
598 | return -EINVAL; |
599 | } | |
600 | ||
601 | if (length == 0) { | |
602 | ctx->flags &= ~SHA_FLAGS_SG; | |
603 | count = ctx->bufcnt; | |
604 | ctx->bufcnt = 0; | |
d4905b38 | 605 | return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0, |
ebc82efa NR |
606 | 0, final); |
607 | } else { | |
608 | ctx->sg = sg; | |
609 | if (!dma_map_sg(dd->dev, ctx->sg, 1, | |
610 | DMA_TO_DEVICE)) { | |
611 | dev_err(dd->dev, "dma_map_sg error\n"); | |
612 | return -EINVAL; | |
613 | } | |
614 | ||
615 | ctx->flags |= SHA_FLAGS_SG; | |
616 | ||
617 | count = ctx->bufcnt; | |
618 | ctx->bufcnt = 0; | |
d4905b38 | 619 | return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), |
ebc82efa NR |
620 | length, ctx->dma_addr, count, final); |
621 | } | |
622 | } | |
623 | ||
624 | if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { | |
625 | dev_err(dd->dev, "dma_map_sg error\n"); | |
626 | return -EINVAL; | |
627 | } | |
628 | ||
629 | ctx->flags |= SHA_FLAGS_SG; | |
630 | ||
631 | /* next call does not fail... so no unmap in the case of error */ | |
d4905b38 | 632 | return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0, |
ebc82efa NR |
633 | 0, final); |
634 | } | |
635 | ||
636 | static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd) | |
637 | { | |
638 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); | |
639 | ||
640 | if (ctx->flags & SHA_FLAGS_SG) { | |
641 | dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); | |
642 | if (ctx->sg->length == ctx->offset) { | |
643 | ctx->sg = sg_next(ctx->sg); | |
644 | if (ctx->sg) | |
645 | ctx->offset = 0; | |
646 | } | |
d4905b38 | 647 | if (ctx->flags & SHA_FLAGS_PAD) { |
ebc82efa | 648 | dma_unmap_single(dd->dev, ctx->dma_addr, |
d4905b38 NR |
649 | ctx->buflen + ctx->block_size, DMA_TO_DEVICE); |
650 | } | |
ebc82efa NR |
651 | } else { |
652 | dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + | |
d4905b38 | 653 | ctx->block_size, DMA_TO_DEVICE); |
ebc82efa NR |
654 | } |
655 | ||
656 | return 0; | |
657 | } | |
658 | ||
659 | static int atmel_sha_update_req(struct atmel_sha_dev *dd) | |
660 | { | |
661 | struct ahash_request *req = dd->req; | |
662 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
663 | int err; | |
664 | ||
d4905b38 NR |
665 | dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n", |
666 | ctx->total, ctx->digcnt[1], ctx->digcnt[0]); | |
ebc82efa NR |
667 | |
668 | if (ctx->flags & SHA_FLAGS_CPU) | |
669 | err = atmel_sha_update_cpu(dd); | |
670 | else | |
671 | err = atmel_sha_update_dma_start(dd); | |
672 | ||
673 | /* wait for dma completion before can take more data */ | |
d4905b38 NR |
674 | dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0%llx\n", |
675 | err, ctx->digcnt[1], ctx->digcnt[0]); | |
ebc82efa NR |
676 | |
677 | return err; | |
678 | } | |
679 | ||
680 | static int atmel_sha_final_req(struct atmel_sha_dev *dd) | |
681 | { | |
682 | struct ahash_request *req = dd->req; | |
683 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
684 | int err = 0; | |
685 | int count; | |
686 | ||
687 | if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) { | |
688 | atmel_sha_fill_padding(ctx, 0); | |
689 | count = ctx->bufcnt; | |
690 | ctx->bufcnt = 0; | |
691 | err = atmel_sha_xmit_dma_map(dd, ctx, count, 1); | |
692 | } | |
693 | /* faster to handle last block with cpu */ | |
694 | else { | |
695 | atmel_sha_fill_padding(ctx, 0); | |
696 | count = ctx->bufcnt; | |
697 | ctx->bufcnt = 0; | |
698 | err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1); | |
699 | } | |
700 | ||
701 | dev_dbg(dd->dev, "final_req: err: %d\n", err); | |
702 | ||
703 | return err; | |
704 | } | |
705 | ||
706 | static void atmel_sha_copy_hash(struct ahash_request *req) | |
707 | { | |
708 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
709 | u32 *hash = (u32 *)ctx->digest; | |
710 | int i; | |
711 | ||
d4905b38 | 712 | if (ctx->flags & SHA_FLAGS_SHA1) |
ebc82efa NR |
713 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) |
714 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | |
d4905b38 NR |
715 | else if (ctx->flags & SHA_FLAGS_SHA224) |
716 | for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++) | |
717 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | |
718 | else if (ctx->flags & SHA_FLAGS_SHA256) | |
ebc82efa NR |
719 | for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) |
720 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | |
d4905b38 NR |
721 | else if (ctx->flags & SHA_FLAGS_SHA384) |
722 | for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++) | |
723 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | |
724 | else | |
725 | for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++) | |
726 | hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); | |
ebc82efa NR |
727 | } |
728 | ||
729 | static void atmel_sha_copy_ready_hash(struct ahash_request *req) | |
730 | { | |
731 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
732 | ||
733 | if (!req->result) | |
734 | return; | |
735 | ||
d4905b38 | 736 | if (ctx->flags & SHA_FLAGS_SHA1) |
ebc82efa | 737 | memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); |
d4905b38 NR |
738 | else if (ctx->flags & SHA_FLAGS_SHA224) |
739 | memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE); | |
740 | else if (ctx->flags & SHA_FLAGS_SHA256) | |
ebc82efa | 741 | memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); |
d4905b38 NR |
742 | else if (ctx->flags & SHA_FLAGS_SHA384) |
743 | memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE); | |
744 | else | |
745 | memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE); | |
ebc82efa NR |
746 | } |
747 | ||
748 | static int atmel_sha_finish(struct ahash_request *req) | |
749 | { | |
750 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
751 | struct atmel_sha_dev *dd = ctx->dd; | |
752 | int err = 0; | |
753 | ||
d4905b38 | 754 | if (ctx->digcnt[0] || ctx->digcnt[1]) |
ebc82efa NR |
755 | atmel_sha_copy_ready_hash(req); |
756 | ||
d4905b38 NR |
757 | dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1], |
758 | ctx->digcnt[0], ctx->bufcnt); | |
ebc82efa NR |
759 | |
760 | return err; | |
761 | } | |
762 | ||
763 | static void atmel_sha_finish_req(struct ahash_request *req, int err) | |
764 | { | |
765 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
766 | struct atmel_sha_dev *dd = ctx->dd; | |
767 | ||
768 | if (!err) { | |
769 | atmel_sha_copy_hash(req); | |
770 | if (SHA_FLAGS_FINAL & dd->flags) | |
771 | err = atmel_sha_finish(req); | |
772 | } else { | |
773 | ctx->flags |= SHA_FLAGS_ERROR; | |
774 | } | |
775 | ||
776 | /* atomic operation is not needed here */ | |
777 | dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | | |
778 | SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY); | |
779 | ||
780 | clk_disable_unprepare(dd->iclk); | |
781 | ||
782 | if (req->base.complete) | |
783 | req->base.complete(&req->base, err); | |
784 | ||
785 | /* handle new request */ | |
786 | tasklet_schedule(&dd->done_task); | |
787 | } | |
788 | ||
789 | static int atmel_sha_hw_init(struct atmel_sha_dev *dd) | |
790 | { | |
791 | clk_prepare_enable(dd->iclk); | |
792 | ||
d4905b38 | 793 | if (!(SHA_FLAGS_INIT & dd->flags)) { |
ebc82efa | 794 | atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST); |
ebc82efa NR |
795 | dd->flags |= SHA_FLAGS_INIT; |
796 | dd->err = 0; | |
797 | } | |
798 | ||
799 | return 0; | |
800 | } | |
801 | ||
d4905b38 NR |
802 | static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd) |
803 | { | |
804 | return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff; | |
805 | } | |
806 | ||
807 | static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd) | |
808 | { | |
809 | atmel_sha_hw_init(dd); | |
810 | ||
811 | dd->hw_version = atmel_sha_get_version(dd); | |
812 | ||
813 | dev_info(dd->dev, | |
814 | "version: 0x%x\n", dd->hw_version); | |
815 | ||
816 | clk_disable_unprepare(dd->iclk); | |
817 | } | |
818 | ||
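
/*
 * Enqueue @req (if any) and, unless the engine is already busy, dequeue
 * and start the next queued request. The return value reports the
 * queueing status of @req (typically -EINPROGRESS or -EBUSY), not the
 * outcome of the request that was started.
 */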
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);

	if (err)
		goto err1;

	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}
879 | ||
880 | static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op) | |
881 | { | |
882 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
883 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | |
884 | struct atmel_sha_dev *dd = tctx->dd; | |
885 | ||
886 | ctx->op = op; | |
887 | ||
888 | return atmel_sha_handle_queue(dd, req); | |
889 | } | |
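
/*
 * Buffer short updates internally: data that still fits in the buffer is
 * only copied here, and finup() transfers below ATMEL_SHA_DMA_THRESHOLD
 * are later fed to the hardware by the CPU instead of the DMA controller.
 */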
890 | ||
891 | static int atmel_sha_update(struct ahash_request *req) | |
892 | { | |
893 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
894 | ||
895 | if (!req->nbytes) | |
896 | return 0; | |
897 | ||
898 | ctx->total = req->nbytes; | |
899 | ctx->sg = req->src; | |
900 | ctx->offset = 0; | |
901 | ||
902 | if (ctx->flags & SHA_FLAGS_FINUP) { | |
903 | if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD) | |
904 | /* faster to use CPU for short transfers */ | |
905 | ctx->flags |= SHA_FLAGS_CPU; | |
906 | } else if (ctx->bufcnt + ctx->total < ctx->buflen) { | |
907 | atmel_sha_append_sg(ctx); | |
908 | return 0; | |
909 | } | |
910 | return atmel_sha_enqueue(req, SHA_OP_UPDATE); | |
911 | } | |
912 | ||
913 | static int atmel_sha_final(struct ahash_request *req) | |
914 | { | |
915 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
916 | struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); | |
917 | struct atmel_sha_dev *dd = tctx->dd; | |
918 | ||
919 | int err = 0; | |
920 | ||
921 | ctx->flags |= SHA_FLAGS_FINUP; | |
922 | ||
923 | if (ctx->flags & SHA_FLAGS_ERROR) | |
924 | return 0; /* uncompleted hash is not needed */ | |
925 | ||
926 | if (ctx->bufcnt) { | |
927 | return atmel_sha_enqueue(req, SHA_OP_FINAL); | |
928 | } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */ | |
929 | err = atmel_sha_hw_init(dd); | |
930 | if (err) | |
931 | goto err1; | |
932 | ||
933 | dd->flags |= SHA_FLAGS_BUSY; | |
934 | err = atmel_sha_final_req(dd); | |
935 | } else { | |
936 | /* copy ready hash (+ finalize hmac) */ | |
937 | return atmel_sha_finish(req); | |
938 | } | |
939 | ||
940 | err1: | |
941 | if (err != -EINPROGRESS) | |
942 | /* done_task will not finish it, so do it here */ | |
943 | atmel_sha_finish_req(req, err); | |
944 | ||
945 | return err; | |
946 | } | |
947 | ||
948 | static int atmel_sha_finup(struct ahash_request *req) | |
949 | { | |
950 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
951 | int err1, err2; | |
952 | ||
953 | ctx->flags |= SHA_FLAGS_FINUP; | |
954 | ||
955 | err1 = atmel_sha_update(req); | |
956 | if (err1 == -EINPROGRESS || err1 == -EBUSY) | |
957 | return err1; | |
958 | ||
959 | /* | |
960 | * final() has to be always called to cleanup resources | |
961 | * even if udpate() failed, except EINPROGRESS | |
962 | */ | |
963 | err2 = atmel_sha_final(req); | |
964 | ||
965 | return err1 ?: err2; | |
966 | } | |
967 | ||
968 | static int atmel_sha_digest(struct ahash_request *req) | |
969 | { | |
970 | return atmel_sha_init(req) ?: atmel_sha_finup(req); | |
971 | } | |
972 | ||
be95f0fa | 973 | static int atmel_sha_cra_init(struct crypto_tfm *tfm) |
ebc82efa | 974 | { |
ebc82efa NR |
975 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
976 | sizeof(struct atmel_sha_reqctx) + | |
d4905b38 | 977 | SHA_BUFFER_LEN + SHA512_BLOCK_SIZE); |
ebc82efa NR |
978 | |
979 | return 0; | |
980 | } | |

static struct ahash_alg sha_1_256_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static struct ahash_alg sha_224_alg = {
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "atmel-sha224",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
};

static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};
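
/*
 * Tasklet bottom half: runs after the IRQ handler has flagged
 * OUTPUT_READY and/or DMA_READY, and either restarts the DMA for the
 * remaining data or finishes the current request.
 */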
static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
	int err = 0;

	if (!(SHA_FLAGS_BUSY & dd->flags)) {
		atmel_sha_handle_queue(dd, NULL);
		return;
	}

	if (SHA_FLAGS_CPU & dd->flags) {
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return;

finish:
	/* finish current request */
	atmel_sha_finish_req(dd->req, err);
}

static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1155 | ||
1156 | static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd) | |
1157 | { | |
1158 | int i; | |
1159 | ||
d4905b38 NR |
1160 | for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) |
1161 | crypto_unregister_ahash(&sha_1_256_algs[i]); | |
1162 | ||
1163 | if (dd->caps.has_sha224) | |
1164 | crypto_unregister_ahash(&sha_224_alg); | |
1165 | ||
1166 | if (dd->caps.has_sha_384_512) { | |
1167 | for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) | |
1168 | crypto_unregister_ahash(&sha_384_512_algs[i]); | |
1169 | } | |
ebc82efa NR |
1170 | } |
1171 | ||
1172 | static int atmel_sha_register_algs(struct atmel_sha_dev *dd) | |
1173 | { | |
1174 | int err, i, j; | |
1175 | ||
d4905b38 NR |
1176 | for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) { |
1177 | err = crypto_register_ahash(&sha_1_256_algs[i]); | |
ebc82efa | 1178 | if (err) |
d4905b38 NR |
1179 | goto err_sha_1_256_algs; |
1180 | } | |
1181 | ||
1182 | if (dd->caps.has_sha224) { | |
1183 | err = crypto_register_ahash(&sha_224_alg); | |
1184 | if (err) | |
1185 | goto err_sha_224_algs; | |
1186 | } | |
1187 | ||
1188 | if (dd->caps.has_sha_384_512) { | |
1189 | for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) { | |
1190 | err = crypto_register_ahash(&sha_384_512_algs[i]); | |
1191 | if (err) | |
1192 | goto err_sha_384_512_algs; | |
1193 | } | |
ebc82efa NR |
1194 | } |
1195 | ||
1196 | return 0; | |
1197 | ||
d4905b38 NR |
1198 | err_sha_384_512_algs: |
1199 | for (j = 0; j < i; j++) | |
1200 | crypto_unregister_ahash(&sha_384_512_algs[j]); | |
1201 | crypto_unregister_ahash(&sha_224_alg); | |
1202 | err_sha_224_algs: | |
1203 | i = ARRAY_SIZE(sha_1_256_algs); | |
1204 | err_sha_1_256_algs: | |
ebc82efa | 1205 | for (j = 0; j < i; j++) |
d4905b38 | 1206 | crypto_unregister_ahash(&sha_1_256_algs[j]); |
ebc82efa NR |
1207 | |
1208 | return err; | |
1209 | } | |

static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave	*sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
				struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask_in;

	/* Try to grab DMA channel */
	dma_cap_zero(mask_in);
	dma_cap_set(DMA_SLAVE, mask_in);

	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan) {
		dev_warn(dd->dev, "no DMA channel available\n");
		return err;
	}

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		SHA_REG_DIN(0);
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	return 0;
}

static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
}
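
/*
 * Derive the capability set from the major IP revision read from the
 * hardware; unknown revisions conservatively fall back to the minimum
 * feature set (no DMA, SHA1/SHA256 only).
 */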
static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
{
	dd->caps.has_dma = 0;
	dd->caps.has_dualbuff = 0;
	dd->caps.has_sha224 = 0;
	dd->caps.has_sha_384_512 = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x410:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		break;
	case 0x400:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		break;
	case 0x320:
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged sha version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_sha_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-sha" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);

static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else /* CONFIG_OF */
static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int atmel_sha_probe(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *sha_res;
	unsigned long sha_phys_size;
	int err;

	sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev),
			      GFP_KERNEL);
	if (sha_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto sha_dd_err;
	}

	sha_dd->dev = dev;

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);

	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
					(unsigned long)sha_dd);

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	sha_dd->irq = -1;

	/* Get the base address */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	sha_dd->phys_base = sha_res->start;
	sha_phys_size = resource_size(sha_res);

	/* Get the IRQ */
	sha_dd->irq = platform_get_irq(pdev, 0);
	if (sha_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = sha_dd->irq;
		goto res_err;
	}

	err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha",
			  sha_dd);
	if (err) {
		dev_err(dev, "unable to request sha irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	sha_dd->iclk = clk_get(&pdev->dev, "sha_clk");
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);
		goto clk_err;
	}

	sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size);
	if (!sha_dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto sha_io_err;
	}

	atmel_sha_hw_version_init(sha_dd);

	atmel_sha_get_cap(sha_dd);

	if (sha_dd->caps.has_dma) {
		pdata = pdev->dev.platform_data;
		if (!pdata) {
			pdata = atmel_sha_of_init(pdev);
			if (IS_ERR(pdata)) {
				dev_err(&pdev->dev, "platform data not available\n");
				err = PTR_ERR(pdata);
				goto err_pdata;
			}
		}
		if (!pdata->dma_slave) {
			err = -ENXIO;
			goto err_pdata;
		}
		err = atmel_sha_dma_init(sha_dd, pdata);
		if (err)
			goto err_sha_dma;

		dev_info(dev, "using %s for DMA transfers\n",
				dma_chan_name(sha_dd->dma_lch_in.chan));
	}

	spin_lock(&atmel_sha.lock);
	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
			sha_dd->caps.has_sha224 ? "/SHA224" : "",
			sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");

	return 0;

err_algs:
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);
	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
err_pdata:
	iounmap(sha_dd->io_base);
sha_io_err:
	clk_put(sha_dd->iclk);
clk_err:
	free_irq(sha_dd->irq, sha_dd);
res_err:
	tasklet_kill(&sha_dd->done_task);
sha_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_sha_remove(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;

	sha_dd = platform_get_drvdata(pdev);
	if (!sha_dd)
		return -ENODEV;
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);

	atmel_sha_unregister_algs(sha_dd);

	tasklet_kill(&sha_dd->done_task);

	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);

	iounmap(sha_dd->io_base);

	clk_put(sha_dd->iclk);

	if (sha_dd->irq >= 0)
		free_irq(sha_dd->irq, sha_dd);

	return 0;
}

static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.of_match_table	= of_match_ptr(atmel_sha_dt_ids),
	},
};

module_platform_driver(atmel_sha_driver);

MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");