/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"
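
/*
 * Iterator used to walk a DMA-mode hash request: the base iterator tracks
 * how much of the request has been queued so far, while the src iterator
 * follows the source scatterlist, one SRAM payload-sized operation at a
 * time.
 */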
struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}
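
/*
 * Example: all three algorithms use a 64-byte block with an 8-byte length
 * trailer, so for creq->len == 120 bytes, index = 120 % 64 = 56 and
 * padlen = 64 + 56 - 56 = 64: one byte of 0x80, 63 bytes of zeros, and
 * the 8-byte bit count then land exactly on a block boundary
 * (120 + 64 + 8 = 192).
 */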

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}
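
/*
 * The resulting trailer is the standard Merkle-Damgard padding: 0x80,
 * zeros up to 56 mod 64, then the message length in bits as a 64-bit
 * integer, little endian for MD5 (algo_le) and big endian for
 * SHA1/SHA256.
 */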

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	/* Copy the (engine-adjusted) op template into the SRAM once. */
	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	for (i = 0; i < digsize / 4; i++)
		writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
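
/*
 * Standard (non-DMA) processing thus walks a small state machine on the
 * fragment mode: FIRST_FRAG becomes MID_FRAG once a partial block has
 * been pushed, and turns into NOT_FRAG (single-shot) or LAST_FRAG when
 * the final data and padding fit in the SRAM payload.
 */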

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);

	if (creq->last_req) {
		/*
		 * Hardware's MD5 digest is in little endian format, but
		 * SHA in big endian format
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_be32(creq->state[i]);
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}
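
/*
 * creq->state[] holds the digest words in CPU endianness; only the final
 * copy into ahashreq->result is byte-swapped. For example, an MD5 state
 * word 0x67452301 is stored as the bytes 01 23 45 67, while the same
 * SHA1/SHA256 word would be stored as 67 45 23 01.
 */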

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static int mv_cesa_ahash_init(struct ahash_request *req,
			      struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;

	return 0;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}

static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
		*cached = true;

		if (!req->nbytes)
			return 0;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return 0;
}
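
/*
 * Requests that leave less than one full 64-byte block pending are simply
 * accumulated in creq->cache and never touch the engine: a series of
 * 8-byte updates, for instance, only triggers hardware processing on the
 * update that makes the cached data cross the block boundary.
 */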

static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_dma_iter *dma_iter,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET +
					    frag_len,
					    ahashdreq->padding_dma,
					    len, CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
				  flags);
	if (IS_ERR(op))
		return op;

	if (len == trailerlen)
		return op;

	padoff += len;

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}
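
/*
 * The padding generated above may itself be split: whatever fits after
 * the last data fragment is appended to it, and any remainder (padoff
 * onward) is hashed as one extra fragment of its own from the start of
 * the SRAM payload area.
 */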

static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, &iter, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	if (op) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
				       CESA_TDMA_BREAK_CHAIN);

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}
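
/*
 * A full update thus becomes a chain of TDMA descriptors: an optional
 * cache transfer, then for each SRAM-sized chunk a set of data transfers
 * followed by an op block and a dummy launch descriptor, and finally a
 * dummy-end descriptor. CESA_TDMA_BREAK_CHAIN on the last descriptor
 * makes the engine stop at the end of this request.
 */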

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	ret = mv_cesa_ahash_cache_req(req, cached);
	if (ret)
		return ret;

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_ahash_dma_req_init(req);

	return ret;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
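
/*
 * Import therefore re-derives the cache state from the byte count alone:
 * len % blocksize bytes must still be pending. A state exported after
 * hashing 100 bytes with a 64-byte block algorithm, for example, is
 * restored as creq->len = 100 with the trailing 36 bytes back in
 * creq->cache.
 */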

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}
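
/*
 * This is the standard HMAC key schedule: HMAC(K, m) =
 * H((K ^ opad) || H((K ^ ipad) || m)), with ipad/opad the key padded to a
 * block and XORed with 0x36/0x5c. Hashing each padded key block once and
 * exporting the intermediate state lets the hardware start every HMAC
 * request from these precomputed IVs instead of re-hashing the key.
 */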

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};