/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

/* for print_hex_dumps with line references */
#ifdef DEBUG
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

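/*
 * Note: buf_0/buf_1 form a ping-pong pair selected by current_buf; one holds
 * the partial block carried over from the previous update while the other
 * collects the tail of the current request. The update/final/finup function
 * pointers implement a small per-request state machine: ahash_init() points
 * them at the "first"/"no_ctx" variants, and they are switched to the
 * context-carrying variants once hardware state exists in caam_ctx.
 */
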
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc,
						struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg)
{
	dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it contains data. The buffer may
 * have been used for a previous request, in which case its old mapping must
 * be unmapped first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state,
				     int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash read data from seqin following state->caam_ctx,
 * and write resulting class2 context to seqout, which may be state->caam_ctx
 * or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

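/*
 * Note: the MATH command above sets the variable sequence-in length
 * (VARSEQINLEN) to SEQINLEN plus REG0; REG0 is expected to be zero at this
 * point, so the following variable-length (VLF) FIFO LOAD simply consumes
 * whatever input remains in the sequence after any context import.
 */
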
/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash firsts and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

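/*
 * After ahash_set_sh_desc() returns, ctx holds five DMA-mapped shared
 * descriptors, one per operation type: update (continue from imported
 * context), update_first (init, no context import), final and finup
 * (finalize to digestsize) and digest (one-shot init+finalize). Each job
 * descriptor built below merely points at one of these.
 */
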
static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}

/* Digest the key if it is too long, shrinking it to digestsize bytes */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

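/*
 * Pre-hashing over-long keys follows the standard HMAC rule (RFC 2104):
 * a key longer than the block size is first replaced by its digest before
 * being split/padded for the MDHA.
 */
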
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		ret = -ENOMEM;
		goto map_err;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

map_err:
	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

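/*
 * Note: the generated "split key" is the MDHA-specific HMAC key form, i.e.
 * the precomputed inner/outer pad hash states (hence mdpadlen * 2 above),
 * padded to a 16-byte multiple and kept encrypted, which is why
 * append_key_ahash() loads it with KEY_ENC.
 */
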
/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			return ret;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*next_buflen, *buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				SEC4_SG_LEN_FIN;
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize,
				DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		return ret;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			  buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize,
				DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes);
	dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = src_nents;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

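/*
 * ahash_digest() is the one-shot path: it uses the INITFINAL shared
 * descriptor, so no running context is imported or exported and the whole
 * request maps straight from req->src to req->result.
 */
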
/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	edesc->sec4_sg_bytes = 0;
	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1);
		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int sh_len;
	int ret = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return -ENOMEM;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
			   &state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
		dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			edesc->sec4_sg_dma = dma_map_single(jrdev,
							    edesc->sec4_sg,
							    sec4_sg_bytes,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
				dev_err(jrdev, "unable to map S/G table\n");
				return -ENOMEM;
			}
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			return ret;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

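/*
 * State-machine summary: ahash_init() routes the first .update through
 * ahash_update_first (INIT descriptor, exports hardware context) and a
 * .final/.finup with no prior update through the no-context digest paths.
 * Once a job has produced context in state->caam_ctx, the pointers are
 * switched to ahash_update_ctx/ahash_finup_ctx/ahash_final_ctx, which
 * import that context before hashing.
 */
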
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}

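/*
 * Note: export/import serialize the entire transform context and request
 * state back-to-back, so the opaque export blob is
 * sizeof(struct caam_hash_ctx) + sizeof(struct caam_hash_state) bytes and
 * is only meaningful when re-imported into this same driver.
 */
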
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA224_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA384_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int ret = 0;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}
	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup),
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");