/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
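/*
 * Editor's sketch (not part of the original driver): how a job descriptor of
 * the shape drawn in the header comment can be built with the desc_constr.h
 * helpers. The function name and arguments are hypothetical; the real
 * per-request builders live further down in this file.
 */
#if 0
static void example_job_desc(u32 *desc, dma_addr_t sh_desc_dma, int sh_len,
			     dma_addr_t src_dma, dma_addr_t dst_dma, u32 len)
{
	/* Header plus pointer to the shared descriptor (context + keys) */
	init_job_desc_shared(desc, sh_desc_dma, sh_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ OUT PTR: where the output goes */
	append_seq_out_ptr(desc, dst_dma, len, 0);
	/* SEQ IN PTR: where the input comes from */
	append_seq_in_ptr(desc, src_dma, len, 0);
}
#endif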
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(6 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 19 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
#define DESC_RFC4106_GIVENC_LEN		(DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
#define DESC_RFC4543_GIVENC_LEN		(DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(DESC_RFC4543_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
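/*
 * Editor's worked example of the worst-case budget above (assuming
 * CAAM_CMD_SZ == sizeof(u32) == 4 and the usual crypto API constants):
 * DESC_RFC4543_GIVENC_LEN = (3 + 30) * 4 = 132 bytes, CAAM_MAX_KEY_SIZE =
 * 32 (AES) + 4 (nonce) + 2 * 64 (split SHA-512 key) = 164 bytes, so
 * DESC_MAX_USED_BYTES = 296 and each shared descriptor buffer in
 * struct caam_ctx is DESC_MAX_USED_LEN = 74 words.
 */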
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head alg_list;
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}
/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
{
	append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
			LDST_SRCDST_BYTE_CONTEXT |
			(ivoffset << LDST_OFFSET_SHIFT));
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
		    (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
}
/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
 * If all data, including src (with assoc and iv) or dst (with iv only) are
 * contiguous
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)
/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
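/*
 * Editor's sketch of the ctx->key layout that the setkey helpers below rely
 * on (authenc case; derived from the code in this file, not normative):
 *
 *   ctx->key:  [ split auth key, padded to split_key_pad_len ]
 *              [ encryption key (enckeylen bytes)            ]
 *              [ RFC3686 nonce, when used (counted in enckeylen) ]
 *
 * ctx->key_dma maps this whole region so the keys can also be referenced by
 * pointer whenever they do not fit inline in the shared descriptor.
 */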
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
			       enckeylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
	const char *alg_name = crypto_tfm_alg_name(ctfm);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 7);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to IV reading - is the only input data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* jump to ICV writing */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* read IV - is the only input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
			     FIFOLD_TYPE_LAST1);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);

	/* read IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* jump to zero-payload command if cryptlen is zero */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	/* if assoclen is ZERO, skip reading assoc data */
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 4);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* if assoclen is ZERO, jump to ICV reading */
	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);
	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
	u32 *desc;
	u32 geniv;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Read Salt */
	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
	/* Read AES-GCM-ESP IV */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* rfc4106_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
			       (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to OFIFO */
	write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);

	/* Read Salt and generated IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
		   FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, move_cmd);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by generated IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Store generated IV and encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;
	u32 geniv;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Load AES-GMAC ESP IV into Math1 register */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
		   LDST_CLASS_DECO | tfm->ivsize);

	/* Wait for the DMA transaction to finish */
	append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
		    (1 << JUMP_OFFSET_SHIFT));

	/* Overwrite blank immediate AES-GMAC ESP IV data */
	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Overwrite blank immediate AAD data */
	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = (seqinlen - ivsize) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Read Salt and AES-GMAC ESP IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Will write cryptlen bytes */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Authenticate AES-GMAC ESP IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_AAD | tfm->ivsize);
	set_move_tgt_here(desc, write_aad_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Load AES-GMAC ESP IV into Math1 register */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
		   LDST_CLASS_DECO | tfm->ivsize);

	/* Wait for the DMA transaction to finish */
	append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
		    (1 << JUMP_OFFSET_SHIFT));

	/* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);

	/* Overwrite blank immediate AES-GMAC ESP IV data */
	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Overwrite blank immediate AAD data */
	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read Salt and AES-GMAC ESP IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* Will write cryptlen bytes */
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/* Authenticate AES-GMAC ESP IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_AAD | tfm->ivsize);
	set_move_tgt_here(desc, write_aad_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* In-snoop cryptlen data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* rfc4543_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move generated IV to Math1 register */
	append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
		    (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Overwrite blank immediate AES-GMAC IV data */
	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				   (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Overwrite blank immediate AAD data */
	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
				    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Copy generated IV to OFIFO */
	append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
		    (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read Salt and AES-GMAC generated IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
	/* Append Salt */
	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
	set_move_tgt_here(desc, write_iv_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC generated IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* No need to reload iv */
	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Authenticate AES-GMAC IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_AAD | tfm->ivsize);
	set_move_tgt_here(desc, write_aad_cmd);
	/* Blank commands. Will be overwritten by AES-GMAC IV. */
	append_cmd(desc, 0x00000000);
	append_cmd(desc, 0x00000000);
	/* End of blank commands */

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}
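/*
 * Editor's note: the "split key" produced here is CAAM's precomputed HMAC
 * state - the MDHA ipad/opad digests derived from the raw auth key - so the
 * shared descriptors can load it with KEY_DEST_MDHA_SPLIT | KEY_ENC instead
 * of rehashing the raw key on every request. gen_split_key() runs a small
 * job on the job ring to compute it.
 */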
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* append the encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
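/*
 * Editor's example of the sizing math above: for SHA-256 (mdpadlen[3] == 32)
 * the split key is 2 * 32 = 64 bytes and ALIGN(64, 16) leaves it at 64; for
 * SHA-1 (mdpadlen[1] == 20) it is 2 * 20 = 40 bytes, padded up to 48.
 */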
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}
static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}
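/*
 * Editor's example for the salt handling above: rfc4106(gcm(aes)) with a
 * 128-bit AES key is fed 20 bytes of key material - ctx->enckeylen becomes
 * 16 and ctx->key[16..19] holds the 4-byte salt that the shared descriptors
 * splice in front of the 8-byte per-packet IV.
 */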
static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u32 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load Nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)(key + keylen);
		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX |
		    (crt->ivsize << MOVE_LEN_SHIFT) |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to memory */
	append_seq_store(desc, crt->ivsize,
			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			 (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_u32(desc, (u32)1, LDST_IMM |
				    LDST_CLASS_1_CCB |
				    LDST_SRCDST_BYTE_CONTEXT |
				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				     LDST_OFFSET_SHIFT));

	if (ctx1_iv_off)
		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
			    (1 << JUMP_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}
/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if source is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}
static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}
static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			       sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}
static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	bool is_gcm = false;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		if (is_gcm)
			src_dma = edesc->iv_dma;
		else
			src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}
/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	bool is_gcm = false;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen, 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		if (is_gcm)
			src_dma = edesc->iv_dma;
		else
			src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  (edesc->assoc_nents +
				   (is_gcm ? 1 + edesc->src_nents : 0));
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}
/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents + 1;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (!edesc->src_nents) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;
	bool is_gcm = false;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	/*
	 * Check if data are contiguous.
	 * GCM expected input sequence: IV, AAD, text
	 * All other - expected input sequence: AAD, IV, text
	 */
	if (is_gcm)
		all_contig = (!assoc_nents &&
			      iv_dma + ivsize == sg_dma_address(req->assoc) &&
			      !src_nents && sg_dma_address(req->assoc) +
			      req->assoclen == sg_dma_address(req->src));
	else
		all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
			      req->assoclen == iv_dma && !src_nents &&
			      iv_dma + ivsize == sg_dma_address(req->src));
	if (!all_contig) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		if (!is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      assoc_nents,
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;

		if (is_gcm) {
			sg_to_sec4_sg(req->assoc,
				      assoc_nents,
				      edesc->sec4_sg +
				      sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		sg_to_sec4_sg_last(req->src,
				   src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor*/
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	bool is_gcm = false;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
				     &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	      OP_ALG_ALGSEL_AES) &&
	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
		is_gcm = true;

	/*
	 * Check if data are contiguous.
	 * GCM expected input sequence: IV, AAD, text
	 * All other - expected input sequence: AAD, IV, text
	 */

	if (is_gcm) {
		if (assoc_nents || iv_dma + ivsize !=
		    sg_dma_address(req->assoc) || src_nents ||
		    sg_dma_address(req->assoc) + req->assoclen !=
		    sg_dma_address(req->src))
			contig &= ~GIV_SRC_CONTIG;
	} else {
		if (assoc_nents ||
		    sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
		    src_nents || iv_dma + ivsize != sg_dma_address(req->src))
			contig &= ~GIV_SRC_CONTIG;
	}

	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;

	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (req->src == req->dst &&
		    (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
			contig &= ~GIV_DST_CONTIG;
	}

	/*
	 * Add new sg entries for GCM output sequence.
	 * Expected output sequence: IV, encrypted text.
	 */
	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
		sec4_sg_len += 1 + src_nents;

	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1 + dst_nents;
	}

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*contig_ptr = contig;

	sec4_sg_index = 0;
	if (!(contig & GIV_SRC_CONTIG)) {
		if (!is_gcm) {
			sg_to_sec4_sg(req->assoc, assoc_nents,
				      edesc->sec4_sg + sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;

		if (is_gcm) {
			sg_to_sec4_sg(req->assoc, assoc_nents,
				      edesc->sec4_sg + sec4_sg_index, 0);
			sec4_sg_index += assoc_nents;
		}

		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}

	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}
static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor*/
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
{
	return aead_encrypt(&areq->areq);
}
/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_enc,
			    ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_dec,
			    ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes,
				bool *iv_contig_out)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
		iv_contig = true;
	else
		dst_nents = dst_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(*edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}

	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
					   CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};
static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),ecb(cipher_null))",
		.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
3930 .type
= CRYPTO_ALG_TYPE_AEAD
,
3932 .setkey
= aead_setkey
,
3933 .setauthsize
= aead_setauthsize
,
3934 .encrypt
= aead_encrypt
,
3935 .decrypt
= aead_decrypt
,
3936 .givencrypt
= aead_givencrypt
,
3937 .geniv
= "<built-in>",
3938 .ivsize
= CTR_RFC3686_IV_SIZE
,
3939 .maxauthsize
= SHA256_DIGEST_SIZE
,
3941 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CTR_MOD128
,
3942 .class2_alg_type
= OP_ALG_ALGSEL_SHA256
|
3943 OP_ALG_AAI_HMAC_PRECOMP
,
3944 .alg_op
= OP_ALG_ALGSEL_SHA256
| OP_ALG_AAI_HMAC
,
3947 .name
= "authenc(hmac(sha384),rfc3686(ctr(aes)))",
3948 .driver_name
= "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
3950 .type
= CRYPTO_ALG_TYPE_AEAD
,
3952 .setkey
= aead_setkey
,
3953 .setauthsize
= aead_setauthsize
,
3954 .encrypt
= aead_encrypt
,
3955 .decrypt
= aead_decrypt
,
3956 .givencrypt
= aead_givencrypt
,
3957 .geniv
= "<built-in>",
3958 .ivsize
= CTR_RFC3686_IV_SIZE
,
3959 .maxauthsize
= SHA384_DIGEST_SIZE
,
3961 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CTR_MOD128
,
3962 .class2_alg_type
= OP_ALG_ALGSEL_SHA384
|
3963 OP_ALG_AAI_HMAC_PRECOMP
,
3964 .alg_op
= OP_ALG_ALGSEL_SHA384
| OP_ALG_AAI_HMAC
,
3967 .name
= "authenc(hmac(sha512),rfc3686(ctr(aes)))",
3968 .driver_name
= "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
3970 .type
= CRYPTO_ALG_TYPE_AEAD
,
3972 .setkey
= aead_setkey
,
3973 .setauthsize
= aead_setauthsize
,
3974 .encrypt
= aead_encrypt
,
3975 .decrypt
= aead_decrypt
,
3976 .givencrypt
= aead_givencrypt
,
3977 .geniv
= "<built-in>",
3978 .ivsize
= CTR_RFC3686_IV_SIZE
,
3979 .maxauthsize
= SHA512_DIGEST_SIZE
,
3981 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CTR_MOD128
,
3982 .class2_alg_type
= OP_ALG_ALGSEL_SHA512
|
3983 OP_ALG_AAI_HMAC_PRECOMP
,
3984 .alg_op
= OP_ALG_ALGSEL_SHA512
| OP_ALG_AAI_HMAC
,
3987 .name
= "rfc4106(gcm(aes))",
3988 .driver_name
= "rfc4106-gcm-aes-caam",
3990 .type
= CRYPTO_ALG_TYPE_AEAD
,
3992 .setkey
= rfc4106_setkey
,
3993 .setauthsize
= rfc4106_setauthsize
,
3994 .encrypt
= aead_encrypt
,
3995 .decrypt
= aead_decrypt
,
3996 .givencrypt
= aead_givencrypt
,
3997 .geniv
= "<built-in>",
3999 .maxauthsize
= AES_BLOCK_SIZE
,
4001 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
4004 .name
= "rfc4543(gcm(aes))",
4005 .driver_name
= "rfc4543-gcm-aes-caam",
4007 .type
= CRYPTO_ALG_TYPE_AEAD
,
4009 .setkey
= rfc4543_setkey
,
4010 .setauthsize
= rfc4543_setauthsize
,
4011 .encrypt
= aead_encrypt
,
4012 .decrypt
= aead_decrypt
,
4013 .givencrypt
= aead_givencrypt
,
4014 .geniv
= "<built-in>",
4016 .maxauthsize
= AES_BLOCK_SIZE
,
4018 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
4020 /* Galois Counter Mode */
4023 .driver_name
= "gcm-aes-caam",
4025 .type
= CRYPTO_ALG_TYPE_AEAD
,
4027 .setkey
= gcm_setkey
,
4028 .setauthsize
= gcm_setauthsize
,
4029 .encrypt
= aead_encrypt
,
4030 .decrypt
= aead_decrypt
,
4032 .geniv
= "<built-in>",
4034 .maxauthsize
= AES_BLOCK_SIZE
,
4036 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_GCM
,
4038 /* ablkcipher descriptor */
4041 .driver_name
= "cbc-aes-caam",
4042 .blocksize
= AES_BLOCK_SIZE
,
4043 .type
= CRYPTO_ALG_TYPE_GIVCIPHER
,
4044 .template_ablkcipher
= {
4045 .setkey
= ablkcipher_setkey
,
4046 .encrypt
= ablkcipher_encrypt
,
4047 .decrypt
= ablkcipher_decrypt
,
4048 .givencrypt
= ablkcipher_givencrypt
,
4049 .geniv
= "<built-in>",
4050 .min_keysize
= AES_MIN_KEY_SIZE
,
4051 .max_keysize
= AES_MAX_KEY_SIZE
,
4052 .ivsize
= AES_BLOCK_SIZE
,
4054 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CBC
,
4057 .name
= "cbc(des3_ede)",
4058 .driver_name
= "cbc-3des-caam",
4059 .blocksize
= DES3_EDE_BLOCK_SIZE
,
4060 .type
= CRYPTO_ALG_TYPE_GIVCIPHER
,
4061 .template_ablkcipher
= {
4062 .setkey
= ablkcipher_setkey
,
4063 .encrypt
= ablkcipher_encrypt
,
4064 .decrypt
= ablkcipher_decrypt
,
4065 .givencrypt
= ablkcipher_givencrypt
,
4066 .geniv
= "<built-in>",
4067 .min_keysize
= DES3_EDE_KEY_SIZE
,
4068 .max_keysize
= DES3_EDE_KEY_SIZE
,
4069 .ivsize
= DES3_EDE_BLOCK_SIZE
,
4071 .class1_alg_type
= OP_ALG_ALGSEL_3DES
| OP_ALG_AAI_CBC
,
4075 .driver_name
= "cbc-des-caam",
4076 .blocksize
= DES_BLOCK_SIZE
,
4077 .type
= CRYPTO_ALG_TYPE_GIVCIPHER
,
4078 .template_ablkcipher
= {
4079 .setkey
= ablkcipher_setkey
,
4080 .encrypt
= ablkcipher_encrypt
,
4081 .decrypt
= ablkcipher_decrypt
,
4082 .givencrypt
= ablkcipher_givencrypt
,
4083 .geniv
= "<built-in>",
4084 .min_keysize
= DES_KEY_SIZE
,
4085 .max_keysize
= DES_KEY_SIZE
,
4086 .ivsize
= DES_BLOCK_SIZE
,
4088 .class1_alg_type
= OP_ALG_ALGSEL_DES
| OP_ALG_AAI_CBC
,
4092 .driver_name
= "ctr-aes-caam",
4094 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
4095 .template_ablkcipher
= {
4096 .setkey
= ablkcipher_setkey
,
4097 .encrypt
= ablkcipher_encrypt
,
4098 .decrypt
= ablkcipher_decrypt
,
4100 .min_keysize
= AES_MIN_KEY_SIZE
,
4101 .max_keysize
= AES_MAX_KEY_SIZE
,
4102 .ivsize
= AES_BLOCK_SIZE
,
4104 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CTR_MOD128
,
4107 .name
= "rfc3686(ctr(aes))",
4108 .driver_name
= "rfc3686-ctr-aes-caam",
4110 .type
= CRYPTO_ALG_TYPE_GIVCIPHER
,
4111 .template_ablkcipher
= {
4112 .setkey
= ablkcipher_setkey
,
4113 .encrypt
= ablkcipher_encrypt
,
4114 .decrypt
= ablkcipher_decrypt
,
4115 .givencrypt
= ablkcipher_givencrypt
,
4116 .geniv
= "<built-in>",
4117 .min_keysize
= AES_MIN_KEY_SIZE
+
4118 CTR_RFC3686_NONCE_SIZE
,
4119 .max_keysize
= AES_MAX_KEY_SIZE
+
4120 CTR_RFC3686_NONCE_SIZE
,
4121 .ivsize
= CTR_RFC3686_IV_SIZE
,
4123 .class1_alg_type
= OP_ALG_ALGSEL_AES
| OP_ALG_AAI_CTR_MOD128
,
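/*
 * Usage sketch (hypothetical consumer, NOT part of this driver): once the
 * templates above are registered, kernel users reach the CAAM
 * implementations through the generic crypto API by algorithm name; the
 * CAAM variants win algorithm selection via their higher cra_priority.
 * The function name, key buffer, and key length below are illustrative
 * placeholders.
 */
static int __maybe_unused caam_example_alloc_gcm(const u8 *key,
						 unsigned int keylen)
{
	struct crypto_aead *tfm;
	int ret;

	/* resolves to "gcm-aes-caam" when this driver is loaded */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);

	crypto_free_aead(tfm);
	return ret;
}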
struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}
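/*
 * Illustrative sketch (unused; assumptions noted): caam_cra_init() above
 * only stores the composed OPERATION-command header templates. The
 * shared-descriptor construction paths later splice them into descriptors
 * roughly as below; the descriptor buffer and the encrypt direction here
 * are assumed purely for illustration.
 */
static void __maybe_unused caam_example_use_alg_type(u32 *desc,
						     struct caam_ctx *ctx)
{
	/* class 1 (cipher) operation built from the stored template */
	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
			 OP_ALG_ENCRYPT);
}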
static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}
static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &alg_list);
	}

	if (!list_empty(&alg_list))
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}
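/*
 * For reference, a successfully registered transform shows up in
 * /proc/crypto roughly as below (illustrative; the exact field set varies
 * by algorithm type and kernel version):
 *
 *	name         : gcm(aes)
 *	driver       : gcm-aes-caam
 *	priority     : 3000
 *	selftest     : passed
 *	type         : aead
 *	async        : yes
 */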
module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");