2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <crypto/rng.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
/*
 * Build the hardware cipher-config word for AES-CBC encryption:
 * CBC mode, no key conversion, encrypt direction.
 * @alg: AES key-size selector (ICP_QAT_HW_CIPHER_ALGO_AES{128,192,256}).
 */
#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)
/*
 * Build the hardware cipher-config word for AES-CBC decryption:
 * CBC mode, key conversion enabled (HW derives the decrypt key
 * schedule), decrypt direction.
 * @alg: AES key-size selector (ICP_QAT_HW_CIPHER_ALGO_AES{128,192,256}).
 */
#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)
76 static atomic_t active_dev
;
84 struct qat_alg_buf_list
{
87 uint32_t num_mapped_bufs
;
88 struct qat_alg_buf bufers
[];
89 } __packed
__aligned(64);
91 /* Common content descriptor */
94 struct qat_enc
{ /* Encrypt content desc */
95 struct icp_qat_hw_cipher_algo_blk cipher
;
96 struct icp_qat_hw_auth_algo_blk hash
;
98 struct qat_dec
{ /* Decrytp content desc */
99 struct icp_qat_hw_auth_algo_blk hash
;
100 struct icp_qat_hw_cipher_algo_blk cipher
;
105 #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
107 struct qat_auth_state
{
108 uint8_t data
[MAX_AUTH_STATE_SIZE
+ 64];
111 struct qat_alg_aead_ctx
{
112 struct qat_alg_cd
*enc_cd
;
113 struct qat_alg_cd
*dec_cd
;
114 dma_addr_t enc_cd_paddr
;
115 dma_addr_t dec_cd_paddr
;
116 struct icp_qat_fw_la_bulk_req enc_fw_req
;
117 struct icp_qat_fw_la_bulk_req dec_fw_req
;
118 struct crypto_shash
*hash_tfm
;
119 enum icp_qat_hw_auth_algo qat_hash_alg
;
120 struct qat_crypto_instance
*inst
;
121 struct crypto_tfm
*tfm
;
122 uint8_t salt
[AES_BLOCK_SIZE
];
123 spinlock_t lock
; /* protects qat_alg_aead_ctx struct */
126 struct qat_alg_ablkcipher_ctx
{
127 struct icp_qat_hw_cipher_algo_blk
*enc_cd
;
128 struct icp_qat_hw_cipher_algo_blk
*dec_cd
;
129 dma_addr_t enc_cd_paddr
;
130 dma_addr_t dec_cd_paddr
;
131 struct icp_qat_fw_la_bulk_req enc_fw_req
;
132 struct icp_qat_fw_la_bulk_req dec_fw_req
;
133 struct qat_crypto_instance
*inst
;
134 struct crypto_tfm
*tfm
;
135 spinlock_t lock
; /* protects qat_alg_ablkcipher_ctx struct */
138 static int get_current_node(void)
140 return cpu_data(current_thread_info()->cpu
).phys_proc_id
;
143 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg
)
145 switch (qat_hash_alg
) {
146 case ICP_QAT_HW_AUTH_ALGO_SHA1
:
147 return ICP_QAT_HW_SHA1_STATE1_SZ
;
148 case ICP_QAT_HW_AUTH_ALGO_SHA256
:
149 return ICP_QAT_HW_SHA256_STATE1_SZ
;
150 case ICP_QAT_HW_AUTH_ALGO_SHA512
:
151 return ICP_QAT_HW_SHA512_STATE1_SZ
;
158 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk
*hash
,
159 struct qat_alg_aead_ctx
*ctx
,
160 const uint8_t *auth_key
,
161 unsigned int auth_keylen
)
163 struct qat_auth_state auth_state
;
164 SHASH_DESC_ON_STACK(shash
, ctx
->hash_tfm
);
165 struct sha1_state sha1
;
166 struct sha256_state sha256
;
167 struct sha512_state sha512
;
168 int block_size
= crypto_shash_blocksize(ctx
->hash_tfm
);
169 int digest_size
= crypto_shash_digestsize(ctx
->hash_tfm
);
170 uint8_t *ipad
= auth_state
.data
;
171 uint8_t *opad
= ipad
+ block_size
;
172 __be32
*hash_state_out
;
173 __be64
*hash512_state_out
;
176 memset(auth_state
.data
, 0, sizeof(auth_state
.data
));
177 shash
->tfm
= ctx
->hash_tfm
;
180 if (auth_keylen
> block_size
) {
181 char buff
[SHA512_BLOCK_SIZE
];
182 int ret
= crypto_shash_digest(shash
, auth_key
,
187 memcpy(ipad
, buff
, digest_size
);
188 memcpy(opad
, buff
, digest_size
);
189 memzero_explicit(buff
, sizeof(buff
));
191 memcpy(ipad
, auth_key
, auth_keylen
);
192 memcpy(opad
, auth_key
, auth_keylen
);
195 for (i
= 0; i
< block_size
; i
++) {
196 char *ipad_ptr
= ipad
+ i
;
197 char *opad_ptr
= opad
+ i
;
202 if (crypto_shash_init(shash
))
205 if (crypto_shash_update(shash
, ipad
, block_size
))
208 hash_state_out
= (__be32
*)hash
->sha
.state1
;
209 hash512_state_out
= (__be64
*)hash_state_out
;
211 switch (ctx
->qat_hash_alg
) {
212 case ICP_QAT_HW_AUTH_ALGO_SHA1
:
213 if (crypto_shash_export(shash
, &sha1
))
215 for (i
= 0; i
< digest_size
>> 2; i
++, hash_state_out
++)
216 *hash_state_out
= cpu_to_be32(*(sha1
.state
+ i
));
218 case ICP_QAT_HW_AUTH_ALGO_SHA256
:
219 if (crypto_shash_export(shash
, &sha256
))
221 for (i
= 0; i
< digest_size
>> 2; i
++, hash_state_out
++)
222 *hash_state_out
= cpu_to_be32(*(sha256
.state
+ i
));
224 case ICP_QAT_HW_AUTH_ALGO_SHA512
:
225 if (crypto_shash_export(shash
, &sha512
))
227 for (i
= 0; i
< digest_size
>> 3; i
++, hash512_state_out
++)
228 *hash512_state_out
= cpu_to_be64(*(sha512
.state
+ i
));
234 if (crypto_shash_init(shash
))
237 if (crypto_shash_update(shash
, opad
, block_size
))
240 offset
= round_up(qat_get_inter_state_size(ctx
->qat_hash_alg
), 8);
241 hash_state_out
= (__be32
*)(hash
->sha
.state1
+ offset
);
242 hash512_state_out
= (__be64
*)hash_state_out
;
244 switch (ctx
->qat_hash_alg
) {
245 case ICP_QAT_HW_AUTH_ALGO_SHA1
:
246 if (crypto_shash_export(shash
, &sha1
))
248 for (i
= 0; i
< digest_size
>> 2; i
++, hash_state_out
++)
249 *hash_state_out
= cpu_to_be32(*(sha1
.state
+ i
));
251 case ICP_QAT_HW_AUTH_ALGO_SHA256
:
252 if (crypto_shash_export(shash
, &sha256
))
254 for (i
= 0; i
< digest_size
>> 2; i
++, hash_state_out
++)
255 *hash_state_out
= cpu_to_be32(*(sha256
.state
+ i
));
257 case ICP_QAT_HW_AUTH_ALGO_SHA512
:
258 if (crypto_shash_export(shash
, &sha512
))
260 for (i
= 0; i
< digest_size
>> 3; i
++, hash512_state_out
++)
261 *hash512_state_out
= cpu_to_be64(*(sha512
.state
+ i
));
266 memzero_explicit(ipad
, block_size
);
267 memzero_explicit(opad
, block_size
);
271 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr
*header
)
274 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET
);
275 header
->service_type
= ICP_QAT_FW_COMN_REQ_CPM_FW_LA
;
276 header
->comn_req_flags
=
277 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR
,
278 QAT_COMN_PTR_TYPE_SGL
);
279 ICP_QAT_FW_LA_PARTIAL_SET(header
->serv_specif_flags
,
280 ICP_QAT_FW_LA_PARTIAL_NONE
);
281 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header
->serv_specif_flags
,
282 ICP_QAT_FW_CIPH_IV_16BYTE_DATA
);
283 ICP_QAT_FW_LA_PROTO_SET(header
->serv_specif_flags
,
284 ICP_QAT_FW_LA_NO_PROTO
);
285 ICP_QAT_FW_LA_UPDATE_STATE_SET(header
->serv_specif_flags
,
286 ICP_QAT_FW_LA_NO_UPDATE_STATE
);
289 static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx
*ctx
,
291 struct crypto_authenc_keys
*keys
)
293 struct crypto_aead
*aead_tfm
= __crypto_aead_cast(ctx
->tfm
);
294 unsigned int digestsize
= crypto_aead_crt(aead_tfm
)->authsize
;
295 struct qat_enc
*enc_ctx
= &ctx
->enc_cd
->qat_enc_cd
;
296 struct icp_qat_hw_cipher_algo_blk
*cipher
= &enc_ctx
->cipher
;
297 struct icp_qat_hw_auth_algo_blk
*hash
=
298 (struct icp_qat_hw_auth_algo_blk
*)((char *)enc_ctx
+
299 sizeof(struct icp_qat_hw_auth_setup
) + keys
->enckeylen
);
300 struct icp_qat_fw_la_bulk_req
*req_tmpl
= &ctx
->enc_fw_req
;
301 struct icp_qat_fw_comn_req_hdr_cd_pars
*cd_pars
= &req_tmpl
->cd_pars
;
302 struct icp_qat_fw_comn_req_hdr
*header
= &req_tmpl
->comn_hdr
;
303 void *ptr
= &req_tmpl
->cd_ctrl
;
304 struct icp_qat_fw_cipher_cd_ctrl_hdr
*cipher_cd_ctrl
= ptr
;
305 struct icp_qat_fw_auth_cd_ctrl_hdr
*hash_cd_ctrl
= ptr
;
308 cipher
->aes
.cipher_config
.val
= QAT_AES_HW_CONFIG_CBC_ENC(alg
);
309 memcpy(cipher
->aes
.key
, keys
->enckey
, keys
->enckeylen
);
310 hash
->sha
.inner_setup
.auth_config
.config
=
311 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1
,
312 ctx
->qat_hash_alg
, digestsize
);
313 hash
->sha
.inner_setup
.auth_counter
.counter
=
314 cpu_to_be32(crypto_shash_blocksize(ctx
->hash_tfm
));
316 if (qat_alg_do_precomputes(hash
, ctx
, keys
->authkey
, keys
->authkeylen
))
320 qat_alg_init_common_hdr(header
);
321 header
->service_cmd_id
= ICP_QAT_FW_LA_CMD_CIPHER_HASH
;
322 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header
->serv_specif_flags
,
323 ICP_QAT_FW_LA_DIGEST_IN_BUFFER
);
324 ICP_QAT_FW_LA_RET_AUTH_SET(header
->serv_specif_flags
,
325 ICP_QAT_FW_LA_RET_AUTH_RES
);
326 ICP_QAT_FW_LA_CMP_AUTH_SET(header
->serv_specif_flags
,
327 ICP_QAT_FW_LA_NO_CMP_AUTH_RES
);
328 cd_pars
->u
.s
.content_desc_addr
= ctx
->enc_cd_paddr
;
329 cd_pars
->u
.s
.content_desc_params_sz
= sizeof(struct qat_alg_cd
) >> 3;
331 /* Cipher CD config setup */
332 cipher_cd_ctrl
->cipher_key_sz
= keys
->enckeylen
>> 3;
333 cipher_cd_ctrl
->cipher_state_sz
= AES_BLOCK_SIZE
>> 3;
334 cipher_cd_ctrl
->cipher_cfg_offset
= 0;
335 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl
, ICP_QAT_FW_SLICE_CIPHER
);
336 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl
, ICP_QAT_FW_SLICE_AUTH
);
337 /* Auth CD config setup */
338 hash_cd_ctrl
->hash_cfg_offset
= ((char *)hash
- (char *)cipher
) >> 3;
339 hash_cd_ctrl
->hash_flags
= ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED
;
340 hash_cd_ctrl
->inner_res_sz
= digestsize
;
341 hash_cd_ctrl
->final_sz
= digestsize
;
343 switch (ctx
->qat_hash_alg
) {
344 case ICP_QAT_HW_AUTH_ALGO_SHA1
:
345 hash_cd_ctrl
->inner_state1_sz
=
346 round_up(ICP_QAT_HW_SHA1_STATE1_SZ
, 8);
347 hash_cd_ctrl
->inner_state2_sz
=
348 round_up(ICP_QAT_HW_SHA1_STATE2_SZ
, 8);
350 case ICP_QAT_HW_AUTH_ALGO_SHA256
:
351 hash_cd_ctrl
->inner_state1_sz
= ICP_QAT_HW_SHA256_STATE1_SZ
;
352 hash_cd_ctrl
->inner_state2_sz
= ICP_QAT_HW_SHA256_STATE2_SZ
;
354 case ICP_QAT_HW_AUTH_ALGO_SHA512
:
355 hash_cd_ctrl
->inner_state1_sz
= ICP_QAT_HW_SHA512_STATE1_SZ
;
356 hash_cd_ctrl
->inner_state2_sz
= ICP_QAT_HW_SHA512_STATE2_SZ
;
361 hash_cd_ctrl
->inner_state2_offset
= hash_cd_ctrl
->hash_cfg_offset
+
362 ((sizeof(struct icp_qat_hw_auth_setup
) +
363 round_up(hash_cd_ctrl
->inner_state1_sz
, 8)) >> 3);
364 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl
, ICP_QAT_FW_SLICE_AUTH
);
365 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl
, ICP_QAT_FW_SLICE_DRAM_WR
);
369 static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx
*ctx
,
371 struct crypto_authenc_keys
*keys
)
373 struct crypto_aead
*aead_tfm
= __crypto_aead_cast(ctx
->tfm
);
374 unsigned int digestsize
= crypto_aead_crt(aead_tfm
)->authsize
;
375 struct qat_dec
*dec_ctx
= &ctx
->dec_cd
->qat_dec_cd
;
376 struct icp_qat_hw_auth_algo_blk
*hash
= &dec_ctx
->hash
;
377 struct icp_qat_hw_cipher_algo_blk
*cipher
=
378 (struct icp_qat_hw_cipher_algo_blk
*)((char *)dec_ctx
+
379 sizeof(struct icp_qat_hw_auth_setup
) +
380 roundup(crypto_shash_digestsize(ctx
->hash_tfm
), 8) * 2);
381 struct icp_qat_fw_la_bulk_req
*req_tmpl
= &ctx
->dec_fw_req
;
382 struct icp_qat_fw_comn_req_hdr_cd_pars
*cd_pars
= &req_tmpl
->cd_pars
;
383 struct icp_qat_fw_comn_req_hdr
*header
= &req_tmpl
->comn_hdr
;
384 void *ptr
= &req_tmpl
->cd_ctrl
;
385 struct icp_qat_fw_cipher_cd_ctrl_hdr
*cipher_cd_ctrl
= ptr
;
386 struct icp_qat_fw_auth_cd_ctrl_hdr
*hash_cd_ctrl
= ptr
;
387 struct icp_qat_fw_la_auth_req_params
*auth_param
=
388 (struct icp_qat_fw_la_auth_req_params
*)
389 ((char *)&req_tmpl
->serv_specif_rqpars
+
390 sizeof(struct icp_qat_fw_la_cipher_req_params
));
393 cipher
->aes
.cipher_config
.val
= QAT_AES_HW_CONFIG_CBC_DEC(alg
);
394 memcpy(cipher
->aes
.key
, keys
->enckey
, keys
->enckeylen
);
395 hash
->sha
.inner_setup
.auth_config
.config
=
396 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1
,
399 hash
->sha
.inner_setup
.auth_counter
.counter
=
400 cpu_to_be32(crypto_shash_blocksize(ctx
->hash_tfm
));
402 if (qat_alg_do_precomputes(hash
, ctx
, keys
->authkey
, keys
->authkeylen
))
406 qat_alg_init_common_hdr(header
);
407 header
->service_cmd_id
= ICP_QAT_FW_LA_CMD_HASH_CIPHER
;
408 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header
->serv_specif_flags
,
409 ICP_QAT_FW_LA_DIGEST_IN_BUFFER
);
410 ICP_QAT_FW_LA_RET_AUTH_SET(header
->serv_specif_flags
,
411 ICP_QAT_FW_LA_NO_RET_AUTH_RES
);
412 ICP_QAT_FW_LA_CMP_AUTH_SET(header
->serv_specif_flags
,
413 ICP_QAT_FW_LA_CMP_AUTH_RES
);
414 cd_pars
->u
.s
.content_desc_addr
= ctx
->dec_cd_paddr
;
415 cd_pars
->u
.s
.content_desc_params_sz
= sizeof(struct qat_alg_cd
) >> 3;
417 /* Cipher CD config setup */
418 cipher_cd_ctrl
->cipher_key_sz
= keys
->enckeylen
>> 3;
419 cipher_cd_ctrl
->cipher_state_sz
= AES_BLOCK_SIZE
>> 3;
420 cipher_cd_ctrl
->cipher_cfg_offset
=
421 (sizeof(struct icp_qat_hw_auth_setup
) +
422 roundup(crypto_shash_digestsize(ctx
->hash_tfm
), 8) * 2) >> 3;
423 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl
, ICP_QAT_FW_SLICE_CIPHER
);
424 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl
, ICP_QAT_FW_SLICE_DRAM_WR
);
426 /* Auth CD config setup */
427 hash_cd_ctrl
->hash_cfg_offset
= 0;
428 hash_cd_ctrl
->hash_flags
= ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED
;
429 hash_cd_ctrl
->inner_res_sz
= digestsize
;
430 hash_cd_ctrl
->final_sz
= digestsize
;
432 switch (ctx
->qat_hash_alg
) {
433 case ICP_QAT_HW_AUTH_ALGO_SHA1
:
434 hash_cd_ctrl
->inner_state1_sz
=
435 round_up(ICP_QAT_HW_SHA1_STATE1_SZ
, 8);
436 hash_cd_ctrl
->inner_state2_sz
=
437 round_up(ICP_QAT_HW_SHA1_STATE2_SZ
, 8);
439 case ICP_QAT_HW_AUTH_ALGO_SHA256
:
440 hash_cd_ctrl
->inner_state1_sz
= ICP_QAT_HW_SHA256_STATE1_SZ
;
441 hash_cd_ctrl
->inner_state2_sz
= ICP_QAT_HW_SHA256_STATE2_SZ
;
443 case ICP_QAT_HW_AUTH_ALGO_SHA512
:
444 hash_cd_ctrl
->inner_state1_sz
= ICP_QAT_HW_SHA512_STATE1_SZ
;
445 hash_cd_ctrl
->inner_state2_sz
= ICP_QAT_HW_SHA512_STATE2_SZ
;
451 hash_cd_ctrl
->inner_state2_offset
= hash_cd_ctrl
->hash_cfg_offset
+
452 ((sizeof(struct icp_qat_hw_auth_setup
) +
453 round_up(hash_cd_ctrl
->inner_state1_sz
, 8)) >> 3);
454 auth_param
->auth_res_sz
= digestsize
;
455 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl
, ICP_QAT_FW_SLICE_AUTH
);
456 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl
, ICP_QAT_FW_SLICE_CIPHER
);
460 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx
*ctx
,
461 struct icp_qat_fw_la_bulk_req
*req
,
462 struct icp_qat_hw_cipher_algo_blk
*cd
,
463 const uint8_t *key
, unsigned int keylen
)
465 struct icp_qat_fw_comn_req_hdr_cd_pars
*cd_pars
= &req
->cd_pars
;
466 struct icp_qat_fw_comn_req_hdr
*header
= &req
->comn_hdr
;
467 struct icp_qat_fw_cipher_cd_ctrl_hdr
*cd_ctrl
= (void *)&req
->cd_ctrl
;
469 memcpy(cd
->aes
.key
, key
, keylen
);
470 qat_alg_init_common_hdr(header
);
471 header
->service_cmd_id
= ICP_QAT_FW_LA_CMD_CIPHER
;
472 cd_pars
->u
.s
.content_desc_params_sz
=
473 sizeof(struct icp_qat_hw_cipher_algo_blk
) >> 3;
474 /* Cipher CD config setup */
475 cd_ctrl
->cipher_key_sz
= keylen
>> 3;
476 cd_ctrl
->cipher_state_sz
= AES_BLOCK_SIZE
>> 3;
477 cd_ctrl
->cipher_cfg_offset
= 0;
478 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl
, ICP_QAT_FW_SLICE_CIPHER
);
479 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl
, ICP_QAT_FW_SLICE_DRAM_WR
);
482 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx
*ctx
,
483 int alg
, const uint8_t *key
,
486 struct icp_qat_hw_cipher_algo_blk
*enc_cd
= ctx
->enc_cd
;
487 struct icp_qat_fw_la_bulk_req
*req
= &ctx
->enc_fw_req
;
488 struct icp_qat_fw_comn_req_hdr_cd_pars
*cd_pars
= &req
->cd_pars
;
490 qat_alg_ablkcipher_init_com(ctx
, req
, enc_cd
, key
, keylen
);
491 cd_pars
->u
.s
.content_desc_addr
= ctx
->enc_cd_paddr
;
492 enc_cd
->aes
.cipher_config
.val
= QAT_AES_HW_CONFIG_CBC_ENC(alg
);
495 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx
*ctx
,
496 int alg
, const uint8_t *key
,
499 struct icp_qat_hw_cipher_algo_blk
*dec_cd
= ctx
->dec_cd
;
500 struct icp_qat_fw_la_bulk_req
*req
= &ctx
->dec_fw_req
;
501 struct icp_qat_fw_comn_req_hdr_cd_pars
*cd_pars
= &req
->cd_pars
;
503 qat_alg_ablkcipher_init_com(ctx
, req
, dec_cd
, key
, keylen
);
504 cd_pars
->u
.s
.content_desc_addr
= ctx
->dec_cd_paddr
;
505 dec_cd
->aes
.cipher_config
.val
= QAT_AES_HW_CONFIG_CBC_DEC(alg
);
508 static int qat_alg_validate_key(int key_len
, int *alg
)
511 case AES_KEYSIZE_128
:
512 *alg
= ICP_QAT_HW_CIPHER_ALGO_AES128
;
514 case AES_KEYSIZE_192
:
515 *alg
= ICP_QAT_HW_CIPHER_ALGO_AES192
;
517 case AES_KEYSIZE_256
:
518 *alg
= ICP_QAT_HW_CIPHER_ALGO_AES256
;
526 static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx
*ctx
,
527 const uint8_t *key
, unsigned int keylen
)
529 struct crypto_authenc_keys keys
;
532 if (crypto_rng_get_bytes(crypto_default_rng
, ctx
->salt
, AES_BLOCK_SIZE
))
535 if (crypto_authenc_extractkeys(&keys
, key
, keylen
))
538 if (qat_alg_validate_key(keys
.enckeylen
, &alg
))
541 if (qat_alg_aead_init_enc_session(ctx
, alg
, &keys
))
544 if (qat_alg_aead_init_dec_session(ctx
, alg
, &keys
))
549 crypto_tfm_set_flags(ctx
->tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
555 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx
*ctx
,
561 if (qat_alg_validate_key(keylen
, &alg
))
564 qat_alg_ablkcipher_init_enc(ctx
, alg
, key
, keylen
);
565 qat_alg_ablkcipher_init_dec(ctx
, alg
, key
, keylen
);
568 crypto_tfm_set_flags(ctx
->tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
572 static int qat_alg_aead_setkey(struct crypto_aead
*tfm
, const uint8_t *key
,
575 struct qat_alg_aead_ctx
*ctx
= crypto_aead_ctx(tfm
);
578 spin_lock(&ctx
->lock
);
581 dev
= &GET_DEV(ctx
->inst
->accel_dev
);
582 memset(ctx
->enc_cd
, 0, sizeof(*ctx
->enc_cd
));
583 memset(ctx
->dec_cd
, 0, sizeof(*ctx
->dec_cd
));
584 memset(&ctx
->enc_fw_req
, 0, sizeof(ctx
->enc_fw_req
));
585 memset(&ctx
->dec_fw_req
, 0, sizeof(ctx
->dec_fw_req
));
588 int node
= get_current_node();
589 struct qat_crypto_instance
*inst
=
590 qat_crypto_get_instance_node(node
);
592 spin_unlock(&ctx
->lock
);
596 dev
= &GET_DEV(inst
->accel_dev
);
598 ctx
->enc_cd
= dma_zalloc_coherent(dev
, sizeof(*ctx
->enc_cd
),
602 spin_unlock(&ctx
->lock
);
605 ctx
->dec_cd
= dma_zalloc_coherent(dev
, sizeof(*ctx
->dec_cd
),
609 spin_unlock(&ctx
->lock
);
613 spin_unlock(&ctx
->lock
);
614 if (qat_alg_aead_init_sessions(ctx
, key
, keylen
))
620 memset(ctx
->dec_cd
, 0, sizeof(struct qat_alg_cd
));
621 dma_free_coherent(dev
, sizeof(struct qat_alg_cd
),
622 ctx
->dec_cd
, ctx
->dec_cd_paddr
);
625 memset(ctx
->enc_cd
, 0, sizeof(struct qat_alg_cd
));
626 dma_free_coherent(dev
, sizeof(struct qat_alg_cd
),
627 ctx
->enc_cd
, ctx
->enc_cd_paddr
);
632 static void qat_alg_free_bufl(struct qat_crypto_instance
*inst
,
633 struct qat_crypto_request
*qat_req
)
635 struct device
*dev
= &GET_DEV(inst
->accel_dev
);
636 struct qat_alg_buf_list
*bl
= qat_req
->buf
.bl
;
637 struct qat_alg_buf_list
*blout
= qat_req
->buf
.blout
;
638 dma_addr_t blp
= qat_req
->buf
.blp
;
639 dma_addr_t blpout
= qat_req
->buf
.bloutp
;
640 size_t sz
= qat_req
->buf
.sz
;
641 size_t sz_out
= qat_req
->buf
.sz_out
;
644 for (i
= 0; i
< bl
->num_bufs
; i
++)
645 dma_unmap_single(dev
, bl
->bufers
[i
].addr
,
646 bl
->bufers
[i
].len
, DMA_BIDIRECTIONAL
);
648 dma_unmap_single(dev
, blp
, sz
, DMA_TO_DEVICE
);
651 /* If out of place operation dma unmap only data */
652 int bufless
= blout
->num_bufs
- blout
->num_mapped_bufs
;
654 for (i
= bufless
; i
< blout
->num_bufs
; i
++) {
655 dma_unmap_single(dev
, blout
->bufers
[i
].addr
,
656 blout
->bufers
[i
].len
,
659 dma_unmap_single(dev
, blpout
, sz_out
, DMA_TO_DEVICE
);
664 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance
*inst
,
665 struct scatterlist
*assoc
,
666 struct scatterlist
*sgl
,
667 struct scatterlist
*sglout
, uint8_t *iv
,
669 struct qat_crypto_request
*qat_req
)
671 struct device
*dev
= &GET_DEV(inst
->accel_dev
);
672 int i
, bufs
= 0, sg_nctr
= 0;
673 int n
= sg_nents(sgl
), assoc_n
= sg_nents(assoc
);
674 struct qat_alg_buf_list
*bufl
;
675 struct qat_alg_buf_list
*buflout
= NULL
;
677 dma_addr_t bloutp
= 0;
678 struct scatterlist
*sg
;
679 size_t sz_out
, sz
= sizeof(struct qat_alg_buf_list
) +
680 ((1 + n
+ assoc_n
) * sizeof(struct qat_alg_buf
));
685 bufl
= kzalloc_node(sz
, GFP_ATOMIC
,
686 dev_to_node(&GET_DEV(inst
->accel_dev
)));
690 blp
= dma_map_single(dev
, bufl
, sz
, DMA_TO_DEVICE
);
691 if (unlikely(dma_mapping_error(dev
, blp
)))
694 for_each_sg(assoc
, sg
, assoc_n
, i
) {
697 bufl
->bufers
[bufs
].addr
= dma_map_single(dev
,
701 bufl
->bufers
[bufs
].len
= sg
->length
;
702 if (unlikely(dma_mapping_error(dev
, bufl
->bufers
[bufs
].addr
)))
707 bufl
->bufers
[bufs
].addr
= dma_map_single(dev
, iv
, ivlen
,
709 bufl
->bufers
[bufs
].len
= ivlen
;
710 if (unlikely(dma_mapping_error(dev
, bufl
->bufers
[bufs
].addr
)))
715 for_each_sg(sgl
, sg
, n
, i
) {
716 int y
= sg_nctr
+ bufs
;
721 bufl
->bufers
[y
].addr
= dma_map_single(dev
, sg_virt(sg
),
724 bufl
->bufers
[y
].len
= sg
->length
;
725 if (unlikely(dma_mapping_error(dev
, bufl
->bufers
[y
].addr
)))
729 bufl
->num_bufs
= sg_nctr
+ bufs
;
730 qat_req
->buf
.bl
= bufl
;
731 qat_req
->buf
.blp
= blp
;
732 qat_req
->buf
.sz
= sz
;
733 /* Handle out of place operation */
735 struct qat_alg_buf
*bufers
;
737 n
= sg_nents(sglout
);
738 sz_out
= sizeof(struct qat_alg_buf_list
) +
739 ((1 + n
+ assoc_n
) * sizeof(struct qat_alg_buf
));
741 buflout
= kzalloc_node(sz_out
, GFP_ATOMIC
,
742 dev_to_node(&GET_DEV(inst
->accel_dev
)));
743 if (unlikely(!buflout
))
745 bloutp
= dma_map_single(dev
, buflout
, sz_out
, DMA_TO_DEVICE
);
746 if (unlikely(dma_mapping_error(dev
, bloutp
)))
748 bufers
= buflout
->bufers
;
749 /* For out of place operation dma map only data and
750 * reuse assoc mapping and iv */
751 for (i
= 0; i
< bufs
; i
++) {
752 bufers
[i
].len
= bufl
->bufers
[i
].len
;
753 bufers
[i
].addr
= bufl
->bufers
[i
].addr
;
755 for_each_sg(sglout
, sg
, n
, i
) {
756 int y
= sg_nctr
+ bufs
;
761 bufers
[y
].addr
= dma_map_single(dev
, sg_virt(sg
),
764 if (unlikely(dma_mapping_error(dev
, bufers
[y
].addr
)))
766 bufers
[y
].len
= sg
->length
;
769 buflout
->num_bufs
= sg_nctr
+ bufs
;
770 buflout
->num_mapped_bufs
= sg_nctr
;
771 qat_req
->buf
.blout
= buflout
;
772 qat_req
->buf
.bloutp
= bloutp
;
773 qat_req
->buf
.sz_out
= sz_out
;
775 /* Otherwise set the src and dst to the same address */
776 qat_req
->buf
.bloutp
= qat_req
->buf
.blp
;
777 qat_req
->buf
.sz_out
= 0;
781 dev_err(dev
, "Failed to map buf for dma\n");
783 for (i
= 0; i
< n
+ bufs
; i
++)
784 if (!dma_mapping_error(dev
, bufl
->bufers
[i
].addr
))
785 dma_unmap_single(dev
, bufl
->bufers
[i
].addr
,
789 if (!dma_mapping_error(dev
, blp
))
790 dma_unmap_single(dev
, blp
, sz
, DMA_TO_DEVICE
);
792 if (sgl
!= sglout
&& buflout
) {
793 n
= sg_nents(sglout
);
794 for (i
= bufs
; i
< n
+ bufs
; i
++)
795 if (!dma_mapping_error(dev
, buflout
->bufers
[i
].addr
))
796 dma_unmap_single(dev
, buflout
->bufers
[i
].addr
,
797 buflout
->bufers
[i
].len
,
799 if (!dma_mapping_error(dev
, bloutp
))
800 dma_unmap_single(dev
, bloutp
, sz_out
, DMA_TO_DEVICE
);
806 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp
*qat_resp
,
807 struct qat_crypto_request
*qat_req
)
809 struct qat_alg_aead_ctx
*ctx
= qat_req
->aead_ctx
;
810 struct qat_crypto_instance
*inst
= ctx
->inst
;
811 struct aead_request
*areq
= qat_req
->aead_req
;
812 uint8_t stat_filed
= qat_resp
->comn_resp
.comn_status
;
813 int res
= 0, qat_res
= ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed
);
815 qat_alg_free_bufl(inst
, qat_req
);
816 if (unlikely(qat_res
!= ICP_QAT_FW_COMN_STATUS_FLAG_OK
))
818 areq
->base
.complete(&areq
->base
, res
);
821 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp
*qat_resp
,
822 struct qat_crypto_request
*qat_req
)
824 struct qat_alg_ablkcipher_ctx
*ctx
= qat_req
->ablkcipher_ctx
;
825 struct qat_crypto_instance
*inst
= ctx
->inst
;
826 struct ablkcipher_request
*areq
= qat_req
->ablkcipher_req
;
827 uint8_t stat_filed
= qat_resp
->comn_resp
.comn_status
;
828 int res
= 0, qat_res
= ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed
);
830 qat_alg_free_bufl(inst
, qat_req
);
831 if (unlikely(qat_res
!= ICP_QAT_FW_COMN_STATUS_FLAG_OK
))
833 areq
->base
.complete(&areq
->base
, res
);
836 void qat_alg_callback(void *resp
)
838 struct icp_qat_fw_la_resp
*qat_resp
= resp
;
839 struct qat_crypto_request
*qat_req
=
840 (void *)(__force
long)qat_resp
->opaque_data
;
842 qat_req
->cb(qat_resp
, qat_req
);
845 static int qat_alg_aead_dec(struct aead_request
*areq
)
847 struct crypto_aead
*aead_tfm
= crypto_aead_reqtfm(areq
);
848 struct crypto_tfm
*tfm
= crypto_aead_tfm(aead_tfm
);
849 struct qat_alg_aead_ctx
*ctx
= crypto_tfm_ctx(tfm
);
850 struct qat_crypto_request
*qat_req
= aead_request_ctx(areq
);
851 struct icp_qat_fw_la_cipher_req_params
*cipher_param
;
852 struct icp_qat_fw_la_auth_req_params
*auth_param
;
853 struct icp_qat_fw_la_bulk_req
*msg
;
854 int digst_size
= crypto_aead_crt(aead_tfm
)->authsize
;
857 ret
= qat_alg_sgl_to_bufl(ctx
->inst
, areq
->assoc
, areq
->src
, areq
->dst
,
858 areq
->iv
, AES_BLOCK_SIZE
, qat_req
);
863 *msg
= ctx
->dec_fw_req
;
864 qat_req
->aead_ctx
= ctx
;
865 qat_req
->aead_req
= areq
;
866 qat_req
->cb
= qat_aead_alg_callback
;
867 qat_req
->req
.comn_mid
.opaque_data
= (uint64_t)(__force
long)qat_req
;
868 qat_req
->req
.comn_mid
.src_data_addr
= qat_req
->buf
.blp
;
869 qat_req
->req
.comn_mid
.dest_data_addr
= qat_req
->buf
.bloutp
;
870 cipher_param
= (void *)&qat_req
->req
.serv_specif_rqpars
;
871 cipher_param
->cipher_length
= areq
->cryptlen
- digst_size
;
872 cipher_param
->cipher_offset
= areq
->assoclen
+ AES_BLOCK_SIZE
;
873 memcpy(cipher_param
->u
.cipher_IV_array
, areq
->iv
, AES_BLOCK_SIZE
);
874 auth_param
= (void *)((uint8_t *)cipher_param
+ sizeof(*cipher_param
));
875 auth_param
->auth_off
= 0;
876 auth_param
->auth_len
= areq
->assoclen
+
877 cipher_param
->cipher_length
+ AES_BLOCK_SIZE
;
879 ret
= adf_send_message(ctx
->inst
->sym_tx
, (uint32_t *)msg
);
880 } while (ret
== -EAGAIN
&& ctr
++ < 10);
882 if (ret
== -EAGAIN
) {
883 qat_alg_free_bufl(ctx
->inst
, qat_req
);
889 static int qat_alg_aead_enc_internal(struct aead_request
*areq
, uint8_t *iv
,
892 struct crypto_aead
*aead_tfm
= crypto_aead_reqtfm(areq
);
893 struct crypto_tfm
*tfm
= crypto_aead_tfm(aead_tfm
);
894 struct qat_alg_aead_ctx
*ctx
= crypto_tfm_ctx(tfm
);
895 struct qat_crypto_request
*qat_req
= aead_request_ctx(areq
);
896 struct icp_qat_fw_la_cipher_req_params
*cipher_param
;
897 struct icp_qat_fw_la_auth_req_params
*auth_param
;
898 struct icp_qat_fw_la_bulk_req
*msg
;
901 ret
= qat_alg_sgl_to_bufl(ctx
->inst
, areq
->assoc
, areq
->src
, areq
->dst
,
902 iv
, AES_BLOCK_SIZE
, qat_req
);
907 *msg
= ctx
->enc_fw_req
;
908 qat_req
->aead_ctx
= ctx
;
909 qat_req
->aead_req
= areq
;
910 qat_req
->cb
= qat_aead_alg_callback
;
911 qat_req
->req
.comn_mid
.opaque_data
= (uint64_t)(__force
long)qat_req
;
912 qat_req
->req
.comn_mid
.src_data_addr
= qat_req
->buf
.blp
;
913 qat_req
->req
.comn_mid
.dest_data_addr
= qat_req
->buf
.bloutp
;
914 cipher_param
= (void *)&qat_req
->req
.serv_specif_rqpars
;
915 auth_param
= (void *)((uint8_t *)cipher_param
+ sizeof(*cipher_param
));
918 cipher_param
->cipher_length
= areq
->cryptlen
+ AES_BLOCK_SIZE
;
919 cipher_param
->cipher_offset
= areq
->assoclen
;
921 memcpy(cipher_param
->u
.cipher_IV_array
, iv
, AES_BLOCK_SIZE
);
922 cipher_param
->cipher_length
= areq
->cryptlen
;
923 cipher_param
->cipher_offset
= areq
->assoclen
+ AES_BLOCK_SIZE
;
925 auth_param
->auth_off
= 0;
926 auth_param
->auth_len
= areq
->assoclen
+ areq
->cryptlen
+ AES_BLOCK_SIZE
;
929 ret
= adf_send_message(ctx
->inst
->sym_tx
, (uint32_t *)msg
);
930 } while (ret
== -EAGAIN
&& ctr
++ < 10);
932 if (ret
== -EAGAIN
) {
933 qat_alg_free_bufl(ctx
->inst
, qat_req
);
939 static int qat_alg_aead_enc(struct aead_request
*areq
)
941 return qat_alg_aead_enc_internal(areq
, areq
->iv
, 0);
944 static int qat_alg_aead_genivenc(struct aead_givcrypt_request
*req
)
946 struct crypto_aead
*aead_tfm
= crypto_aead_reqtfm(&req
->areq
);
947 struct crypto_tfm
*tfm
= crypto_aead_tfm(aead_tfm
);
948 struct qat_alg_aead_ctx
*ctx
= crypto_tfm_ctx(tfm
);
951 memcpy(req
->giv
, ctx
->salt
, AES_BLOCK_SIZE
);
952 seq
= cpu_to_be64(req
->seq
);
953 memcpy(req
->giv
+ AES_BLOCK_SIZE
- sizeof(uint64_t),
954 &seq
, sizeof(uint64_t));
955 return qat_alg_aead_enc_internal(&req
->areq
, req
->giv
, 1);
958 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher
*tfm
,
962 struct qat_alg_ablkcipher_ctx
*ctx
= crypto_ablkcipher_ctx(tfm
);
965 spin_lock(&ctx
->lock
);
968 dev
= &GET_DEV(ctx
->inst
->accel_dev
);
969 memset(ctx
->enc_cd
, 0, sizeof(*ctx
->enc_cd
));
970 memset(ctx
->dec_cd
, 0, sizeof(*ctx
->dec_cd
));
971 memset(&ctx
->enc_fw_req
, 0, sizeof(ctx
->enc_fw_req
));
972 memset(&ctx
->dec_fw_req
, 0, sizeof(ctx
->dec_fw_req
));
975 int node
= get_current_node();
976 struct qat_crypto_instance
*inst
=
977 qat_crypto_get_instance_node(node
);
979 spin_unlock(&ctx
->lock
);
983 dev
= &GET_DEV(inst
->accel_dev
);
985 ctx
->enc_cd
= dma_zalloc_coherent(dev
, sizeof(*ctx
->enc_cd
),
989 spin_unlock(&ctx
->lock
);
992 ctx
->dec_cd
= dma_zalloc_coherent(dev
, sizeof(*ctx
->dec_cd
),
996 spin_unlock(&ctx
->lock
);
1000 spin_unlock(&ctx
->lock
);
1001 if (qat_alg_ablkcipher_init_sessions(ctx
, key
, keylen
))
1007 memset(ctx
->dec_cd
, 0, sizeof(*ctx
->enc_cd
));
1008 dma_free_coherent(dev
, sizeof(*ctx
->enc_cd
),
1009 ctx
->dec_cd
, ctx
->dec_cd_paddr
);
1012 memset(ctx
->enc_cd
, 0, sizeof(*ctx
->dec_cd
));
1013 dma_free_coherent(dev
, sizeof(*ctx
->dec_cd
),
1014 ctx
->enc_cd
, ctx
->enc_cd_paddr
);
1019 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request
*req
)
1021 struct crypto_ablkcipher
*atfm
= crypto_ablkcipher_reqtfm(req
);
1022 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(atfm
);
1023 struct qat_alg_ablkcipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1024 struct qat_crypto_request
*qat_req
= ablkcipher_request_ctx(req
);
1025 struct icp_qat_fw_la_cipher_req_params
*cipher_param
;
1026 struct icp_qat_fw_la_bulk_req
*msg
;
1029 ret
= qat_alg_sgl_to_bufl(ctx
->inst
, NULL
, req
->src
, req
->dst
,
1034 msg
= &qat_req
->req
;
1035 *msg
= ctx
->enc_fw_req
;
1036 qat_req
->ablkcipher_ctx
= ctx
;
1037 qat_req
->ablkcipher_req
= req
;
1038 qat_req
->cb
= qat_ablkcipher_alg_callback
;
1039 qat_req
->req
.comn_mid
.opaque_data
= (uint64_t)(__force
long)qat_req
;
1040 qat_req
->req
.comn_mid
.src_data_addr
= qat_req
->buf
.blp
;
1041 qat_req
->req
.comn_mid
.dest_data_addr
= qat_req
->buf
.bloutp
;
1042 cipher_param
= (void *)&qat_req
->req
.serv_specif_rqpars
;
1043 cipher_param
->cipher_length
= req
->nbytes
;
1044 cipher_param
->cipher_offset
= 0;
1045 memcpy(cipher_param
->u
.cipher_IV_array
, req
->info
, AES_BLOCK_SIZE
);
1047 ret
= adf_send_message(ctx
->inst
->sym_tx
, (uint32_t *)msg
);
1048 } while (ret
== -EAGAIN
&& ctr
++ < 10);
1050 if (ret
== -EAGAIN
) {
1051 qat_alg_free_bufl(ctx
->inst
, qat_req
);
1054 return -EINPROGRESS
;
1057 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request
*req
)
1059 struct crypto_ablkcipher
*atfm
= crypto_ablkcipher_reqtfm(req
);
1060 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(atfm
);
1061 struct qat_alg_ablkcipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1062 struct qat_crypto_request
*qat_req
= ablkcipher_request_ctx(req
);
1063 struct icp_qat_fw_la_cipher_req_params
*cipher_param
;
1064 struct icp_qat_fw_la_bulk_req
*msg
;
1067 ret
= qat_alg_sgl_to_bufl(ctx
->inst
, NULL
, req
->src
, req
->dst
,
1072 msg
= &qat_req
->req
;
1073 *msg
= ctx
->dec_fw_req
;
1074 qat_req
->ablkcipher_ctx
= ctx
;
1075 qat_req
->ablkcipher_req
= req
;
1076 qat_req
->cb
= qat_ablkcipher_alg_callback
;
1077 qat_req
->req
.comn_mid
.opaque_data
= (uint64_t)(__force
long)qat_req
;
1078 qat_req
->req
.comn_mid
.src_data_addr
= qat_req
->buf
.blp
;
1079 qat_req
->req
.comn_mid
.dest_data_addr
= qat_req
->buf
.bloutp
;
1080 cipher_param
= (void *)&qat_req
->req
.serv_specif_rqpars
;
1081 cipher_param
->cipher_length
= req
->nbytes
;
1082 cipher_param
->cipher_offset
= 0;
1083 memcpy(cipher_param
->u
.cipher_IV_array
, req
->info
, AES_BLOCK_SIZE
);
1085 ret
= adf_send_message(ctx
->inst
->sym_tx
, (uint32_t *)msg
);
1086 } while (ret
== -EAGAIN
&& ctr
++ < 10);
1088 if (ret
== -EAGAIN
) {
1089 qat_alg_free_bufl(ctx
->inst
, qat_req
);
1092 return -EINPROGRESS
;
1095 static int qat_alg_aead_init(struct crypto_tfm
*tfm
,
1096 enum icp_qat_hw_auth_algo hash
,
1097 const char *hash_name
)
1099 struct qat_alg_aead_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1101 ctx
->hash_tfm
= crypto_alloc_shash(hash_name
, 0, 0);
1102 if (IS_ERR(ctx
->hash_tfm
))
1104 spin_lock_init(&ctx
->lock
);
1105 ctx
->qat_hash_alg
= hash
;
1106 tfm
->crt_aead
.reqsize
= sizeof(struct aead_request
) +
1107 sizeof(struct qat_crypto_request
);
1112 static int qat_alg_aead_sha1_init(struct crypto_tfm
*tfm
)
1114 return qat_alg_aead_init(tfm
, ICP_QAT_HW_AUTH_ALGO_SHA1
, "sha1");
1117 static int qat_alg_aead_sha256_init(struct crypto_tfm
*tfm
)
1119 return qat_alg_aead_init(tfm
, ICP_QAT_HW_AUTH_ALGO_SHA256
, "sha256");
1122 static int qat_alg_aead_sha512_init(struct crypto_tfm
*tfm
)
1124 return qat_alg_aead_init(tfm
, ICP_QAT_HW_AUTH_ALGO_SHA512
, "sha512");
1127 static void qat_alg_aead_exit(struct crypto_tfm
*tfm
)
1129 struct qat_alg_aead_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1130 struct qat_crypto_instance
*inst
= ctx
->inst
;
1133 if (!IS_ERR(ctx
->hash_tfm
))
1134 crypto_free_shash(ctx
->hash_tfm
);
1139 dev
= &GET_DEV(inst
->accel_dev
);
1141 memset(ctx
->enc_cd
, 0, sizeof(struct qat_alg_cd
));
1142 dma_free_coherent(dev
, sizeof(struct qat_alg_cd
),
1143 ctx
->enc_cd
, ctx
->enc_cd_paddr
);
1146 memset(ctx
->dec_cd
, 0, sizeof(struct qat_alg_cd
));
1147 dma_free_coherent(dev
, sizeof(struct qat_alg_cd
),
1148 ctx
->dec_cd
, ctx
->dec_cd_paddr
);
1150 qat_crypto_put_instance(inst
);
1153 static int qat_alg_ablkcipher_init(struct crypto_tfm
*tfm
)
1155 struct qat_alg_ablkcipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1157 spin_lock_init(&ctx
->lock
);
1158 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct ablkcipher_request
) +
1159 sizeof(struct qat_crypto_request
);
1164 static void qat_alg_ablkcipher_exit(struct crypto_tfm
*tfm
)
1166 struct qat_alg_ablkcipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1167 struct qat_crypto_instance
*inst
= ctx
->inst
;
1173 dev
= &GET_DEV(inst
->accel_dev
);
1175 memset(ctx
->enc_cd
, 0,
1176 sizeof(struct icp_qat_hw_cipher_algo_blk
));
1177 dma_free_coherent(dev
,
1178 sizeof(struct icp_qat_hw_cipher_algo_blk
),
1179 ctx
->enc_cd
, ctx
->enc_cd_paddr
);
1182 memset(ctx
->dec_cd
, 0,
1183 sizeof(struct icp_qat_hw_cipher_algo_blk
));
1184 dma_free_coherent(dev
,
1185 sizeof(struct icp_qat_hw_cipher_algo_blk
),
1186 ctx
->dec_cd
, ctx
->dec_cd_paddr
);
1188 qat_crypto_put_instance(inst
);
1191 static struct crypto_alg qat_algs
[] = { {
1192 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
1193 .cra_driver_name
= "qat_aes_cbc_hmac_sha1",
1194 .cra_priority
= 4001,
1195 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1196 .cra_blocksize
= AES_BLOCK_SIZE
,
1197 .cra_ctxsize
= sizeof(struct qat_alg_aead_ctx
),
1199 .cra_type
= &crypto_aead_type
,
1200 .cra_module
= THIS_MODULE
,
1201 .cra_init
= qat_alg_aead_sha1_init
,
1202 .cra_exit
= qat_alg_aead_exit
,
1205 .setkey
= qat_alg_aead_setkey
,
1206 .decrypt
= qat_alg_aead_dec
,
1207 .encrypt
= qat_alg_aead_enc
,
1208 .givencrypt
= qat_alg_aead_genivenc
,
1209 .ivsize
= AES_BLOCK_SIZE
,
1210 .maxauthsize
= SHA1_DIGEST_SIZE
,
1214 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
1215 .cra_driver_name
= "qat_aes_cbc_hmac_sha256",
1216 .cra_priority
= 4001,
1217 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1218 .cra_blocksize
= AES_BLOCK_SIZE
,
1219 .cra_ctxsize
= sizeof(struct qat_alg_aead_ctx
),
1221 .cra_type
= &crypto_aead_type
,
1222 .cra_module
= THIS_MODULE
,
1223 .cra_init
= qat_alg_aead_sha256_init
,
1224 .cra_exit
= qat_alg_aead_exit
,
1227 .setkey
= qat_alg_aead_setkey
,
1228 .decrypt
= qat_alg_aead_dec
,
1229 .encrypt
= qat_alg_aead_enc
,
1230 .givencrypt
= qat_alg_aead_genivenc
,
1231 .ivsize
= AES_BLOCK_SIZE
,
1232 .maxauthsize
= SHA256_DIGEST_SIZE
,
1236 .cra_name
= "authenc(hmac(sha512),cbc(aes))",
1237 .cra_driver_name
= "qat_aes_cbc_hmac_sha512",
1238 .cra_priority
= 4001,
1239 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1240 .cra_blocksize
= AES_BLOCK_SIZE
,
1241 .cra_ctxsize
= sizeof(struct qat_alg_aead_ctx
),
1243 .cra_type
= &crypto_aead_type
,
1244 .cra_module
= THIS_MODULE
,
1245 .cra_init
= qat_alg_aead_sha512_init
,
1246 .cra_exit
= qat_alg_aead_exit
,
1249 .setkey
= qat_alg_aead_setkey
,
1250 .decrypt
= qat_alg_aead_dec
,
1251 .encrypt
= qat_alg_aead_enc
,
1252 .givencrypt
= qat_alg_aead_genivenc
,
1253 .ivsize
= AES_BLOCK_SIZE
,
1254 .maxauthsize
= SHA512_DIGEST_SIZE
,
1258 .cra_name
= "cbc(aes)",
1259 .cra_driver_name
= "qat_aes_cbc",
1260 .cra_priority
= 4001,
1261 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
,
1262 .cra_blocksize
= AES_BLOCK_SIZE
,
1263 .cra_ctxsize
= sizeof(struct qat_alg_ablkcipher_ctx
),
1265 .cra_type
= &crypto_ablkcipher_type
,
1266 .cra_module
= THIS_MODULE
,
1267 .cra_init
= qat_alg_ablkcipher_init
,
1268 .cra_exit
= qat_alg_ablkcipher_exit
,
1271 .setkey
= qat_alg_ablkcipher_setkey
,
1272 .decrypt
= qat_alg_ablkcipher_decrypt
,
1273 .encrypt
= qat_alg_ablkcipher_encrypt
,
1274 .min_keysize
= AES_MIN_KEY_SIZE
,
1275 .max_keysize
= AES_MAX_KEY_SIZE
,
1276 .ivsize
= AES_BLOCK_SIZE
,
1281 int qat_algs_register(void)
1283 if (atomic_add_return(1, &active_dev
) == 1) {
1286 for (i
= 0; i
< ARRAY_SIZE(qat_algs
); i
++)
1287 qat_algs
[i
].cra_flags
=
1288 (qat_algs
[i
].cra_type
== &crypto_aead_type
) ?
1289 CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
:
1290 CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_ASYNC
;
1292 return crypto_register_algs(qat_algs
, ARRAY_SIZE(qat_algs
));
1297 int qat_algs_unregister(void)
1299 if (atomic_sub_return(1, &active_dev
) == 0)
1300 return crypto_unregister_algs(qat_algs
, ARRAY_SIZE(qat_algs
));
1304 int qat_algs_init(void)
1306 atomic_set(&active_dev
, 0);
1307 crypto_get_default_rng();
/* Module teardown counterpart of qat_algs_init(): release the RNG ref. */
void qat_algs_exit(void)
{
	crypto_put_default_rng();
}