crypto: qat - Fix incorrect uses of memzero_explicit
drivers/crypto/qat/qat_common/qat_algs.c
1 /*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
4
5 GPL LICENSE SUMMARY
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 Contact Information:
17 qat-linux@intel.com
18
19 BSD LICENSE
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
23 are met:
24
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
30 distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
34
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <crypto/rng.h>
57 #include <linux/dma-mapping.h>
58 #include "adf_accel_devices.h"
59 #include "adf_transport.h"
60 #include "adf_common_drv.h"
61 #include "qat_crypto.h"
62 #include "icp_qat_hw.h"
63 #include "icp_qat_fw.h"
64 #include "icp_qat_fw_la.h"
65
66 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
67 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
68 ICP_QAT_HW_CIPHER_NO_CONVERT, \
69 ICP_QAT_HW_CIPHER_ENCRYPT)
70
71 #define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
72 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74 ICP_QAT_HW_CIPHER_DECRYPT)
75
76 static atomic_t active_dev;
77
78 struct qat_alg_buf {
79 uint32_t len;
80 uint32_t resrvd;
81 uint64_t addr;
82 } __packed;
83
84 struct qat_alg_buf_list {
85 uint64_t resrvd;
86 uint32_t num_bufs;
87 uint32_t num_mapped_bufs;
88 struct qat_alg_buf bufers[];
89 } __packed __aligned(64);
90
91 /* Common content descriptor */
92 struct qat_alg_cd {
93 union {
94 struct qat_enc { /* Encrypt content desc */
95 struct icp_qat_hw_cipher_algo_blk cipher;
96 struct icp_qat_hw_auth_algo_blk hash;
97 } qat_enc_cd;
98 struct qat_dec { /* Decrypt content desc */
99 struct icp_qat_hw_auth_algo_blk hash;
100 struct icp_qat_hw_cipher_algo_blk cipher;
101 } qat_dec_cd;
102 };
103 } __aligned(64);
104
105 #define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
106
107 struct qat_auth_state {
108 uint8_t data[MAX_AUTH_STATE_SIZE + 64];
109 } __aligned(64);
110
111 struct qat_alg_aead_ctx {
112 struct qat_alg_cd *enc_cd;
113 struct qat_alg_cd *dec_cd;
114 dma_addr_t enc_cd_paddr;
115 dma_addr_t dec_cd_paddr;
116 struct icp_qat_fw_la_bulk_req enc_fw_req;
117 struct icp_qat_fw_la_bulk_req dec_fw_req;
118 struct crypto_shash *hash_tfm;
119 enum icp_qat_hw_auth_algo qat_hash_alg;
120 struct qat_crypto_instance *inst;
121 struct crypto_tfm *tfm;
122 uint8_t salt[AES_BLOCK_SIZE];
123 spinlock_t lock; /* protects qat_alg_aead_ctx struct */
124 };
125
126 struct qat_alg_ablkcipher_ctx {
127 struct icp_qat_hw_cipher_algo_blk *enc_cd;
128 struct icp_qat_hw_cipher_algo_blk *dec_cd;
129 dma_addr_t enc_cd_paddr;
130 dma_addr_t dec_cd_paddr;
131 struct icp_qat_fw_la_bulk_req enc_fw_req;
132 struct icp_qat_fw_la_bulk_req dec_fw_req;
133 struct qat_crypto_instance *inst;
134 struct crypto_tfm *tfm;
135 spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
136 };
137
138 static int get_current_node(void)
139 {
140 return cpu_data(current_thread_info()->cpu).phys_proc_id;
141 }
142
143 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
144 {
145 switch (qat_hash_alg) {
146 case ICP_QAT_HW_AUTH_ALGO_SHA1:
147 return ICP_QAT_HW_SHA1_STATE1_SZ;
148 case ICP_QAT_HW_AUTH_ALGO_SHA256:
149 return ICP_QAT_HW_SHA256_STATE1_SZ;
150 case ICP_QAT_HW_AUTH_ALGO_SHA512:
151 return ICP_QAT_HW_SHA512_STATE1_SZ;
152 default:
153 return -EFAULT;
154 }
155 return -EFAULT;
156 }
157
158 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
159 struct qat_alg_aead_ctx *ctx,
160 const uint8_t *auth_key,
161 unsigned int auth_keylen)
162 {
163 struct qat_auth_state auth_state;
164 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
165 struct sha1_state sha1;
166 struct sha256_state sha256;
167 struct sha512_state sha512;
168 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
169 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
170 uint8_t *ipad = auth_state.data;
171 uint8_t *opad = ipad + block_size;
172 __be32 *hash_state_out;
173 __be64 *hash512_state_out;
174 int i, offset;
175
176 memset(auth_state.data, 0, sizeof(auth_state.data));
177 shash->tfm = ctx->hash_tfm;
178 shash->flags = 0x0;
179
180 if (auth_keylen > block_size) {
181 char buff[SHA512_BLOCK_SIZE];
182 int ret = crypto_shash_digest(shash, auth_key,
183 auth_keylen, buff);
184 if (ret)
185 return ret;
186
187 memcpy(ipad, buff, digest_size);
188 memcpy(opad, buff, digest_size);
189 memzero_explicit(buff, sizeof(buff));
190 } else {
191 memcpy(ipad, auth_key, auth_keylen);
192 memcpy(opad, auth_key, auth_keylen);
193 }
194
195 for (i = 0; i < block_size; i++) {
196 char *ipad_ptr = ipad + i;
197 char *opad_ptr = opad + i;
198 *ipad_ptr ^= 0x36;
199 *opad_ptr ^= 0x5C;
200 }
201
202 if (crypto_shash_init(shash))
203 return -EFAULT;
204
205 if (crypto_shash_update(shash, ipad, block_size))
206 return -EFAULT;
207
208 hash_state_out = (__be32 *)hash->sha.state1;
209 hash512_state_out = (__be64 *)hash_state_out;
210
211 switch (ctx->qat_hash_alg) {
212 case ICP_QAT_HW_AUTH_ALGO_SHA1:
213 if (crypto_shash_export(shash, &sha1))
214 return -EFAULT;
215 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
216 *hash_state_out = cpu_to_be32(*(sha1.state + i));
217 break;
218 case ICP_QAT_HW_AUTH_ALGO_SHA256:
219 if (crypto_shash_export(shash, &sha256))
220 return -EFAULT;
221 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
222 *hash_state_out = cpu_to_be32(*(sha256.state + i));
223 break;
224 case ICP_QAT_HW_AUTH_ALGO_SHA512:
225 if (crypto_shash_export(shash, &sha512))
226 return -EFAULT;
227 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
228 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
229 break;
230 default:
231 return -EFAULT;
232 }
233
234 if (crypto_shash_init(shash))
235 return -EFAULT;
236
237 if (crypto_shash_update(shash, opad, block_size))
238 return -EFAULT;
239
240 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
241 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
242 hash512_state_out = (__be64 *)hash_state_out;
243
244 switch (ctx->qat_hash_alg) {
245 case ICP_QAT_HW_AUTH_ALGO_SHA1:
246 if (crypto_shash_export(shash, &sha1))
247 return -EFAULT;
248 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
249 *hash_state_out = cpu_to_be32(*(sha1.state + i));
250 break;
251 case ICP_QAT_HW_AUTH_ALGO_SHA256:
252 if (crypto_shash_export(shash, &sha256))
253 return -EFAULT;
254 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
255 *hash_state_out = cpu_to_be32(*(sha256.state + i));
256 break;
257 case ICP_QAT_HW_AUTH_ALGO_SHA512:
258 if (crypto_shash_export(shash, &sha512))
259 return -EFAULT;
260 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
261 *hash512_state_out = cpu_to_be64(*(sha512.state + i));
262 break;
263 default:
264 return -EFAULT;
265 }
266 memzero_explicit(ipad, block_size);
267 memzero_explicit(opad, block_size);
268 return 0;
269 }
270
271 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
272 {
273 header->hdr_flags =
274 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
275 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
276 header->comn_req_flags =
277 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
278 QAT_COMN_PTR_TYPE_SGL);
279 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
280 ICP_QAT_FW_LA_PARTIAL_NONE);
281 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
282 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
283 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
284 ICP_QAT_FW_LA_NO_PROTO);
285 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
286 ICP_QAT_FW_LA_NO_UPDATE_STATE);
287 }
288
289 static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
290 int alg,
291 struct crypto_authenc_keys *keys)
292 {
293 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
294 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
295 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
296 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
297 struct icp_qat_hw_auth_algo_blk *hash =
298 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
299 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
300 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
301 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
302 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
303 void *ptr = &req_tmpl->cd_ctrl;
304 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
305 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
306
307 /* CD setup */
308 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
309 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
310 hash->sha.inner_setup.auth_config.config =
311 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
312 ctx->qat_hash_alg, digestsize);
313 hash->sha.inner_setup.auth_counter.counter =
314 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
315
316 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
317 return -EFAULT;
318
319 /* Request setup */
320 qat_alg_init_common_hdr(header);
321 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
322 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
323 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
324 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
325 ICP_QAT_FW_LA_RET_AUTH_RES);
326 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
327 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
328 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
329 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
330
331 /* Cipher CD config setup */
332 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
333 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
334 cipher_cd_ctrl->cipher_cfg_offset = 0;
335 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
336 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
337 /* Auth CD config setup */
338 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
339 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
340 hash_cd_ctrl->inner_res_sz = digestsize;
341 hash_cd_ctrl->final_sz = digestsize;
342
343 switch (ctx->qat_hash_alg) {
344 case ICP_QAT_HW_AUTH_ALGO_SHA1:
345 hash_cd_ctrl->inner_state1_sz =
346 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
347 hash_cd_ctrl->inner_state2_sz =
348 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
349 break;
350 case ICP_QAT_HW_AUTH_ALGO_SHA256:
351 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
352 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
353 break;
354 case ICP_QAT_HW_AUTH_ALGO_SHA512:
355 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
356 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
357 break;
358 default:
359 break;
360 }
361 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
362 ((sizeof(struct icp_qat_hw_auth_setup) +
363 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
364 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
365 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
366 return 0;
367 }
368
369 static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
370 int alg,
371 struct crypto_authenc_keys *keys)
372 {
373 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
374 unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
375 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
376 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
377 struct icp_qat_hw_cipher_algo_blk *cipher =
378 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
379 sizeof(struct icp_qat_hw_auth_setup) +
380 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
381 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
382 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
383 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
384 void *ptr = &req_tmpl->cd_ctrl;
385 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
386 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
387 struct icp_qat_fw_la_auth_req_params *auth_param =
388 (struct icp_qat_fw_la_auth_req_params *)
389 ((char *)&req_tmpl->serv_specif_rqpars +
390 sizeof(struct icp_qat_fw_la_cipher_req_params));
391
392 /* CD setup */
393 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
394 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
395 hash->sha.inner_setup.auth_config.config =
396 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
397 ctx->qat_hash_alg,
398 digestsize);
399 hash->sha.inner_setup.auth_counter.counter =
400 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
401
402 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
403 return -EFAULT;
404
405 /* Request setup */
406 qat_alg_init_common_hdr(header);
407 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
408 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
409 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
410 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
411 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
412 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
413 ICP_QAT_FW_LA_CMP_AUTH_RES);
414 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
415 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
416
417 /* Cipher CD config setup */
418 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
419 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
420 cipher_cd_ctrl->cipher_cfg_offset =
421 (sizeof(struct icp_qat_hw_auth_setup) +
422 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
423 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
424 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
425
426 /* Auth CD config setup */
427 hash_cd_ctrl->hash_cfg_offset = 0;
428 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
429 hash_cd_ctrl->inner_res_sz = digestsize;
430 hash_cd_ctrl->final_sz = digestsize;
431
432 switch (ctx->qat_hash_alg) {
433 case ICP_QAT_HW_AUTH_ALGO_SHA1:
434 hash_cd_ctrl->inner_state1_sz =
435 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
436 hash_cd_ctrl->inner_state2_sz =
437 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
438 break;
439 case ICP_QAT_HW_AUTH_ALGO_SHA256:
440 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
441 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
442 break;
443 case ICP_QAT_HW_AUTH_ALGO_SHA512:
444 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
445 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
446 break;
447 default:
448 break;
449 }
450
451 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
452 ((sizeof(struct icp_qat_hw_auth_setup) +
453 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
454 auth_param->auth_res_sz = digestsize;
455 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
456 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
457 return 0;
458 }
459
460 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
461 struct icp_qat_fw_la_bulk_req *req,
462 struct icp_qat_hw_cipher_algo_blk *cd,
463 const uint8_t *key, unsigned int keylen)
464 {
465 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
466 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
467 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
468
469 memcpy(cd->aes.key, key, keylen);
470 qat_alg_init_common_hdr(header);
471 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
472 cd_pars->u.s.content_desc_params_sz =
473 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
474 /* Cipher CD config setup */
475 cd_ctrl->cipher_key_sz = keylen >> 3;
476 cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
477 cd_ctrl->cipher_cfg_offset = 0;
478 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
479 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
480 }
481
482 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
483 int alg, const uint8_t *key,
484 unsigned int keylen)
485 {
486 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
487 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
488 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
489
490 qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
491 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
492 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
493 }
494
495 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
496 int alg, const uint8_t *key,
497 unsigned int keylen)
498 {
499 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
500 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
501 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
502
503 qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
504 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
505 dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
506 }
507
508 static int qat_alg_validate_key(int key_len, int *alg)
509 {
510 switch (key_len) {
511 case AES_KEYSIZE_128:
512 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
513 break;
514 case AES_KEYSIZE_192:
515 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
516 break;
517 case AES_KEYSIZE_256:
518 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
519 break;
520 default:
521 return -EINVAL;
522 }
523 return 0;
524 }
525
526 static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
527 const uint8_t *key, unsigned int keylen)
528 {
529 struct crypto_authenc_keys keys;
530 int alg;
531
532 if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
533 return -EFAULT;
534
535 if (crypto_authenc_extractkeys(&keys, key, keylen))
536 goto bad_key;
537
538 if (qat_alg_validate_key(keys.enckeylen, &alg))
539 goto bad_key;
540
541 if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
542 goto error;
543
544 if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
545 goto error;
546
547 return 0;
548 bad_key:
549 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
550 return -EINVAL;
551 error:
552 return -EFAULT;
553 }
554
555 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
556 const uint8_t *key,
557 unsigned int keylen)
558 {
559 int alg;
560
561 if (qat_alg_validate_key(keylen, &alg))
562 goto bad_key;
563
564 qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
565 qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
566 return 0;
567 bad_key:
568 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
569 return -EINVAL;
570 }
571
572 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
573 unsigned int keylen)
574 {
575 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
576 struct device *dev;
577
578 spin_lock(&ctx->lock);
579 if (ctx->enc_cd) {
580 /* rekeying */
581 dev = &GET_DEV(ctx->inst->accel_dev);
582 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
583 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
584 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
585 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
586 } else {
587 /* new key */
588 int node = get_current_node();
589 struct qat_crypto_instance *inst =
590 qat_crypto_get_instance_node(node);
591 if (!inst) {
592 spin_unlock(&ctx->lock);
593 return -EINVAL;
594 }
595
596 dev = &GET_DEV(inst->accel_dev);
597 ctx->inst = inst;
598 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
599 &ctx->enc_cd_paddr,
600 GFP_ATOMIC);
601 if (!ctx->enc_cd) {
602 spin_unlock(&ctx->lock);
603 return -ENOMEM;
604 }
605 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
606 &ctx->dec_cd_paddr,
607 GFP_ATOMIC);
608 if (!ctx->dec_cd) {
609 spin_unlock(&ctx->lock);
610 goto out_free_enc;
611 }
612 }
613 spin_unlock(&ctx->lock);
614 if (qat_alg_aead_init_sessions(ctx, key, keylen))
615 goto out_free_all;
616
617 return 0;
618
619 out_free_all:
620 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
621 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
622 ctx->dec_cd, ctx->dec_cd_paddr);
623 ctx->dec_cd = NULL;
624 out_free_enc:
625 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
626 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
627 ctx->enc_cd, ctx->enc_cd_paddr);
628 ctx->enc_cd = NULL;
629 return -ENOMEM;
630 }
631
632 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
633 struct qat_crypto_request *qat_req)
634 {
635 struct device *dev = &GET_DEV(inst->accel_dev);
636 struct qat_alg_buf_list *bl = qat_req->buf.bl;
637 struct qat_alg_buf_list *blout = qat_req->buf.blout;
638 dma_addr_t blp = qat_req->buf.blp;
639 dma_addr_t blpout = qat_req->buf.bloutp;
640 size_t sz = qat_req->buf.sz;
641 size_t sz_out = qat_req->buf.sz_out;
642 int i;
643
644 for (i = 0; i < bl->num_bufs; i++)
645 dma_unmap_single(dev, bl->bufers[i].addr,
646 bl->bufers[i].len, DMA_BIDIRECTIONAL);
647
648 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
649 kfree(bl);
650 if (blp != blpout) {
651 /* If out of place operation dma unmap only data */
652 int bufless = blout->num_bufs - blout->num_mapped_bufs;
653
654 for (i = bufless; i < blout->num_bufs; i++) {
655 dma_unmap_single(dev, blout->bufers[i].addr,
656 blout->bufers[i].len,
657 DMA_BIDIRECTIONAL);
658 }
659 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
660 kfree(blout);
661 }
662 }
663
664 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
665 struct scatterlist *assoc,
666 struct scatterlist *sgl,
667 struct scatterlist *sglout, uint8_t *iv,
668 uint8_t ivlen,
669 struct qat_crypto_request *qat_req)
670 {
671 struct device *dev = &GET_DEV(inst->accel_dev);
672 int i, bufs = 0, sg_nctr = 0;
673 int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
674 struct qat_alg_buf_list *bufl;
675 struct qat_alg_buf_list *buflout = NULL;
676 dma_addr_t blp;
677 dma_addr_t bloutp = 0;
678 struct scatterlist *sg;
679 size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
680 ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
681
682 if (unlikely(!n))
683 return -EINVAL;
684
685 bufl = kzalloc_node(sz, GFP_ATOMIC,
686 dev_to_node(&GET_DEV(inst->accel_dev)));
687 if (unlikely(!bufl))
688 return -ENOMEM;
689
690 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
691 if (unlikely(dma_mapping_error(dev, blp)))
692 goto err;
693
694 for_each_sg(assoc, sg, assoc_n, i) {
695 if (!sg->length)
696 continue;
697 bufl->bufers[bufs].addr = dma_map_single(dev,
698 sg_virt(sg),
699 sg->length,
700 DMA_BIDIRECTIONAL);
701 bufl->bufers[bufs].len = sg->length;
702 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
703 goto err;
704 bufs++;
705 }
706 if (ivlen) {
707 bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
708 DMA_BIDIRECTIONAL);
709 bufl->bufers[bufs].len = ivlen;
710 if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
711 goto err;
712 bufs++;
713 }
714
715 for_each_sg(sgl, sg, n, i) {
716 int y = sg_nctr + bufs;
717
718 if (!sg->length)
719 continue;
720
721 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
722 sg->length,
723 DMA_BIDIRECTIONAL);
724 bufl->bufers[y].len = sg->length;
725 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
726 goto err;
727 sg_nctr++;
728 }
729 bufl->num_bufs = sg_nctr + bufs;
730 qat_req->buf.bl = bufl;
731 qat_req->buf.blp = blp;
732 qat_req->buf.sz = sz;
733 /* Handle out of place operation */
734 if (sgl != sglout) {
735 struct qat_alg_buf *bufers;
736
737 n = sg_nents(sglout);
738 sz_out = sizeof(struct qat_alg_buf_list) +
739 ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
740 sg_nctr = 0;
741 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
742 dev_to_node(&GET_DEV(inst->accel_dev)));
743 if (unlikely(!buflout))
744 goto err;
745 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
746 if (unlikely(dma_mapping_error(dev, bloutp)))
747 goto err;
748 bufers = buflout->bufers;
749 /* For out of place operation dma map only data and
750 * reuse assoc mapping and iv */
751 for (i = 0; i < bufs; i++) {
752 bufers[i].len = bufl->bufers[i].len;
753 bufers[i].addr = bufl->bufers[i].addr;
754 }
755 for_each_sg(sglout, sg, n, i) {
756 int y = sg_nctr + bufs;
757
758 if (!sg->length)
759 continue;
760
761 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
762 sg->length,
763 DMA_BIDIRECTIONAL);
764 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
765 goto err;
766 bufers[y].len = sg->length;
767 sg_nctr++;
768 }
769 buflout->num_bufs = sg_nctr + bufs;
770 buflout->num_mapped_bufs = sg_nctr;
771 qat_req->buf.blout = buflout;
772 qat_req->buf.bloutp = bloutp;
773 qat_req->buf.sz_out = sz_out;
774 } else {
775 /* Otherwise set the src and dst to the same address */
776 qat_req->buf.bloutp = qat_req->buf.blp;
777 qat_req->buf.sz_out = 0;
778 }
779 return 0;
780 err:
781 dev_err(dev, "Failed to map buf for dma\n");
782 sg_nctr = 0;
783 for (i = 0; i < n + bufs; i++)
784 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
785 dma_unmap_single(dev, bufl->bufers[i].addr,
786 bufl->bufers[i].len,
787 DMA_BIDIRECTIONAL);
788
789 if (!dma_mapping_error(dev, blp))
790 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
791 kfree(bufl);
792 if (sgl != sglout && buflout) {
793 n = sg_nents(sglout);
794 for (i = bufs; i < n + bufs; i++)
795 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
796 dma_unmap_single(dev, buflout->bufers[i].addr,
797 buflout->bufers[i].len,
798 DMA_BIDIRECTIONAL);
799 if (!dma_mapping_error(dev, bloutp))
800 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
801 kfree(buflout);
802 }
803 return -ENOMEM;
804 }
805
806 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
807 struct qat_crypto_request *qat_req)
808 {
809 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
810 struct qat_crypto_instance *inst = ctx->inst;
811 struct aead_request *areq = qat_req->aead_req;
812 uint8_t stat_field = qat_resp->comn_resp.comn_status;
813 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
814
815 qat_alg_free_bufl(inst, qat_req);
816 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
817 res = -EBADMSG;
818 areq->base.complete(&areq->base, res);
819 }
820
821 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
822 struct qat_crypto_request *qat_req)
823 {
824 struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
825 struct qat_crypto_instance *inst = ctx->inst;
826 struct ablkcipher_request *areq = qat_req->ablkcipher_req;
827 uint8_t stat_field = qat_resp->comn_resp.comn_status;
828 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
829
830 qat_alg_free_bufl(inst, qat_req);
831 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
832 res = -EINVAL;
833 areq->base.complete(&areq->base, res);
834 }
835
836 void qat_alg_callback(void *resp)
837 {
838 struct icp_qat_fw_la_resp *qat_resp = resp;
839 struct qat_crypto_request *qat_req =
840 (void *)(__force long)qat_resp->opaque_data;
841
842 qat_req->cb(qat_resp, qat_req);
843 }
844
845 static int qat_alg_aead_dec(struct aead_request *areq)
846 {
847 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
848 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
849 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
850 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
851 struct icp_qat_fw_la_cipher_req_params *cipher_param;
852 struct icp_qat_fw_la_auth_req_params *auth_param;
853 struct icp_qat_fw_la_bulk_req *msg;
854 int digst_size = crypto_aead_crt(aead_tfm)->authsize;
855 int ret, ctr = 0;
856
857 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
858 areq->iv, AES_BLOCK_SIZE, qat_req);
859 if (unlikely(ret))
860 return ret;
861
862 msg = &qat_req->req;
863 *msg = ctx->dec_fw_req;
864 qat_req->aead_ctx = ctx;
865 qat_req->aead_req = areq;
866 qat_req->cb = qat_aead_alg_callback;
867 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
868 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
869 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
870 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
871 cipher_param->cipher_length = areq->cryptlen - digst_size;
872 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
873 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
874 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
875 auth_param->auth_off = 0;
876 auth_param->auth_len = areq->assoclen +
877 cipher_param->cipher_length + AES_BLOCK_SIZE;
878 do {
879 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
880 } while (ret == -EAGAIN && ctr++ < 10);
881
882 if (ret == -EAGAIN) {
883 qat_alg_free_bufl(ctx->inst, qat_req);
884 return -EBUSY;
885 }
886 return -EINPROGRESS;
887 }
888
889 static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
890 int enc_iv)
891 {
892 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
893 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
894 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
895 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
896 struct icp_qat_fw_la_cipher_req_params *cipher_param;
897 struct icp_qat_fw_la_auth_req_params *auth_param;
898 struct icp_qat_fw_la_bulk_req *msg;
899 int ret, ctr = 0;
900
901 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
902 iv, AES_BLOCK_SIZE, qat_req);
903 if (unlikely(ret))
904 return ret;
905
906 msg = &qat_req->req;
907 *msg = ctx->enc_fw_req;
908 qat_req->aead_ctx = ctx;
909 qat_req->aead_req = areq;
910 qat_req->cb = qat_aead_alg_callback;
911 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
912 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
913 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
914 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
915 auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
916
917 if (enc_iv) {
918 cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
919 cipher_param->cipher_offset = areq->assoclen;
920 } else {
921 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
922 cipher_param->cipher_length = areq->cryptlen;
923 cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
924 }
925 auth_param->auth_off = 0;
926 auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
927
928 do {
929 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
930 } while (ret == -EAGAIN && ctr++ < 10);
931
932 if (ret == -EAGAIN) {
933 qat_alg_free_bufl(ctx->inst, qat_req);
934 return -EBUSY;
935 }
936 return -EINPROGRESS;
937 }
938
939 static int qat_alg_aead_enc(struct aead_request *areq)
940 {
941 return qat_alg_aead_enc_internal(areq, areq->iv, 0);
942 }
943
944 static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
945 {
946 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
947 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
948 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
949 __be64 seq;
950
951 memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
952 seq = cpu_to_be64(req->seq);
953 memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
954 &seq, sizeof(uint64_t));
955 return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
956 }
957
958 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
959 const uint8_t *key,
960 unsigned int keylen)
961 {
962 struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
963 struct device *dev;
964
965 spin_lock(&ctx->lock);
966 if (ctx->enc_cd) {
967 /* rekeying */
968 dev = &GET_DEV(ctx->inst->accel_dev);
969 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
970 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
971 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
972 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
973 } else {
974 /* new key */
975 int node = get_current_node();
976 struct qat_crypto_instance *inst =
977 qat_crypto_get_instance_node(node);
978 if (!inst) {
979 spin_unlock(&ctx->lock);
980 return -EINVAL;
981 }
982
983 dev = &GET_DEV(inst->accel_dev);
984 ctx->inst = inst;
985 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
986 &ctx->enc_cd_paddr,
987 GFP_ATOMIC);
988 if (!ctx->enc_cd) {
989 spin_unlock(&ctx->lock);
990 return -ENOMEM;
991 }
992 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
993 &ctx->dec_cd_paddr,
994 GFP_ATOMIC);
995 if (!ctx->dec_cd) {
996 spin_unlock(&ctx->lock);
997 goto out_free_enc;
998 }
999 }
1000 spin_unlock(&ctx->lock);
1001 if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
1002 goto out_free_all;
1003
1004 return 0;
1005
1006 out_free_all:
1007 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
1008 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
1009 ctx->dec_cd, ctx->dec_cd_paddr);
1010 ctx->dec_cd = NULL;
1011 out_free_enc:
1012 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
1013 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
1014 ctx->enc_cd, ctx->enc_cd_paddr);
1015 ctx->enc_cd = NULL;
1016 return -ENOMEM;
1017 }
1018
1019 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
1020 {
1021 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1022 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1023 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1024 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1025 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1026 struct icp_qat_fw_la_bulk_req *msg;
1027 int ret, ctr = 0;
1028
1029 ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
1030 NULL, 0, qat_req);
1031 if (unlikely(ret))
1032 return ret;
1033
1034 msg = &qat_req->req;
1035 *msg = ctx->enc_fw_req;
1036 qat_req->ablkcipher_ctx = ctx;
1037 qat_req->ablkcipher_req = req;
1038 qat_req->cb = qat_ablkcipher_alg_callback;
1039 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1040 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1041 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1042 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1043 cipher_param->cipher_length = req->nbytes;
1044 cipher_param->cipher_offset = 0;
1045 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1046 do {
1047 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1048 } while (ret == -EAGAIN && ctr++ < 10);
1049
1050 if (ret == -EAGAIN) {
1051 qat_alg_free_bufl(ctx->inst, qat_req);
1052 return -EBUSY;
1053 }
1054 return -EINPROGRESS;
1055 }
1056
1057 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1058 {
1059 struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1060 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1061 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1062 struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1063 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1064 struct icp_qat_fw_la_bulk_req *msg;
1065 int ret, ctr = 0;
1066
1067 ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
1068 NULL, 0, qat_req);
1069 if (unlikely(ret))
1070 return ret;
1071
1072 msg = &qat_req->req;
1073 *msg = ctx->dec_fw_req;
1074 qat_req->ablkcipher_ctx = ctx;
1075 qat_req->ablkcipher_req = req;
1076 qat_req->cb = qat_ablkcipher_alg_callback;
1077 qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1078 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1079 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1080 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1081 cipher_param->cipher_length = req->nbytes;
1082 cipher_param->cipher_offset = 0;
1083 memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1084 do {
1085 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1086 } while (ret == -EAGAIN && ctr++ < 10);
1087
1088 if (ret == -EAGAIN) {
1089 qat_alg_free_bufl(ctx->inst, qat_req);
1090 return -EBUSY;
1091 }
1092 return -EINPROGRESS;
1093 }
1094
1095 static int qat_alg_aead_init(struct crypto_tfm *tfm,
1096 enum icp_qat_hw_auth_algo hash,
1097 const char *hash_name)
1098 {
1099 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
1100
1101 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1102 if (IS_ERR(ctx->hash_tfm))
1103 return -EFAULT;
1104 spin_lock_init(&ctx->lock);
1105 ctx->qat_hash_alg = hash;
1106 tfm->crt_aead.reqsize = sizeof(struct aead_request) +
1107 sizeof(struct qat_crypto_request);
1108 ctx->tfm = tfm;
1109 return 0;
1110 }
1111
1112 static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
1113 {
1114 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1115 }
1116
1117 static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
1118 {
1119 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1120 }
1121
1122 static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
1123 {
1124 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1125 }
1126
1127 static void qat_alg_aead_exit(struct crypto_tfm *tfm)
1128 {
1129 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
1130 struct qat_crypto_instance *inst = ctx->inst;
1131 struct device *dev;
1132
1133 if (!IS_ERR(ctx->hash_tfm))
1134 crypto_free_shash(ctx->hash_tfm);
1135
1136 if (!inst)
1137 return;
1138
1139 dev = &GET_DEV(inst->accel_dev);
1140 if (ctx->enc_cd) {
1141 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1142 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1143 ctx->enc_cd, ctx->enc_cd_paddr);
1144 }
1145 if (ctx->dec_cd) {
1146 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1147 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1148 ctx->dec_cd, ctx->dec_cd_paddr);
1149 }
1150 qat_crypto_put_instance(inst);
1151 }
1152
1153 static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1154 {
1155 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1156
1157 spin_lock_init(&ctx->lock);
1158 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
1159 sizeof(struct qat_crypto_request);
1160 ctx->tfm = tfm;
1161 return 0;
1162 }
1163
1164 static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
1165 {
1166 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1167 struct qat_crypto_instance *inst = ctx->inst;
1168 struct device *dev;
1169
1170 if (!inst)
1171 return;
1172
1173 dev = &GET_DEV(inst->accel_dev);
1174 if (ctx->enc_cd) {
1175 memset(ctx->enc_cd, 0,
1176 sizeof(struct icp_qat_hw_cipher_algo_blk));
1177 dma_free_coherent(dev,
1178 sizeof(struct icp_qat_hw_cipher_algo_blk),
1179 ctx->enc_cd, ctx->enc_cd_paddr);
1180 }
1181 if (ctx->dec_cd) {
1182 memset(ctx->dec_cd, 0,
1183 sizeof(struct icp_qat_hw_cipher_algo_blk));
1184 dma_free_coherent(dev,
1185 sizeof(struct icp_qat_hw_cipher_algo_blk),
1186 ctx->dec_cd, ctx->dec_cd_paddr);
1187 }
1188 qat_crypto_put_instance(inst);
1189 }
1190
1191 static struct crypto_alg qat_algs[] = { {
1192 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1193 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1194 .cra_priority = 4001,
1195 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1196 .cra_blocksize = AES_BLOCK_SIZE,
1197 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1198 .cra_alignmask = 0,
1199 .cra_type = &crypto_aead_type,
1200 .cra_module = THIS_MODULE,
1201 .cra_init = qat_alg_aead_sha1_init,
1202 .cra_exit = qat_alg_aead_exit,
1203 .cra_u = {
1204 .aead = {
1205 .setkey = qat_alg_aead_setkey,
1206 .decrypt = qat_alg_aead_dec,
1207 .encrypt = qat_alg_aead_enc,
1208 .givencrypt = qat_alg_aead_genivenc,
1209 .ivsize = AES_BLOCK_SIZE,
1210 .maxauthsize = SHA1_DIGEST_SIZE,
1211 },
1212 },
1213 }, {
1214 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1215 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1216 .cra_priority = 4001,
1217 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1218 .cra_blocksize = AES_BLOCK_SIZE,
1219 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1220 .cra_alignmask = 0,
1221 .cra_type = &crypto_aead_type,
1222 .cra_module = THIS_MODULE,
1223 .cra_init = qat_alg_aead_sha256_init,
1224 .cra_exit = qat_alg_aead_exit,
1225 .cra_u = {
1226 .aead = {
1227 .setkey = qat_alg_aead_setkey,
1228 .decrypt = qat_alg_aead_dec,
1229 .encrypt = qat_alg_aead_enc,
1230 .givencrypt = qat_alg_aead_genivenc,
1231 .ivsize = AES_BLOCK_SIZE,
1232 .maxauthsize = SHA256_DIGEST_SIZE,
1233 },
1234 },
1235 }, {
1236 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1237 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1238 .cra_priority = 4001,
1239 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1240 .cra_blocksize = AES_BLOCK_SIZE,
1241 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1242 .cra_alignmask = 0,
1243 .cra_type = &crypto_aead_type,
1244 .cra_module = THIS_MODULE,
1245 .cra_init = qat_alg_aead_sha512_init,
1246 .cra_exit = qat_alg_aead_exit,
1247 .cra_u = {
1248 .aead = {
1249 .setkey = qat_alg_aead_setkey,
1250 .decrypt = qat_alg_aead_dec,
1251 .encrypt = qat_alg_aead_enc,
1252 .givencrypt = qat_alg_aead_genivenc,
1253 .ivsize = AES_BLOCK_SIZE,
1254 .maxauthsize = SHA512_DIGEST_SIZE,
1255 },
1256 },
1257 }, {
1258 .cra_name = "cbc(aes)",
1259 .cra_driver_name = "qat_aes_cbc",
1260 .cra_priority = 4001,
1261 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1262 .cra_blocksize = AES_BLOCK_SIZE,
1263 .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1264 .cra_alignmask = 0,
1265 .cra_type = &crypto_ablkcipher_type,
1266 .cra_module = THIS_MODULE,
1267 .cra_init = qat_alg_ablkcipher_init,
1268 .cra_exit = qat_alg_ablkcipher_exit,
1269 .cra_u = {
1270 .ablkcipher = {
1271 .setkey = qat_alg_ablkcipher_setkey,
1272 .decrypt = qat_alg_ablkcipher_decrypt,
1273 .encrypt = qat_alg_ablkcipher_encrypt,
1274 .min_keysize = AES_MIN_KEY_SIZE,
1275 .max_keysize = AES_MAX_KEY_SIZE,
1276 .ivsize = AES_BLOCK_SIZE,
1277 },
1278 },
1279 } };
1280
1281 int qat_algs_register(void)
1282 {
1283 if (atomic_add_return(1, &active_dev) == 1) {
1284 int i;
1285
1286 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1287 qat_algs[i].cra_flags =
1288 (qat_algs[i].cra_type == &crypto_aead_type) ?
1289 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
1290 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1291
1292 return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1293 }
1294 return 0;
1295 }
1296
1297 int qat_algs_unregister(void)
1298 {
1299 if (atomic_sub_return(1, &active_dev) == 0)
1300 return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1301 return 0;
1302 }
1303
1304 int qat_algs_init(void)
1305 {
1306 atomic_set(&active_dev, 0);
1307 crypto_get_default_rng();
1308 return 0;
1309 }
1310
1311 void qat_algs_exit(void)
1312 {
1313 crypto_put_default_rng();
1314 }