crypto: aead - Remove CRYPTO_ALG_AEAD_NEW flag
deliverable/linux.git: drivers/crypto/qat/qat_common/qat_algs.c
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
};

struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
};

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	};
	return -EFAULT;
}

static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	char ipad[block_size];
	char opad[block_size];
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;
	shash->flags = 0x0;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ipad);
		if (ret)
			return ret;

		memcpy(opad, ipad, digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}
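
/*
 * Illustrative sketch (editorial, not part of the driver): the precompute
 * above is the standard HMAC key schedule from RFC 2104.  The key is
 * zero-padded (or first digested) to one hash block, XORed with the
 * 0x36/0x5c pads, and one compression pass over each pad is exported so the
 * hardware can resume the inner and outer hashes per request.  In plain
 * software the same digest would be produced roughly as below; the function
 * name is hypothetical and exists only to show the construction.
 */
#if 0
static void hmac_reference(struct crypto_shash *tfm, const u8 *key,
			   unsigned int keylen, const u8 *msg,
			   unsigned int msglen, u8 *digest)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	unsigned int bs = crypto_shash_blocksize(tfm);
	unsigned int ds = crypto_shash_digestsize(tfm);
	u8 ipad[SHA512_BLOCK_SIZE] = {};
	u8 opad[SHA512_BLOCK_SIZE] = {};
	u8 inner[SHA512_DIGEST_SIZE];
	int i;

	desc->tfm = tfm;
	desc->flags = 0;
	memcpy(ipad, key, keylen);	/* assumes keylen <= bs */
	memcpy(opad, key, keylen);
	for (i = 0; i < bs; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
	/* inner = H((K ^ ipad) || msg), digest = H((K ^ opad) || inner) */
	crypto_shash_init(desc);
	crypto_shash_update(desc, ipad, bs);
	crypto_shash_update(desc, msg, msglen);
	crypto_shash_final(desc, inner);
	crypto_shash_init(desc);
	crypto_shash_update(desc, opad, bs);
	crypto_shash_update(desc, inner, ds);
	crypto_shash_final(desc, digest);
}
#endif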

static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
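
/*
 * Note (editorial, hedged): the encrypt content descriptor built above is a
 * single DMA-coherent block.  The cipher config and AES key come first, and
 * the hash setup with the precomputed inner/outer HMAC state follows
 * immediately after the key, which is why hash_cfg_offset is derived from
 * the pointer difference between the hash and cipher blocks (in 8-byte
 * words).  The decrypt descriptor built below uses the opposite order so
 * the auth slice can run before the cipher slice.
 */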

static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
}

static int qat_alg_validate_key(int key_len, int *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case AES_KEYSIZE_192:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case AES_KEYSIZE_256:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm,
				      const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const uint8_t *key,
					    unsigned int keylen)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			goto out_free_enc;
		}
	}
	if (qat_alg_aead_init_sessions(tfm, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}
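
/*
 * Illustrative sketch (editorial, hedged): the key blob handed to the setkey
 * above is the generic authenc() format that crypto_authenc_extractkeys()
 * parses - an rtattr carrying struct crypto_authenc_key_param (the
 * big-endian cipher key length), followed by the HMAC key and then the AES
 * key.  A kernel caller could build it roughly as below; all names are local
 * to the example.
 */
#if 0
static int example_build_authenc_key(u8 *buf, unsigned int buflen,
				     const u8 *authkey, unsigned int authlen,
				     const u8 *enckey, unsigned int enclen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	if (buflen < RTA_SPACE(sizeof(*param)) + authlen + enclen)
		return -EINVAL;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enclen);
	/* auth key first, cipher key immediately after it */
	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authlen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + authlen, enckey, enclen);
	return RTA_SPACE(sizeof(*param)) + authlen + enclen;
}
#endif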

static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}

static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	sg_nctr = 0;
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		n = sg_nents(sglout);
		for (i = 0; i < n; i++)
			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
				dma_unmap_single(dev, buflout->bufers[i].addr,
						 buflout->bufers[i].len,
						 DMA_BIDIRECTIONAL);
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}
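
/*
 * Note (editorial): qat_alg_sgl_to_bufl() flattens a kernel scatterlist into
 * the firmware's qat_alg_buf_list - a 64-byte aligned table of
 * {len, reserved, dma_addr} entries that is itself DMA mapped, so the device
 * can fetch both the descriptor table and the data buffers.  For out-of-place
 * requests a second table is built for the destination scatterlist;
 * qat_alg_free_bufl() above undoes both sets of mappings on completion.
 */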

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;
	areq->base.complete(&areq->base, res);
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}

static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	uint8_t *iv = areq->iv;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
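
/*
 * Note (editorial, hedged): with the new AEAD layout the associated data
 * sits at the front of src/dst, so both paths start ciphering at an offset
 * of assoclen.  On encrypt the whole cryptlen is ciphered and the HMAC
 * covers assoclen + cryptlen; on decrypt the trailing digest is excluded
 * from the cipher length, and because the decrypt session sets
 * ICP_QAT_FW_LA_CMP_AUTH_RES the hardware verifies the tag itself, with a
 * mismatch surfacing as -EBADMSG through the completion callback above.
 */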

static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const uint8_t *key,
				     unsigned int keylen)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
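
/*
 * Illustrative sketch (editorial, not part of the driver, hedged): a kernel
 * caller reaches the "cbc(aes)" implementation registered below through the
 * generic ablkcipher API of this kernel generation; the QAT driver is then
 * typically chosen because of its high cra_priority.  Buffer, key and length
 * values here are made up.
 */
#if 0
static void example_cbc_done(struct crypto_async_request *req, int err)
{
	if (err != -EINPROGRESS)
		complete(req->data);
}

static int example_cbc_aes_encrypt(void *buf, unsigned int len,
				   const u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					example_cbc_done, &done);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
#endif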

static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
				     sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
					sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct crypto_alg qat_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };
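
/*
 * Illustrative sketch (editorial, not part of the driver, hedged): the AEADs
 * above follow the newer kernel AEAD convention in which the associated data
 * is the leading part of the source/destination scatterlist and its length
 * is set with aead_request_set_ad().  A caller would use them roughly as
 * below; the key is the rtattr-framed authenc() blob shown earlier, and all
 * names and sizes are made up for the example.
 */
#if 0
static void example_aead_done(struct crypto_async_request *areq, int err)
{
	if (err != -EINPROGRESS)
		complete(areq->data);
}

static int example_authenc_encrypt(struct scatterlist *src,
				   struct scatterlist *dst,
				   unsigned int assoclen,
				   unsigned int cryptlen,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* src/dst carry assoclen bytes of AAD followed by the payload */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  example_aead_done, &done);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);

	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;
	}

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}
#endif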

int qat_algs_register(void)
{
	int ret = 0, i;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
		qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	if (ret)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	goto unlock;
}

int qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
	mutex_unlock(&algs_lock);
	return 0;
}

int qat_algs_init(void)
{
	return 0;
}

void qat_algs_exit(void)
{
}