/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

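/*
 * Cipher configuration words for the QAT hardware: AES in CBC mode with
 * the selected key size. The decrypt direction additionally asks the
 * hardware to convert the key (ICP_QAT_HW_CIPHER_KEY_CONVERT).
 */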
#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static atomic_t active_dev;

struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	uint8_t salt[AES_BLOCK_SIZE];
	spinlock_t lock;	/* protects qat_alg_aead_ctx struct */
};

struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
};

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
	return -EFAULT;
}

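/*
 * Precompute the inner and outer HMAC partial hash states for the given
 * authentication key and store them, big-endian, in the hardware hash
 * state buffer (state1), so the firmware never sees the raw auth key.
 */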
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	char ipad[block_size];
	char opad[block_size];
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;
	shash->flags = 0x0;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ipad);
		if (ret)
			return ret;

		memcpy(opad, ipad, digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}

static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

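/*
 * Build the encrypt-direction content descriptor and firmware request
 * template: the cipher block (AES key + config) comes first, followed by
 * the auth block holding the precomputed HMAC state, and the request
 * chains the CIPHER slice into the AUTH slice.
 */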
static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
					 int alg,
					 struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
					 int alg,
					 struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
}

static int qat_alg_validate_key(int key_len, int *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case AES_KEYSIZE_192:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case AES_KEYSIZE_256:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
				      const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
		return -EFAULT;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
		goto error;

	if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const uint8_t *key,
					    unsigned int keylen)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

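/*
 * setkey handles both first-time keying and rekeying: on the first call
 * it binds the tfm to a crypto instance and allocates DMA-coherent
 * content descriptors; on rekey it simply clears the existing
 * descriptors and request templates before rebuilding the sessions.
 */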
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_aead_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* For out of place operation, unmap only the data buffers */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}

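/*
 * Flatten the assoc/src/dst scatterlists (plus the IV, when present)
 * into firmware buffer lists and DMA-map every segment. For out of
 * place requests a second list is built for the destination, reusing
 * the already mapped assoc and IV entries.
 */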
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc, int assoclen,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, sg_nctr = 0;
	int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		if (!sg->length)
			continue;

		if (!(assoclen > 0))
			break;

		bufl->bufers[bufs].addr =
			dma_map_single(dev, sg_virt(sg),
				       min_t(int, assoclen, sg->length),
				       DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = min_t(int, assoclen, sg->length);
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
		assoclen -= sg->length;
	}

	if (ivlen) {
		bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = ivlen;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr + bufs;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr + bufs;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		/* For out of place operation dma map only data and
		 * reuse assoc mapping and iv */
		for (i = 0; i < bufs; i++) {
			bufers[i].len = bufl->bufers[i].len;
			bufers[i].addr = bufl->bufers[i].addr;
		}
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr + bufs;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr + bufs;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	sg_nctr = 0;
	for (i = 0; i < n + bufs; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		n = sg_nents(sglout);
		for (i = bufs; i < n + bufs; i++)
			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
				dma_unmap_single(dev, buflout->bufers[i].addr,
						 buflout->bufers[i].len,
						 DMA_BIDIRECTIONAL);
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;
	areq->base.complete(&areq->base, res);
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}

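/*
 * Build a decrypt request from the precomputed firmware template, point
 * it at the DMA-mapped buffer lists and post it to the instance's
 * symmetric TX ring, retrying a few times if the ring is full.
 */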
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
				  areq->src, areq->dst, areq->iv,
				  AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen +
				cipher_param->cipher_length + AES_BLOCK_SIZE;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
				     int enc_iv)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
				  areq->src, areq->dst, iv, AES_BLOCK_SIZE,
				  qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (enc_iv) {
		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
		cipher_param->cipher_offset = areq->assoclen;
	} else {
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
		cipher_param->cipher_length = areq->cryptlen;
		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	}
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	return qat_alg_aead_enc_internal(areq, areq->iv, 0);
}

static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	__be64 seq;

	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
	       &seq, sizeof(uint64_t));
	return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
}

static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const uint8_t *key,
				     unsigned int keylen)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
				  NULL, 0, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
				  NULL, 0, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_init(struct crypto_tfm *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return -EFAULT;
	spin_lock_init(&ctx->lock);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
				sizeof(struct aead_request) +
				sizeof(struct qat_crypto_request));
	ctx->tfm = tfm;
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!IS_ERR(ctx->hash_tfm))
		crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
					sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static struct crypto_alg qat_algs[] = { {
	.cra_name = "authenc(hmac(sha1),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha1_init,
	.cra_exit = qat_alg_aead_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_aead_setkey,
			.decrypt = qat_alg_aead_dec,
			.encrypt = qat_alg_aead_enc,
			.givencrypt = qat_alg_aead_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha256),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha256_init,
	.cra_exit = qat_alg_aead_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_aead_setkey,
			.decrypt = qat_alg_aead_dec,
			.encrypt = qat_alg_aead_enc,
			.givencrypt = qat_alg_aead_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha512),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha512_init,
	.cra_exit = qat_alg_aead_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_aead_setkey,
			.decrypt = qat_alg_aead_dec,
			.encrypt = qat_alg_aead_enc,
			.givencrypt = qat_alg_aead_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };

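/*
 * Algorithm registration is reference counted across accelerator
 * devices: the array is registered with the crypto API when the first
 * device comes up and unregistered when the last one goes away.
 */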
int qat_algs_register(void)
{
	if (atomic_add_return(1, &active_dev) == 1) {
		int i;

		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
			qat_algs[i].cra_flags =
				(qat_algs[i].cra_type == &crypto_aead_type) ?
				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	}
	return 0;
}

int qat_algs_unregister(void)
{
	if (atomic_sub_return(1, &active_dev) == 0)
		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	return 0;
}

int qat_algs_init(void)
{
	atomic_set(&active_dev, 0);
	crypto_get_default_rng();
	return 0;
}

void qat_algs_exit(void)
{
	crypto_put_default_rng();
}