crypto: qat - Prevent dma mapping zero length assoc data
[deliverable/linux.git] / drivers / crypto / qat / qat_common / qat_algs.c
CommitLineData
d370cec3
TS
1/*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
4
5 GPL LICENSE SUMMARY
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 Contact Information:
17 qat-linux@intel.com
18
19 BSD LICENSE
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
23 are met:
24
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
30 distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
34
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46*/
47#include <linux/module.h>
48#include <linux/slab.h>
49#include <linux/crypto.h>
50#include <crypto/aead.h>
51#include <crypto/aes.h>
52#include <crypto/sha.h>
53#include <crypto/hash.h>
54#include <crypto/algapi.h>
55#include <crypto/authenc.h>
56#include <crypto/rng.h>
57#include <linux/dma-mapping.h>
58#include "adf_accel_devices.h"
59#include "adf_transport.h"
60#include "adf_common_drv.h"
61#include "qat_crypto.h"
62#include "icp_qat_hw.h"
63#include "icp_qat_fw.h"
64#include "icp_qat_fw_la.h"
65
/* Build a cipher config word for AES-CBC encrypt (no key conversion). */
#define QAT_AES_HW_CONFIG_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

/* Build a cipher config word for AES-CBC decrypt (HW converts the key). */
#define QAT_AES_HW_CONFIG_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

/* Count of devices using the algs: registered on 0->1, unregistered on 1->0
 * (see qat_algs_register()/qat_algs_unregister()). */
static atomic_t active_dev;

/* One flat-buffer entry of a QAT hardware scatter-gather list. */
struct qat_alg_buf {
	uint32_t len;		/* buffer length in bytes */
	uint32_t resrvd;
	uint64_t addr;		/* DMA address of the buffer */
} __packed;

/* QAT hardware scatter-gather list header followed by its entries. */
struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;		/* total entries in bufers[] */
	uint32_t num_mapped_bufs;	/* entries dma-mapped by this list
					 * (out-of-place lists reuse the
					 * assoc/iv mappings of the in list) */
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)

/* On-stack scratch space for the HMAC ipad/opad precomputation. */
struct qat_auth_state {
	uint8_t data[MAX_AUTH_STATE_SIZE + 64];
} __aligned(64);

/* Per-tfm AEAD session state. */
struct qat_alg_session_ctx {
	struct qat_alg_cd *enc_cd;	/* encrypt content descriptor (DMA) */
	dma_addr_t enc_cd_paddr;
	struct qat_alg_cd *dec_cd;	/* decrypt content descriptor (DMA) */
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
	struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	struct crypto_shash *hash_tfm;	/* software hash for HMAC precompute */
	enum icp_qat_hw_auth_algo qat_hash_alg;
	uint8_t salt[AES_BLOCK_SIZE];	/* random salt used by givencrypt */
	spinlock_t lock; /* protects qat_alg_session_ctx struct */
};
125
126static int get_current_node(void)
127{
128 return cpu_data(current_thread_info()->cpu).phys_proc_id;
129}
130
131static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
132{
133 switch (qat_hash_alg) {
134 case ICP_QAT_HW_AUTH_ALGO_SHA1:
135 return ICP_QAT_HW_SHA1_STATE1_SZ;
136 case ICP_QAT_HW_AUTH_ALGO_SHA256:
137 return ICP_QAT_HW_SHA256_STATE1_SZ;
138 case ICP_QAT_HW_AUTH_ALGO_SHA512:
139 return ICP_QAT_HW_SHA512_STATE1_SZ;
140 default:
141 return -EFAULT;
142 };
143 return -EFAULT;
144}
145
/*
 * Precompute the HMAC inner (ipad) and outer (opad) partial hash states for
 * @auth_key and store them into the hardware auth block @hash in the byte
 * order the QAT firmware expects.
 *
 * Returns 0 on success, the error from crypto_shash_digest() for an
 * over-long key, or -EFAULT on any other shash failure or unsupported
 * algorithm.
 */
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_session_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	struct qat_auth_state auth_state;
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	/* ipad and opad share the zeroed scratch buffer, one block each */
	uint8_t *ipad = auth_state.data;
	uint8_t *opad = ipad + block_size;
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
	shash->tfm = ctx->hash_tfm;
	shash->flags = 0x0;

	if (auth_keylen > block_size) {
		/* RFC 2104: a key longer than one block is hashed first */
		char buff[SHA512_BLOCK_SIZE];
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, buff);
		if (ret)
			return ret;

		memcpy(ipad, buff, digest_size);
		memcpy(opad, buff, digest_size);
		memset(ipad + digest_size, 0, block_size - digest_size);
		memset(opad + digest_size, 0, block_size - digest_size);
	} else {
		/* short keys are used as-is, zero-padded to the block size */
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
		memset(ipad + auth_keylen, 0, block_size - auth_keylen);
		memset(opad + auth_keylen, 0, block_size - auth_keylen);
	}

	/* XOR in the standard HMAC inner/outer pad constants */
	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	/* inner hash: partial state after absorbing one block of ipad */
	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	/* export the partial state, store big-endian words for the HW */
	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		/* SHA-512 state words are 64-bit, hence the >> 3 stride */
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	/* outer hash: partial state after absorbing one block of opad */
	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	/* outer state lands right after the 8-byte-aligned inner state */
	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	return 0;
}
259
/* Fill in the LA request header fields common to encrypt and decrypt:
 * 64-bit content-descriptor pointer, SGL data, digest appended in buffer,
 * no partial processing, 16-byte IV field, no protocol, stateless. */
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	/* each *_SET macro updates its own bitfield in serv_specif_flags */
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
279
/*
 * Build the encrypt-side content descriptor (cipher slice feeding the auth
 * slice) and the firmware request template for a CIPHER_HASH session.
 *
 * Returns 0 on success or -EFAULT if the HMAC precompute fails.
 */
static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	/* hash block is placed right after the cipher setup + key bytes;
	 * NOTE(review): offset uses sizeof(icp_qat_hw_auth_setup) -
	 * presumably equal to the cipher setup size in this HW layout */
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* cipher and auth cd_ctrl views alias the same template field */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup: return the auth result, do not compare it (enc) */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	/* descriptor sizes/offsets are expressed in 8-byte words (>> 3) */
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
356
/*
 * Build the decrypt-side content descriptor (auth slice feeding the cipher
 * slice) and the firmware request template for a HASH_CIPHER session; the
 * firmware compares the auth result itself (ICV verify).
 *
 * Returns 0 on success or -EFAULT if the HMAC precompute fails.
 */
static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	/* cipher block follows the auth setup plus two 8-byte-aligned
	 * digest-sized state areas */
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/* cipher and auth cd_ctrl views alias the same template field */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	/* auth request params live right after the cipher request params */
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup: HW compares the auth result, does not return it */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	/* descriptor sizes/offsets are expressed in 8-byte words (>> 3) */
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
444
445static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
446 const uint8_t *key, unsigned int keylen)
447{
448 struct crypto_authenc_keys keys;
449 int alg;
450
451 if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
452 return -EFAULT;
453
454 if (crypto_authenc_extractkeys(&keys, key, keylen))
455 goto bad_key;
456
457 switch (keys.enckeylen) {
458 case AES_KEYSIZE_128:
459 alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
460 break;
461 case AES_KEYSIZE_192:
462 alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
463 break;
464 case AES_KEYSIZE_256:
465 alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
466 break;
467 default:
468 goto bad_key;
469 break;
470 }
471
472 if (qat_alg_init_enc_session(ctx, alg, &keys))
473 goto error;
474
475 if (qat_alg_init_dec_session(ctx, alg, &keys))
476 goto error;
477
478 return 0;
479bad_key:
480 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
481 return -EINVAL;
482error:
483 return -EFAULT;
484}
485
/*
 * AEAD .setkey handler.  On first use it binds the tfm to a crypto instance
 * on the caller's node and allocates DMA-coherent content descriptors; on
 * rekey it just zeroes the existing descriptors and templates.  The lock
 * only guards the allocate-vs-rekey decision; session programming happens
 * unlocked.
 *
 * Returns 0 on success, -EINVAL if no instance is available or the key is
 * bad, -ENOMEM on allocation or session-init failure.
 */
static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
			  unsigned int keylen)
{
	struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		memset(&ctx->enc_fw_req_tmpl, 0,
		       sizeof(struct icp_qat_fw_la_bulk_req));
		memset(&ctx->dec_fw_req_tmpl, 0,
		       sizeof(struct icp_qat_fw_la_bulk_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		/* GFP_ATOMIC because we allocate under the spinlock */
		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}
547
/*
 * Undo the DMA mappings created by qat_alg_sgl_to_bufl() and free the
 * scatter-gather list structures.  For out-of-place requests the output
 * list reuses the input list's assoc/IV mappings, so only the trailing
 * num_mapped_bufs data entries of the output list are unmapped here.
 */
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	int i, bufs = bl->num_bufs;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = bufs - blout->num_mapped_bufs;

		for (i = bufless; i < bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
		kfree(blout);
	}
}
578
/*
 * Build and DMA-map the QAT hardware scatter-gather list(s) for a request:
 * assoc data entries first, then the IV, then the payload entries.  For
 * out-of-place requests a second list is built for the destination, reusing
 * the assoc/IV mappings of the source list.  Zero-length assoc entries are
 * skipped so we never dma_map a zero-length buffer.
 *
 * Returns 0 on success, -EINVAL for an empty payload sgl, -ENOMEM on any
 * allocation or mapping failure (everything mapped so far is unwound).
 */
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	/* worst-case size: every assoc entry + IV + every payload entry */
	size_t sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		/* do not map zero-length assoc entries */
		if (!sg->length)
			continue;
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
	/* the IV gets its own entry after the assoc data */
	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
						 DMA_BIDIRECTIONAL);
	bufl->bufers[bufs].len = ivlen;
	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
		goto err;
	bufs++;

	for_each_sg(sgl, sg, n, i) {
		int y = i + bufs;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
	}
	bufl->num_bufs = n + bufs;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		buflout = kmalloc_node(sz, GFP_ATOMIC,
				       inst->accel_dev->numa_node);
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		/* For out of place operation dma map only data and
		 * reuse assoc mapping and iv */
		for (i = 0; i < bufs; i++) {
			bufers[i].len = bufl->bufers[i].len;
			bufers[i].addr = bufl->bufers[i].addr;
		}
		for_each_sg(sglout, sg, n, i) {
			int y = i + bufs;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			buflout->bufers[y].len = sg->length;
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
		}
		buflout->num_bufs = n + bufs;
		buflout->num_mapped_bufs = n;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	/* unwind every entry that was mapped before the failure */
	for_each_sg(sgl, sg, n + bufs, i) {
		if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
	}
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		for_each_sg(sglout, sg, n, i) {
			int y = i + bufs;

			if (!dma_mapping_error(dev, buflout->bufers[y].addr))
				dma_unmap_single(dev, buflout->bufers[y].addr,
						 buflout->bufers[y].len,
						 DMA_BIDIRECTIONAL);
		}
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}
704
705void qat_alg_callback(void *resp)
706{
707 struct icp_qat_fw_la_resp *qat_resp = resp;
708 struct qat_crypto_request *qat_req =
bce3cc61 709 (void *)(__force long)qat_resp->opaque_data;
d370cec3
TS
710 struct qat_alg_session_ctx *ctx = qat_req->ctx;
711 struct qat_crypto_instance *inst = ctx->inst;
712 struct aead_request *areq = qat_req->areq;
713 uint8_t stat_filed = qat_resp->comn_resp.comn_status;
714 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
715
716 qat_alg_free_bufl(inst, qat_req);
717 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
718 res = -EBADMSG;
45cff260 719 areq->base.complete(&areq->base, res);
d370cec3
TS
720}
721
/*
 * AEAD .decrypt handler: map the request buffers, instantiate the decrypt
 * template and submit to the symmetric TX ring (retrying up to 10 times on
 * a full ring).  Completion is delivered via qat_alg_callback().
 *
 * Returns -EINPROGRESS on successful submission, -EBUSY if the ring stayed
 * full, or a mapping error code.
 */
static int qat_alg_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  areq->iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	/* opaque_data round-trips our request pointer through the firmware */
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	/* ciphertext excludes the trailing ICV; data starts after assoc+IV */
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	/* authenticate assoc || IV || ciphertext */
	auth_param->auth_len = areq->assoclen +
				cipher_param->cipher_length + AES_BLOCK_SIZE;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
764
/*
 * Common AEAD encrypt path.  @enc_iv selects givencrypt behaviour: when set,
 * the IV block itself is encrypted along with the payload (cipher starts at
 * the IV); otherwise the IV is passed in the request parameters and the
 * cipher starts after assoc+IV.
 *
 * Returns -EINPROGRESS on successful submission, -EBUSY if the ring stayed
 * full after 10 retries, or a mapping error code.
 */
static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
				int enc_iv)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	/* opaque_data round-trips our request pointer through the firmware */
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (enc_iv) {
		/* givencrypt: encrypt the IV block together with the data */
		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
		cipher_param->cipher_offset = areq->assoclen;
	} else {
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
		cipher_param->cipher_length = areq->cryptlen;
		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	}
	auth_param->auth_off = 0;
	/* authenticate assoc || IV || plaintext */
	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
813
/* AEAD .encrypt handler: use the caller's IV, do not encrypt the IV block. */
static int qat_alg_enc(struct aead_request *areq)
{
	return qat_alg_enc_internal(areq, areq->iv, 0);
}
818
819static int qat_alg_genivenc(struct aead_givcrypt_request *req)
820{
821 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
822 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
823 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
824 __be64 seq;
825
826 memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
827 seq = cpu_to_be64(req->seq);
828 memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
829 &seq, sizeof(uint64_t));
830 return qat_alg_enc_internal(&req->areq, req->giv, 1);
831}
832
833static int qat_alg_init(struct crypto_tfm *tfm,
834 enum icp_qat_hw_auth_algo hash, const char *hash_name)
835{
836 struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
837
838 memset(ctx, '\0', sizeof(*ctx));
839 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
840 if (IS_ERR(ctx->hash_tfm))
841 return -EFAULT;
842 spin_lock_init(&ctx->lock);
843 ctx->qat_hash_alg = hash;
844 tfm->crt_aead.reqsize = sizeof(struct aead_request) +
845 sizeof(struct qat_crypto_request);
846 ctx->tfm = tfm;
847 return 0;
848}
849
/* .cra_init for authenc(hmac(sha1),cbc(aes)). */
static int qat_alg_sha1_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}
854
/* .cra_init for authenc(hmac(sha256),cbc(aes)). */
static int qat_alg_sha256_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}
859
/* .cra_init for authenc(hmac(sha512),cbc(aes)). */
static int qat_alg_sha512_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
864
/*
 * .cra_exit: release the software shash, free the DMA-coherent content
 * descriptors if setkey allocated them, and drop the crypto instance
 * reference.  Safe to call when setkey never ran (ctx->inst is NULL).
 */
static void qat_alg_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!IS_ERR(ctx->hash_tfm))
		crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd)
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	if (ctx->dec_cd)
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	qat_crypto_put_instance(inst);
}
886
/* AEAD algorithm descriptors offloaded to QAT: AES-CBC with HMAC-SHA1/
 * SHA256/SHA512.  Priority 4001 so they win over software implementations. */
static struct crypto_alg qat_algs[] = { {
	.cra_name = "authenc(hmac(sha1),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha1_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha256),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha256_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha512),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha512_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
	},
} };
954
955int qat_algs_register(void)
956{
957 if (atomic_add_return(1, &active_dev) == 1) {
958 int i;
959
960 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
961 qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
962 CRYPTO_ALG_ASYNC;
963 return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
964 }
965 return 0;
966}
967
968int qat_algs_unregister(void)
969{
970 if (atomic_sub_return(1, &active_dev) == 0)
971 return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
972 return 0;
973}
974
975int qat_algs_init(void)
976{
977 atomic_set(&active_dev, 0);
978 crypto_get_default_rng();
979 return 0;
980}
981
/* Module teardown: drop the reference taken on the default RNG. */
void qat_algs_exit(void)
{
	crypto_put_default_rng();
}
This page took 0.086749 seconds and 5 git commands to generate.