crypto: qat - Add support for RSA algorithm
drivers/crypto/qat/qat_common/qat_asym_algs.c
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include "qat_rsakey-asn1.h"
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"

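/*
 * Firmware parameter tables.  The named per-operation views overlay
 * in_tab[]/out_tab[] so that one flat array of DMA addresses can be
 * handed to firmware for both encrypt (m, e, n -> c) and decrypt
 * (c, d, n -> m).
 */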
struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;
			dma_addr_t e;
			dma_addr_t n;
		} enc;
		struct {
			dma_addr_t c;
			dma_addr_t d;
			dma_addr_t n;
		} dec;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	unsigned int key_sz;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_rsa_request {
	struct qat_rsa_input_params in;
	struct qat_rsa_output_params out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	struct icp_qat_fw_pke_request req;
	struct qat_rsa_ctx *ctx;
	int err;
} __aligned(64);

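/*
 * Completion handler for a PKE firmware response: translate the
 * firmware status, release all DMA mappings taken when the request was
 * submitted, then trim the leading zero bytes of the (key-sized)
 * firmware output so that dst_len reflects the actual result length.
 */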
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct akcipher_request *areq = (void *)(__force long)resp->opaque;
	struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
	struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);
	char *ptr = areq->dst;

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
				  req->in.enc.m);
	else
		dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
				 DMA_TO_DEVICE);

	dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	areq->dst_len = req->ctx->key_sz;
	/* Need to set the correct length of the output */
	while (!(*ptr) && areq->dst_len) {
		areq->dst_len--;
		ptr++;
	}

	if (areq->dst_len != req->ctx->key_sz)
		memcpy(areq->dst, ptr, areq->dst_len);

	akcipher_request_complete(areq, err);
}

void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;

	qat_rsa_cb(resp);
}

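/*
 * Firmware function IDs for the RSA public-key (PKE_RSA_EP_*) and
 * private-key (PKE_RSA_DP1_*) primitives, one per supported modulus
 * size.  qat_rsa_enc_fn_id()/qat_rsa_dec_fn_id() map a key size in
 * bytes to the matching ID and return 0 for unsupported sizes.
 */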
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	}
}

#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	}
}

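/*
 * RSA public-key operation (c = m^e mod n).  Builds a flat-buffer PKE
 * request around the caller's src/dst, zero-pads src up to the modulus
 * size if needed, DMA-maps everything and posts the message to the
 * instance's pke_tx ring, retrying briefly on -EBUSY.  Completion is
 * reported asynchronously via qat_rsa_cb().
 */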
static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_rsa_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->ctx = ctx;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.enc.e = ctx->dma_e;
	qat_req->in.enc.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * src can be of any size in the valid range, but HW expects it to
	 * be the same size as the modulus n, so if it is smaller we need
	 * to allocate a new buffer and copy the src data into it.
	 * Otherwise we just map the user-provided buffer.
	 */
	if (req->src_len < ctx->key_sz) {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.enc.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		memcpy(qat_req->src_align + shift, req->src, req->src_len);
	} else {
		qat_req->src_align = NULL;
		qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len,
						   DMA_TO_DEVICE);
	}
	qat_req->in.in_tab[3] = 0;
	qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len,
					    DMA_FROM_DEVICE);
	qat_req->out.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);

	if (unlikely((!qat_req->src_align &&
		      dma_mapping_error(dev, qat_req->in.enc.m)) ||
		     dma_mapping_error(dev, qat_req->out.enc.c) ||
		     dma_mapping_error(dev, qat_req->phy_in) ||
		     dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;
unmap:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.enc.m);
	else
		if (!dma_mapping_error(dev, qat_req->in.enc.m))
			dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
					 DMA_TO_DEVICE);
	if (!dma_mapping_error(dev, qat_req->out.enc.c))
		dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
				 DMA_FROM_DEVICE);
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
	return ret;
}

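/*
 * RSA private-key operation (m = c^d mod n).  Identical flow to
 * qat_rsa_enc() but with the private exponent d and the decrypt
 * firmware function ID.
 */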
static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_rsa_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->ctx = ctx;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.dec.d = ctx->dma_d;
	qat_req->in.dec.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * src can be of any size in the valid range, but HW expects it to
	 * be the same size as the modulus n, so if it is smaller we need
	 * to allocate a new buffer and copy the src data into it.
	 * Otherwise we just map the user-provided buffer.
	 */
	if (req->src_len < ctx->key_sz) {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.dec.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		memcpy(qat_req->src_align + shift, req->src, req->src_len);
	} else {
		qat_req->src_align = NULL;
		qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len,
						   DMA_TO_DEVICE);
	}
	qat_req->in.in_tab[3] = 0;
	qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len,
					    DMA_FROM_DEVICE);
	qat_req->out.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);

	if (unlikely((!qat_req->src_align &&
		      dma_mapping_error(dev, qat_req->in.dec.c)) ||
		     dma_mapping_error(dev, qat_req->out.dec.m) ||
		     dma_mapping_error(dev, qat_req->phy_in) ||
		     dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;
unmap:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.dec.c);
	else
		if (!dma_mapping_error(dev, qat_req->in.dec.c))
			dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
					 DMA_TO_DEVICE);
	if (!dma_mapping_error(dev, qat_req->out.dec.m))
		dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
				 DMA_FROM_DEVICE);
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
	return ret;
}

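/*
 * ASN.1 callbacks invoked by the BER decoder from qat_rsa_setkey().
 * Each strips the leading zero bytes of the integer, validates its
 * length against the modulus size (which qat_rsa_get_n() establishes
 * first) and copies it, right-aligned, into a DMA-coherent buffer.
 */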
int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen)
{
	struct qat_rsa_ctx *ctx = context;
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* In FIPS mode only allow key sizes 2K and 3K */
	if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
		pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
		goto err;
	}
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}

int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen)
{
	struct qat_rsa_ctx *ctx = context;
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e) {
		ctx->e = NULL;
		return -ENOMEM;
	}
	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}

int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen)
{
	struct qat_rsa_ctx *ctx = context;
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	/* In FIPS mode only allow key sizes 2K and 3K */
	if (fips_enabled && (vlen != 256 && vlen != 384)) {
		pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
		goto err;
	}

	ret = -ENOMEM;
	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}

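/*
 * Program a new BER-encoded RSA key, releasing any key material that
 * was set before.  The qat_rsa_get_{n,e,d}() callbacks above fill in
 * the components; the key is rejected unless at least n and e were
 * recovered.
 */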
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int ret;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen);
	if (ret < 0)
		goto free;

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
		ctx->d = NULL;
	}
	if (ctx->e) {
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
		ctx->e = NULL;
	}
	if (ctx->n) {
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
		ctx->n = NULL;
		ctx->key_sz = 0;
	}
	return ret;
}

static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}

static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	qat_crypto_put_instance(ctx->inst);
	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
}

static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.sign = qat_rsa_dec,
	.verify = qat_rsa_enc,
	.setkey = qat_rsa_setkey,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_rsa_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};

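/*
 * Registration exposes this implementation under the generic "rsa"
 * name with a high cra_priority, so users reach it through the
 * akcipher API rather than through this file.  A minimal sketch of
 * such a caller, assuming the flat-buffer akcipher request API of
 * this kernel version (ber_key, src, dst and their lengths are
 * caller-supplied; error handling and the completion callback are
 * omitted for brevity):
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *	crypto_akcipher_setkey(tfm, ber_key, ber_keylen);
 *	akcipher_request_set_crypt(req, src, dst, src_len, dst_len);
 *	crypto_akcipher_encrypt(req);
 *
 * crypto_akcipher_encrypt() returns -EINPROGRESS here, since the
 * operation completes asynchronously from qat_rsa_cb().
 */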
int qat_asym_algs_register(void)
{
	rsa.base.cra_flags = 0;
	return crypto_register_akcipher(&rsa);
}

void qat_asym_algs_unregister(void)
{
	crypto_unregister_akcipher(&rsa);
}