crypto: aead - Remove CRYPTO_ALG_AEAD_NEW flag
drivers/crypto/nx/nx-aes-gcm.c

/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}

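/*
 * Illustrative sketch, not part of the driver: how the setkey path above
 * is reached through the generic kernel AEAD API. The function name is
 * hypothetical; it assumes <crypto/aead.h> and <linux/err.h> are visible
 * (the internal AEAD header above pulls in the former) and that the NX
 * implementation wins algorithm selection for "gcm(aes)".
 */
static int __maybe_unused nx_gcm_setkey_example(void)
{
	u8 key[AES_KEYSIZE_256] = { 0 };	/* 16, 24 or 32 bytes are valid */
	struct crypto_aead *tfm;
	int rc;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Dispatches to gcm_aes_nx_set_key() when gcm-aes-nx is selected */
	rc = crypto_aead_setkey(tfm, key, sizeof(key));

	crypto_free_aead(tfm);
	return rc;
}
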
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}

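/*
 * Illustrative sketch, not part of the driver: an rfc4106 key blob is the
 * AES key followed by a 4-byte nonce salt, which is why the function above
 * strips the final four bytes before handing the rest to
 * gcm_aes_nx_set_key(). The helper name below is hypothetical.
 */
static int __maybe_unused nx_rfc4106_setkey_example(struct crypto_aead *tfm)
{
	/* bytes 0..15: AES-128 key material, bytes 16..19: nonce salt */
	u8 key[AES_KEYSIZE_128 + 4] = { 0 };

	return crypto_aead_setkey(tfm, key, sizeof(key));
}
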
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out,
		  unsigned int assoclen)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat,
		       AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}

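/*
 * Worked example of the chunking bound used above (the same logic recurs
 * in gmac() below). Assuming, purely for illustration, NX_PAGE_SIZE of
 * 4096 and a device reporting 500 sg entries with a 128 KB databytelen:
 *
 *   max_sg_len = min(4096 / sizeof(struct nx_sg), 500, 128K / 4096)
 *   to_process = min(nbytes - processed, 128K, 4096 * (max_sg_len - 1))
 *
 * Each hcall therefore consumes at most one page worth of sg entries,
 * with one entry of headroom, and the INTERMEDIATE/CONTINUATION flags
 * stitch the per-chunk partial GCM results together. The concrete
 * numbers are assumptions, not values read from the hardware.
 */
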
static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
		unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
				   * sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key uses the same region as the GCM AAD and counter, so
	 * it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}

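/*
 * Why a single block encryption suffices above, in GCM terms: the tag is
 *
 *   T = GHASH(H, A, C) xor E_K(J0)
 *
 * and with no AAD (A) and no ciphertext (C) the GHASH term is zero, so
 * T = E_K(J0), i.e. one AES encryption of the initial counter block.
 * gcm_empty() computes exactly that via a temporary switch to ECB mode.
 */
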
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
			    unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc, assoclen);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
	if (assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
			    assoclen);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);

		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

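/*
 * Illustrative sketch, not part of the driver: driving the crypt path
 * above through the AEAD request API. The function name and buffer sizes
 * are hypothetical; assumes <linux/scatterlist.h> and <linux/slab.h>.
 * The NX hcalls complete synchronously, so no completion callback is set.
 */
static int __maybe_unused nx_gcm_encrypt_example(struct crypto_aead *tfm)
{
	u8 iv[12] = { 0 };		/* 96-bit GCM IV */
	struct aead_request *req;
	struct scatterlist sg;
	u8 *buf;
	int rc;

	rc = crypto_aead_setauthsize(tfm, 16);
	if (rc)
		return rc;

	buf = kzalloc(32 + 16, GFP_KERNEL);	/* plaintext + room for tag */
	if (!buf)
		return -ENOMEM;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		kfree(buf);
		return -ENOMEM;
	}

	sg_init_one(&sg, buf, 32 + 16);
	aead_request_set_ad(req, 0);			/* no associated data */
	aead_request_set_crypt(req, &sg, &sg, 32, iv);	/* 32 bytes in place */

	rc = crypto_aead_encrypt(req);	/* lands in gcm_aes_nx_crypt(req, 1, ...) */

	aead_request_free(req);
	kfree(buf);
	return rc;
}
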
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1, req->assoclen);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0, req->assoclen);
}

static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name        = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 12,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm_aes_nx_set_key,
	.encrypt     = gcm_aes_nx_encrypt,
	.decrypt     = gcm_aes_nx_decrypt,
};

struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name        = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm4106_aes_nx_set_key,
	.setauthsize = gcm4106_aes_nx_setauthsize,
	.encrypt     = gcm4106_aes_nx_encrypt,
	.decrypt     = gcm4106_aes_nx_decrypt,
};
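
/*
 * Illustrative sketch, not part of this file: the two aead_alg instances
 * above are registered from the driver core (nx.c in this driver) through
 * the generic AEAD registration API, roughly like this hypothetical helper:
 */
static int __maybe_unused nx_register_gcm_algs(void)
{
	int rc;

	rc = crypto_register_aead(&nx_gcm_aes_alg);
	if (rc)
		return rc;

	rc = crypto_register_aead(&nx_gcm4106_aes_alg);
	if (rc)
		crypto_unregister_aead(&nx_gcm_aes_alg);

	return rc;
}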