/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif

#define AESNI_ALIGN	16
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16

/* This data is stored at the end of the crypto_tfm struct.
 * It is a form of per-"session" data storage.
 * It needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct crypto_aes_ctx aes_key_expanded
		__attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 nonce[4];
};
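
/*
 * Editor's note (inferred from common_rfc4106_set_key() below): the key
 * material handed to setkey for rfc4106(gcm(aes)) is the raw AES key
 * followed by a 4 byte salt, e.g. for AES-128:
 *
 *	key[0..15]  - AES key, expanded into aes_key_expanded
 *	key[16..19] - salt, copied into ctx->nonce
 */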

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
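
/*
 * Editor's note: judging by the comparisons in aesni_gcm_enc_avx() and
 * friends below, these appear to be buffer-length break-even points; for
 * requests shorter than these the AVX/AVX2 setup cost presumably outweighs
 * the gain, so the plain SSE routines are used instead.
 */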

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
				aad, aad_len, auth_tag, auth_tag_len);
	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif

static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
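
/*
 * Editor's note (illustration, not from the original source): the generic
 * tfm context is only guaranteed crypto_tfm_ctx_alignment(), so the raw_*
 * buffers are over-allocated by AESNI_ALIGN - 1 bytes and the pointer is
 * rounded up here. For example, with AESNI_ALIGN == 16, a raw_ctx at
 * 0x1008 yields an aligned crypto_aes_ctx at 0x1010.
 */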

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}
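
/*
 * Editor's note: AES-NI clobbers SSE register state, which is not saved in
 * every kernel context. The pattern above and in aes_encrypt()/aes_decrypt()
 * below is therefore: if irq_fpu_usable() says no, fall back to the portable
 * C implementation (crypto_aes_*); otherwise bracket the AES-NI call in
 * kernel_fpu_begin()/kernel_fpu_end().
 */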

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
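
/*
 * Editor's note (worked example): for a trailing partial block of, say,
 * 5 bytes, ctr_crypt_final() computes keystream = E_K(counter block) and
 * then dst[0..4] = src[0..4] ^ keystream[0..4] before bumping the counter.
 * Only nbytes of keystream are consumed, so no padding is written out.
 */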

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * based on key length, override with the by8 version
	 * of ctr mode encryption/decryption for improved performance
	 * aes_set_key_common() ensures that key length is one of
	 * {128,192,256}
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
#endif

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}

#endif

#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_dec(ctx, blks, blks, nbytes);
}

static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
				 keylen - AES_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}

static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}
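
/*
 * Editor's note (example): a 64 byte xts(aes) key is split here into two
 * AES-256 keys: key[0..31] becomes the data encryption key and key[32..63]
 * the tweak encryption key. A 32 byte key yields two AES-128 keys the same
 * way.
 */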

static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}
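
/*
 * Editor's note: the common_glue_ctx tables below pair an 8-block routine
 * with a 1-block fallback, so the glue layer can feed aesni_xts_crypt8()
 * eight consecutive blocks per call and mop up any remainder one block at
 * a time.
 */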

static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

#else

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(
		aead,
		sizeof(struct aead_request) +
		crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}
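
/*
 * Editor's note (per the GCM specification): the hash subkey is
 * H = E_K(0^128). Encrypting a 16 byte all-zero buffer through ctr(aes)
 * with an all-zero counter block, as done above, yields exactly that value,
 * since the zero plaintext XORs away to leave the raw keystream.
 */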

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length must be 8 or 12 bytes. */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;

	/* Build the IV: 4 byte salt from the nonce, then the 8 byte
	 * per-request IV, then a 4 byte big-endian counter set to 1. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}
	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!assoc))
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
			  ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
			  dst + ((unsigned long)req->cryptlen), auth_tag_len);
	kernel_fpu_end();

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 req->cryptlen + auth_tag_len, 1);
		kfree(assoc);
	}
	return 0;
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
	u8 authTag[16];
	struct scatter_walk src_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length must be 8 or 12 bytes. */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);

	/* Build the IV: 4 byte salt from the nonce, then the 8 byte
	 * per-request IV, then a 4 byte big-endian counter set to 1. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if (sg_is_last(req->src) &&
	    req->src->offset + req->src->length <= PAGE_SIZE &&
	    sg_is_last(req->dst) &&
	    req->dst->offset + req->dst->length <= PAGE_SIZE) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		assoc = scatterwalk_map(&src_sg_walk);
		src = assoc + req->assoclen;
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!assoc)
			return -ENOMEM;
		scatterwalk_map_and_copy(assoc, req->src, 0,
					 req->assoclen + req->cryptlen, 0);
		src = assoc + req->assoclen;
		dst = src;
	}

	kernel_fpu_begin();
	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
			  ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
			  authTag, auth_tag_len);
	kernel_fpu_end();

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst - req->assoclen);
			scatterwalk_advance(&dst_sg_walk, req->dst->length);
			scatterwalk_done(&dst_sg_walk, 1, 0);
		}
		scatterwalk_unmap(assoc);
		scatterwalk_advance(&src_sg_walk, req->src->length);
		scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
					 tempCipherLen, 1);
		kfree(assoc);
	}
	return retval;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, irq_fpu_usable() ?
				     cryptd_aead_child(cryptd_tfm) :
				     &cryptd_tfm->base);

	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	return crypto_aead_encrypt(subreq);
}

static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, irq_fpu_usable() ?
				     cryptd_aead_child(cryptd_tfm) :
				     &cryptd_tfm->base);

	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	return crypto_aead_decrypt(subreq);
}
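
/*
 * Editor's note: when the FPU is usable, the request is handed directly to
 * the synchronous inner "__driver-gcm-aes-aesni" tfm; otherwise it is
 * queued through cryptd so the AES-NI work runs later in a context where
 * kernel_fpu_begin() is allowed.
 */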
#endif

static struct crypto_alg aesni_algs[] = { {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
}, {
	.cra_name		= "__aes-aesni",
	.cra_driver_name	= "__driver-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
}, {
	.cra_name		= "__ecb-aes-aesni",
	.cra_driver_name	= "__driver-ecb-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-aes-aesni",
	.cra_driver_name	= "__driver-cbc-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_ecb_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_cbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
#ifdef CONFIG_X86_64
}, {
	.cra_name		= "__ctr-aes-aesni",
	.cra_driver_name	= "__driver-ctr-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
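	/*
	 * Editor's note: .decrypt above deliberately points at ablk_encrypt;
	 * CTR mode decryption is the same keystream-XOR operation as
	 * encryption.
	 */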
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
	.cra_name		= "pcbc(aes)",
	.cra_driver_name	= "pcbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_pcbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
#endif
}, {
	.cra_name		= "__lrw-aes-aesni",
	.cra_driver_name	= "__driver-lrw-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesni_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_aesni_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= lrw_aesni_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-aes-aesni",
	.cra_driver_name	= "__driver-xts-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesni_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= xts_aesni_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "lrw(aes)",
	.cra_driver_name	= "lrw-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };

#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
	.setkey			= common_rfc4106_set_key,
	.setauthsize		= common_rfc4106_set_authsize,
	.encrypt		= helper_rfc4106_encrypt,
	.decrypt		= helper_rfc4106_decrypt,
	.ivsize			= 8,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "__gcm-aes-aesni",
		.cra_driver_name	= "__driver-gcm-aes-aesni",
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask		= AESNI_ALIGN - 1,
		.cra_module		= THIS_MODULE,
	},
}, {
	.init			= rfc4106_init,
	.exit			= rfc4106_exit,
	.setkey			= rfc4106_set_key,
	.setauthsize		= rfc4106_set_authsize,
	.encrypt		= rfc4106_encrypt,
	.decrypt		= rfc4106_decrypt,
	.ivsize			= 8,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "rfc4106(gcm(aes))",
		.cra_driver_name	= "rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct cryptd_aead *),
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (cpu_has_avx) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_fpu_init();
	if (err)
		return err;

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		goto fpu_exit;

	err = crypto_register_aeads(aesni_aead_algs,
				    ARRAY_SIZE(aesni_aead_algs));
	if (err)
		goto unregister_algs;

	return err;

unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
	crypto_fpu_exit();
	return err;
}

static void __exit aesni_exit(void)
{
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");