/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 * Authors: Adrian Hoban <adrian.hoban@intel.com>
 *          Gabriele Paoloni <gabriele.paoloni@intel.com>
 *          Tadeusz Struk (tadeusz.struk@intel.com)
 *          Aidan O'Mahony (aidan.o.mahony@intel.com)
 * Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
};

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

struct aesni_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

#ifdef CONFIG_X86_64

static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
				 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
		void *keys, u8 *out, unsigned int num_bytes);
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

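/*
 * The AVX dispatch wrappers below only take the AVX path for AES-128
 * and only when the request is at least AVX_GEN2_OPTSIZE bytes; short
 * requests and 192/256-bit keys fall back to the plain SSE
 * aesni_gcm_enc/dec routines.
 */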
static void aesni_gcm_enc_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((plaintext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
				aad_len, auth_tag, auth_tag_len);
	} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}

static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len)
{
	struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;

	if ((ciphertext_len < AVX_GEN2_OPTSIZE) ||
	    (aes_ctx->key_length != AES_KEYSIZE_128)) {
		aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
				aad, aad_len, auth_tag, auth_tag_len);
	} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
		aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	} else {
		aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
		aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
					aad_len, auth_tag, auth_tag_len);
	}
}
#endif

static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)
		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}
#endif

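/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for a tfm
 * context, so the 16-byte alignment the AES-NI instructions need is
 * recreated by hand: contexts are over-allocated by AESNI_ALIGN - 1
 * bytes and the pointer is rounded up on every access, as in aes_ctx()
 * below.
 */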
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

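/*
 * aesni_set_key() executes SSE instructions, which is only safe when
 * irq_fpu_usable(); otherwise the key schedule is expanded with the
 * generic crypto_aes_expand_key().
 */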
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

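/*
 * Bulk handlers: blkcipher_walk hands back one contiguous chunk at a
 * time.  Full blocks (nbytes & AES_BLOCK_MASK) are processed with the
 * FPU held; any partial tail (nbytes & (AES_BLOCK_SIZE - 1)) is
 * returned to blkcipher_walk_done() for the next iteration.
 */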
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
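/*
 * CTR mode needs no padding: the final partial block is handled by
 * encrypting one more counter block and XOR-ing only nbytes of
 * keystream over the data.
 */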
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv)
{
	/*
	 * based on key length, override with the by8 version
	 * of ctr mode encryption/decryption for improved performance
	 * aes_set_key_common() ensures that key length is one of
	 * {128,192,256}
	 */
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				  nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
#endif

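/*
 * The ablk_* wrappers expose async versions of the internal
 * "__driver-*" algorithms through cryptd, so requests arriving in
 * contexts where the FPU is unusable can be deferred to a worker
 * thread that does have FPU access.
 */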
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}

#endif

#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
	aesni_ecb_dec(ctx, blks, blks, nbytes);
}

static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
				 keylen - AES_BLOCK_SIZE);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}

static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

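/*
 * lrw_crypt() (and xts_crypt() further down) stages at most 8 blocks
 * at a time in the on-stack buf[] below before handing them to the
 * aesni_ecb_* callbacks, keeping stack usage bounded.
 */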
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
				  keylen / 2);
}

static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
	aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
	aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
	} }
};

static const struct common_glue_ctx aesni_dec_xts = {
	.num_funcs = 2,
	.fpu_blocks_limit = 1,

	.funcs = { {
		.num_blocks = 8,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
	}, {
		.num_blocks = 1,
		.fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
	} }
};

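/*
 * glue_xts_crypt_128bit() walks these tables widest function first:
 * the 8-block aesni_xts_crypt8() path handles the bulk of the data and
 * the single-block routines mop up the remainder.
 */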
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
				     XTS_TWEAK_CAST(aesni_xts_tweak),
				     aes_ctx(ctx->raw_tweak_ctx),
				     aes_ctx(ctx->raw_crypt_ctx));
}

#else

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
		.tweak_fn = aesni_xts_tweak,
		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
		.crypt_fn = lrw_xts_decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	kernel_fpu_end();

	return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
				       CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(
		aead,
		sizeof(struct aead_request) +
		crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}

static void rfc4106_exit(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

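/*
 * GHASH's hash subkey is H = E_K(0^128).  It is computed here by
 * running a one-block ctr(aes) request with a zero IV over a zeroed
 * buffer, so setkey itself never has to touch the FPU directly.
 */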
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4-byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/* key is not aligned: use an auxiliary aligned pointer */
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
exit:
	kfree(new_key_mem);
	return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}

static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(aead)->authsize = authsize;
	return 0;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(parent);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}

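/*
 * Both GCM directions below build the pre-counter block J0 as:
 * 4-byte salt from the key || 8-byte explicit IV from the request ||
 * the big-endian block counter 0x00000001.
 */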
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	u32 key_len = ctx->aes_key_expanded.key_length;
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 8 or 12 bytes. */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
		return -EINVAL;
	if (unlikely(key_len != AES_KEYSIZE_128 &&
		     key_len != AES_KEYSIZE_192 &&
		     key_len != AES_KEYSIZE_256))
		return -EINVAL;

	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					 req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
					 req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	u32 key_len = ctx->aes_key_expanded.key_length;
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 8 or 12 bytes. */
	if (unlikely((req->cryptlen < auth_tag_len) ||
		     (req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;
	if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
		return -EINVAL;
	if (unlikely(key_len != AES_KEYSIZE_128 &&
		     key_len != AES_KEYSIZE_192 &&
		     key_len != AES_KEYSIZE_256))
		return -EINVAL;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		assoc = (src + req->cryptlen);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					 req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
		kfree(src);
	}
	return retval;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, irq_fpu_usable() ?
				     cryptd_aead_child(cryptd_tfm) :
				     &cryptd_tfm->base);

	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	return crypto_aead_encrypt(subreq);
}

static int rfc4106_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm = *ctx;
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, irq_fpu_usable() ?
				     cryptd_aead_child(cryptd_tfm) :
				     &cryptd_tfm->base);

	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	return crypto_aead_decrypt(subreq);
}

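/*
 * The helper_* entry points back the internal __gcm-aes-aesni
 * algorithm.  They must only run with the FPU usable; otherwise the
 * cryptd wrappers above are expected to have deferred the request.
 */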
static int helper_rfc4106_encrypt(struct aead_request *req)
{
	int ret;

	if (unlikely(!irq_fpu_usable())) {
		WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
		ret = -EINVAL;
	} else {
		kernel_fpu_begin();
		ret = __driver_rfc4106_encrypt(req);
		kernel_fpu_end();
	}
	return ret;
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	int ret;

	if (unlikely(!irq_fpu_usable())) {
		WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
		ret = -EINVAL;
	} else {
		kernel_fpu_begin();
		ret = __driver_rfc4106_decrypt(req);
		kernel_fpu_end();
	}
	return ret;
}
#endif

static struct crypto_alg aesni_algs[] = { {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
}, {
	.cra_name		= "__aes-aesni",
	.cra_driver_name	= "__driver-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
}, {
	.cra_name		= "__ecb-aes-aesni",
	.cra_driver_name	= "__driver-ecb-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-aes-aesni",
	.cra_driver_name	= "__driver-cbc-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_ecb_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_cbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
#ifdef CONFIG_X86_64
}, {
	.cra_name		= "__ctr-aes-aesni",
	.cra_driver_name	= "__driver-ctr-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
				  AESNI_ALIGN - 1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
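	/*
	 * Note: ctr(aes) above wires .decrypt to ablk_encrypt on
	 * purpose: CTR decryption is the same keystream XOR as
	 * encryption.
	 */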
}, {
	.cra_name		= "__gcm-aes-aesni",
	.cra_driver_name	= "__driver-gcm-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct aesni_rfc4106_gcm_ctx) +
				  AESNI_ALIGN,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_aead_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.aead = {
			.setkey		= common_rfc4106_set_key,
			.setauthsize	= common_rfc4106_set_authsize,
			.encrypt	= helper_rfc4106_encrypt,
			.decrypt	= helper_rfc4106_decrypt,
			.ivsize		= 8,
			.maxauthsize	= 16,
		},
	},
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
	.cra_name		= "pcbc(aes)",
	.cra_driver_name	= "pcbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_pcbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
#endif
}, {
	.cra_name		= "__lrw-aes-aesni",
	.cra_driver_name	= "__driver-lrw-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesni_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_aesni_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= lrw_aesni_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-aes-aesni",
	.cra_driver_name	= "__driver-xts-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesni_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= xts_aesni_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "lrw(aes)",
	.cra_driver_name	= "lrw-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };

#ifdef CONFIG_X86_64
static struct aead_alg aesni_aead_algs[] = { {
	.init			= rfc4106_init,
	.exit			= rfc4106_exit,
	.setkey			= rfc4106_set_key,
	.setauthsize		= rfc4106_set_authsize,
	.encrypt		= rfc4106_encrypt,
	.decrypt		= rfc4106_decrypt,
	.ivsize			= 8,
	.maxauthsize		= 16,
	.base = {
		.cra_name		= "rfc4106(gcm(aes))",
		.cra_driver_name	= "rfc4106-gcm-aesni",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct cryptd_aead *),
		.cra_module		= THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

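/*
 * Matching on the AES CPUID feature flag above lets userspace autoload
 * this module on any CPU that advertises AES-NI.
 */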
static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (cpu_has_avx) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif

	err = crypto_fpu_init();
	if (err)
		return err;

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		goto fpu_exit;

	err = crypto_register_aeads(aesni_aead_algs,
				    ARRAY_SIZE(aesni_aead_algs));
	if (err)
		goto unregister_algs;

	return err;

unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
	crypto_fpu_exit();
	return err;
}

static void __exit aesni_exit(void)
{
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");