/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in intel-aes_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#ifdef CONFIG_X86_64
#include <asm/crypto/glue_helper.h>
#endif

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16];
        struct crypto_aes_ctx aes_key_expanded;
        u8 nonce[4];
        struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
        int err;
        struct completion completion;
};

struct aesni_hash_subkey_req_data {
        u8 iv[16];
        struct aesni_gcm_set_hash_subkey_result result;
        struct scatterlist sg;
};

#define AESNI_ALIGN     (16)
#define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

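/*
 * For illustration: with AES_BLOCK_SIZE == 16, AES_BLOCK_MASK == ~15,
 * so a 100-byte walk is split into (100 & AES_BLOCK_MASK) == 96 bytes
 * handled by the block routines and (100 & (AES_BLOCK_SIZE - 1)) == 4
 * trailing bytes left for the caller, exactly as the ecb/cbc/ctr loops
 * below do.
 */
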
struct aesni_lrw_ctx {
        struct lrw_table_ctx lrw_table;
        u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

struct aesni_xts_ctx {
        u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
        u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
};

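/*
 * Note: the raw_*_ctx buffers above are oversized by AESNI_ALIGN - 1
 * bytes so that aes_ctx() below can always carve a 16-byte-aligned
 * crypto_aes_ctx out of them, whatever the alignment of the enclosing
 * tfm context.
 */
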
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

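/*
 * For illustration: the dispatch helpers below use these sizes as
 * crossover points. Requests shorter than AVX_GEN2_OPTSIZE bytes stay
 * on the SSE routines, and (when built with AVX2) only requests of at
 * least AVX_GEN4_OPTSIZE bytes take the gen4 path, presumably where the
 * wider code amortizes its precomputation cost.
 */
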
#ifdef CONFIG_X86_64
static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
                                 const u8 *in, bool enc, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

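/*
 * For illustration, the 16-byte pre-counter block described above is
 * laid out as:
 *
 *      iv[0..3]   = salt from the Security Association
 *      iv[4..11]  = explicit IV from the ESP payload
 *      iv[12..15] = 0x00000001 (big endian)
 *
 * __driver_rfc4106_encrypt() below builds exactly this block.
 */
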
/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
                void *keys, u8 *out, unsigned int num_bytes);

/*
 * asmlinkage void aesni_gcm_precomp_avx_gen2()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        if (plaintext_len < AVX_GEN2_OPTSIZE) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}

static void aesni_gcm_dec_avx(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        if (ciphertext_len < AVX_GEN2_OPTSIZE) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}
#endif

#ifdef CONFIG_AS_AVX2
/*
 * asmlinkage void aesni_gcm_precomp_avx_gen4()
 * gcm_data *my_ctx_data, context data
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 */
asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);

asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        if (plaintext_len < AVX_GEN2_OPTSIZE) {
                aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
                                aad_len, auth_tag, auth_tag_len);
        } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}

static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len)
{
        if (ciphertext_len < AVX_GEN2_OPTSIZE) {
                aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
                                aad, aad_len, auth_tag, auth_tag_len);
        } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
                aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        } else {
                aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
                aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
                                        aad_len, auth_tag, auth_tag_len);
        }
}
#endif

static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        return (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)
                crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

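/*
 * Example (hypothetical addresses, for illustration): if raw_ctx sits
 * at an address ending in 0x08 and the tfm context alignment is below
 * AESNI_ALIGN, ALIGN() moves the pointer up to the next multiple of 16;
 * the AESNI_ALIGN - 1 slack reserved in the raw_* buffers guarantees
 * the aligned context still fits inside them.
 */
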
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if (!irq_fpu_usable())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}

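/*
 * The irq_fpu_usable() test above is the pattern used throughout this
 * file: when the FPU/SSE state cannot be touched (e.g. in an interrupt
 * that arrived while the kernel owned the FPU), the generic x86
 * implementation is used; otherwise the work is wrapped in
 * kernel_fpu_begin()/kernel_fpu_end() and done with AES-NI.
 */
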
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}

static int ecb_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct blkcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

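/*
 * For illustration: for a 100-byte CTR request, ctr_crypt() below
 * handles the first 96 bytes with aesni_ctr_enc_tfm() and then calls
 * ctr_crypt_final(), which encrypts one more counter block and XORs
 * just the 4 remaining keystream bytes into the tail.
 */
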
#ifdef CONFIG_AS_AVX
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv)
{
        /*
         * based on key length, override with the by8 version
         * of ctr mode encryption/decryption for improved performance
         * aes_set_key_common() ensures that key length is one of
         * {128,192,256}
         */
        if (ctx->key_length == AES_KEYSIZE_128)
                aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
        else if (ctx->key_length == AES_KEYSIZE_192)
                aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
        else
                aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}
#endif

static int ctr_crypt(struct blkcipher_desc *desc,
                     struct scatterlist *dst, struct scatterlist *src,
                     unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                  nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = blkcipher_walk_done(desc, &walk, 0);
        }
        kernel_fpu_end();

        return err;
}
#endif

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}
#endif

#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_enc(ctx, blks, blks, nbytes);
}

static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
{
        aesni_ecb_dec(ctx, blks, blks, nbytes);
}

static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;

        err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
                                 keylen - AES_BLOCK_SIZE);
        if (err)
                return err;

        return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
}

static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
{
        struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->lrw_table);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct lrw_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .table_ctx = &ctx->lrw_table,
                .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = lrw_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int err;

        /* key consists of keys of equal size concatenated, therefore
         * the length must be even
         */
        if (keylen % 2) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /* first half of xts-key is for crypt */
        err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
        if (err)
                return err;

        /* second half of xts-key is for tweak */
        return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
                                  keylen / 2);
}

static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
{
        aesni_enc(ctx, out, in);
}

#ifdef CONFIG_X86_64

static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
}

static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
}

static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
}

static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
        aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
}

static const struct common_glue_ctx aesni_enc_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
        } }
};

static const struct common_glue_ctx aesni_dec_xts = {
        .num_funcs = 2,
        .fpu_blocks_limit = 1,

        .funcs = { {
                .num_blocks = 8,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
        }, {
                .num_blocks = 1,
                .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
        } }
};

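/*
 * For illustration: the glue layer walks these function tables in
 * order, so a 10-block XTS request is processed as one 8-wide call
 * followed by two single-block calls.
 */
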
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);

        return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
                                     XTS_TWEAK_CAST(aesni_xts_tweak),
                                     aes_ctx(ctx->raw_tweak_ctx),
                                     aes_ctx(ctx->raw_crypt_ctx));
}

#else

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_encrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
        struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];
        struct xts_crypt_req req = {
                .tbuf = buf,
                .tbuflen = sizeof(buf),

                .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
                .tweak_fn = aesni_xts_tweak,
                .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
                .crypt_fn = lrw_xts_decrypt_callback,
        };
        int ret;

        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        ret = xts_crypt(desc, dst, src, nbytes, &req);
        kernel_fpu_end();

        return ret;
}

#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
        struct cryptd_aead *cryptd_tfm;
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        struct crypto_aead *cryptd_child;
        struct aesni_rfc4106_gcm_ctx *child_ctx;
        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        cryptd_child = cryptd_aead_child(cryptd_tfm);
        child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
        memcpy(child_ctx, ctx, sizeof(*ctx));
        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_aead.reqsize = sizeof(struct aead_request)
                + crypto_aead_reqsize(&cryptd_tfm->base);
        return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
        struct aesni_rfc4106_gcm_ctx *ctx =
                (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        if (!IS_ERR(ctx->cryptd_tfm))
                cryptd_free_aead(ctx->cryptd_tfm);
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
        struct aesni_gcm_set_hash_subkey_result *result = req->data;

        if (err == -EINPROGRESS)
                return;
        result->err = err;
        complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_ablkcipher *ctr_tfm;
        struct ablkcipher_request *req;
        int ret = -EINVAL;
        struct aesni_hash_subkey_req_data *req_data;

        ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
        if (IS_ERR(ctr_tfm))
                return PTR_ERR(ctr_tfm);

        crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

        ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
        if (ret)
                goto out_free_ablkcipher;

        ret = -ENOMEM;
        req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
        if (!req)
                goto out_free_ablkcipher;

        req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data)
                goto out_free_request;

        memset(req_data->iv, 0, sizeof(req_data->iv));

        /* Clear the data in the hash sub key container to zero. */
        /* We want to cipher all zeros to create the hash sub key. */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        init_completion(&req_data->result.completion);
        sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
        ablkcipher_request_set_tfm(req, ctr_tfm);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        rfc4106_set_hash_subkey_done,
                                        &req_data->result);

        ablkcipher_request_set_crypt(req, &req_data->sg,
                &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                ret = wait_for_completion_interruptible
                        (&req_data->result.completion);
                if (!ret)
                        ret = req_data->result.err;
        }
        kfree(req_data);
out_free_request:
        ablkcipher_request_free(req);
out_free_ablkcipher:
        crypto_free_ablkcipher(ctr_tfm);
        return ret;
}

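/*
 * For illustration: encrypting one all-zero block with ctr(aes) under a
 * zero IV yields H = E_K(0^128), the GHASH hash subkey required by the
 * GCM specification; that is all the helper above computes.
 */
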
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        int ret = 0;
        struct crypto_tfm *tfm = crypto_aead_tfm(parent);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
        struct aesni_rfc4106_gcm_ctx *child_ctx =
                aesni_rfc4106_gcm_ctx_get(cryptd_child);
        u8 *new_key_align, *new_key_mem = NULL;

        if (key_len < 4) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        /* Account for the 4 byte nonce at the end. */
        key_len -= 4;
        if (key_len != AES_KEYSIZE_128) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
        /* This must be on a 16 byte boundary! */
        if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
                return -EINVAL;

        if ((unsigned long)key % AESNI_ALIGN) {
                /* key is not aligned: use an auxiliary aligned pointer */
                new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
                if (!new_key_mem)
                        return -ENOMEM;

                new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
                memcpy(new_key_align, key, key_len);
                key = new_key_align;
        }

        if (!irq_fpu_usable())
                ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
                                            key, key_len);
        else {
                kernel_fpu_begin();
                ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
                kernel_fpu_end();
        }
        /* This must be on a 16 byte boundary! */
        if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
                ret = -EINVAL;
                goto exit;
        }
        ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
        memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
        kfree(new_key_mem);
        return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }
        crypto_aead_crt(parent)->authsize = authsize;
        crypto_aead_crt(cryptd_child)->authsize = authsize;
        return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_encrypt(cryptd_req);
        } else {
                struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.encrypt(req);
                kernel_fpu_end();
                return ret;
        }
}

static int rfc4106_decrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_decrypt(cryptd_req);
        } else {
                struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.decrypt(req);
                kernel_fpu_end();
                return ret;
        }
}

static int __driver_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_tab[16+AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, we need to have the AAD length equal
         * to 8 or 12 bytes.
         */
        if (unlikely(req->assoclen != 8 && req->assoclen != 12))
                return -EINVAL;
        /* Build the IV */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk);
                assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk);
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!src))
                        return -ENOMEM;
                assoc = (src + req->cryptlen + auth_tag_len);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                                        req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
                + ((unsigned long)req->cryptlen), auth_tag_len);

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src);
                scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0,
                        req->cryptlen + auth_tag_len, 1);
                kfree(src);
        }
        return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        __be32 counter = cpu_to_be32(1);
        int retval = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_and_authTag[32+AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
        u8 *authTag = iv + 16;
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        if (unlikely((req->cryptlen < auth_tag_len) ||
                (req->assoclen != 8 && req->assoclen != 12)))
                return -EINVAL;
        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, we need to have the AAD length
         * equal to 8 or 12 bytes.
         */

        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* Build the IV */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk);
                assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk);
                }
        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!src)
                        return -ENOMEM;
                /* assoc must stay inside the allocation above, so it
                 * starts right after the ciphertext, not after the
                 * (unallocated) tag. */
                assoc = (src + req->cryptlen);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                        req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
                authTag, auth_tag_len);

        /* Compare generated tag with passed in tag. */
        retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src);
                scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
                kfree(src);
        }
        return retval;
}
#endif

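/*
 * Note: the tag check above uses crypto_memneq() rather than memcmp()
 * so that the comparison runs in constant time and does not leak how
 * many authentication-tag bytes matched.
 */
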
static struct crypto_alg aesni_algs[] = { {
        .cra_name               = "aes",
        .cra_driver_name        = "aes-aesni",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
        }
}, {
        .cra_name               = "__aes-aesni",
        .cra_driver_name        = "__driver-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = aes_set_key,
                        .cia_encrypt            = __aes_encrypt,
                        .cia_decrypt            = __aes_decrypt
                }
        }
}, {
        .cra_name               = "__ecb-aes-aesni",
        .cra_driver_name        = "__driver-ecb-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ecb_encrypt,
                        .decrypt        = ecb_decrypt,
                },
        },
}, {
        .cra_name               = "__cbc-aes-aesni",
        .cra_driver_name        = "__driver-cbc-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = cbc_encrypt,
                        .decrypt        = cbc_decrypt,
                },
        },
}, {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ecb_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_cbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#ifdef CONFIG_X86_64
}, {
        .cra_name               = "__ctr-aes-aesni",
        .cra_driver_name        = "__driver-ctr-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
                                  AESNI_ALIGN - 1,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = aes_set_key,
                        .encrypt        = ctr_crypt,
                        .decrypt        = ctr_crypt,
                },
        },
}, {
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_ctr_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_encrypt,
                },
        },
}, {
        .cra_name               = "__gcm-aes-aesni",
        .cra_driver_name        = "__driver-gcm-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_AEAD,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx) +
                                  AESNI_ALIGN,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_aead_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .aead = {
                        .encrypt        = __driver_rfc4106_encrypt,
                        .decrypt        = __driver_rfc4106_decrypt,
                },
        },
}, {
        .cra_name               = "rfc4106(gcm(aes))",
        .cra_driver_name        = "rfc4106-gcm-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx) +
                                  AESNI_ALIGN,
        .cra_alignmask          = 0,
        .cra_type               = &crypto_nivaead_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = rfc4106_init,
        .cra_exit               = rfc4106_exit,
        .cra_u = {
                .aead = {
                        .setkey         = rfc4106_set_key,
                        .setauthsize    = rfc4106_set_authsize,
                        .encrypt        = rfc4106_encrypt,
                        .decrypt        = rfc4106_decrypt,
                        .geniv          = "seqiv",
                        .ivsize         = 8,
                        .maxauthsize    = 16,
                },
        },
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
        .cra_name               = "pcbc(aes)",
        .cra_driver_name        = "pcbc-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_pcbc_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
#endif
}, {
        .cra_name               = "__lrw-aes-aesni",
        .cra_driver_name        = "__driver-lrw-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_exit               = lrw_aesni_exit_tfm,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = lrw_aesni_setkey,
                        .encrypt        = lrw_encrypt,
                        .decrypt        = lrw_decrypt,
                },
        },
}, {
        .cra_name               = "__xts-aes-aesni",
        .cra_driver_name        = "__driver-xts-aes-aesni",
        .cra_priority           = 0,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = xts_aesni_setkey,
                        .encrypt        = xts_encrypt,
                        .decrypt        = xts_decrypt,
                },
        },
}, {
        .cra_name               = "lrw(aes)",
        .cra_driver_name        = "lrw-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
}, {
        .cra_name               = "xts(aes)",
        .cra_driver_name        = "xts-aes-aesni",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_alignmask          = 0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                        .ivsize         = AES_BLOCK_SIZE,
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
} };

static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
        int err;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
        } else
#endif
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
        } else
#endif
        {
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif
#endif

        err = crypto_fpu_init();
        if (err)
                return err;

        return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

static void __exit aesni_exit(void)
{
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

        crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");