1 /*
2 * Support for Intel AES-NI instructions. This file contains glue
3 * code, the real AES implementation is in aesni-intel_asm.S.
4 *
5 * Copyright (C) 2008, Intel Corp.
6 * Author: Huang Ying <ying.huang@intel.com>
7 *
8 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9 * interface for 64-bit kernels.
10 * Authors: Adrian Hoban <adrian.hoban@intel.com>
11 * Gabriele Paoloni <gabriele.paoloni@intel.com>
12 * Tadeusz Struk (tadeusz.struk@intel.com)
13 * Aidan O'Mahony (aidan.o.mahony@intel.com)
14 * Copyright (c) 2010, Intel Corporation.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 */
21
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/crypto.h>
25 #include <linux/module.h>
26 #include <linux/err.h>
27 #include <crypto/algapi.h>
28 #include <crypto/aes.h>
29 #include <crypto/cryptd.h>
30 #include <crypto/ctr.h>
31 #include <crypto/b128ops.h>
32 #include <crypto/lrw.h>
33 #include <crypto/xts.h>
34 #include <asm/cpu_device_id.h>
35 #include <asm/i387.h>
36 #include <asm/crypto/aes.h>
37 #include <crypto/ablk_helper.h>
38 #include <crypto/scatterwalk.h>
39 #include <crypto/internal/aead.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 #ifdef CONFIG_X86_64
43 #include <asm/crypto/glue_helper.h>
44 #endif
45
46
47 /* This data is stored at the end of the crypto_tfm struct.
48 * It is per-"session" data storage and
49 * needs to be 16-byte aligned.
50 */
51 struct aesni_rfc4106_gcm_ctx {
52 u8 hash_subkey[16];
53 struct crypto_aes_ctx aes_key_expanded;
54 u8 nonce[4];
55 struct cryptd_aead *cryptd_tfm;
56 };
57
58 struct aesni_gcm_set_hash_subkey_result {
59 int err;
60 struct completion completion;
61 };
62
63 struct aesni_hash_subkey_req_data {
64 u8 iv[16];
65 struct aesni_gcm_set_hash_subkey_result result;
66 struct scatterlist sg;
67 };
68
69 #define AESNI_ALIGN (16)
70 #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
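/*
 * AES_BLOCK_MASK rounds a byte count down to a whole number of 16-byte AES
 * blocks, e.g. 100 & AES_BLOCK_MASK == 96.  The blkcipher walk loops below
 * process that rounded amount per iteration and hand the remainder back to
 * blkcipher_walk_done().
 */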
71 #define RFC4106_HASH_SUBKEY_SIZE 16
72
73 struct aesni_lrw_ctx {
74 struct lrw_table_ctx lrw_table;
75 u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
76 };
77
78 struct aesni_xts_ctx {
79 u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
80 u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
81 };
82
83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84 unsigned int key_len);
85 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
86 const u8 *in);
87 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
88 const u8 *in);
89 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
90 const u8 *in, unsigned int len);
91 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
92 const u8 *in, unsigned int len);
93 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
94 const u8 *in, unsigned int len, u8 *iv);
95 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
96 const u8 *in, unsigned int len, u8 *iv);
97
98 int crypto_fpu_init(void);
99 void crypto_fpu_exit(void);
100
101 #define AVX_GEN2_OPTSIZE 640
102 #define AVX_GEN4_OPTSIZE 4096
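/*
 * Request-size thresholds used by the AVX/AVX2 GCM dispatch helpers below:
 * requests smaller than AVX_GEN2_OPTSIZE (or with a non-128-bit key) stay on
 * the plain SSE aesni_gcm_enc/dec code, and the AVX2 (gen4) code is only used
 * for requests of at least AVX_GEN4_OPTSIZE bytes.  The exact cut-over points
 * are presumably empirical tuning values.
 */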
103
104 #ifdef CONFIG_X86_64
105
106 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
107 const u8 *in, unsigned int len, u8 *iv);
108 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
109 const u8 *in, unsigned int len, u8 *iv);
110
111 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
112 const u8 *in, bool enc, u8 *iv);
113
114 /* asmlinkage void aesni_gcm_enc()
115 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
116 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
117 * const u8 *in, Plaintext input
118 * unsigned long plaintext_len, Length of data in bytes for encryption.
119 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
120 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
121 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
122 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
123 * const u8 *aad, Additional Authentication Data (AAD)
124 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
125 * is going to be 8 or 12 bytes
126 * u8 *auth_tag, Authenticated Tag output.
127 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
128 * Valid values are 16 (most likely), 12 or 8.
129 */
130 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
131 const u8 *in, unsigned long plaintext_len, u8 *iv,
132 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
133 u8 *auth_tag, unsigned long auth_tag_len);
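/*
 * A minimal, illustrative sketch (not used anywhere in this driver) of how
 * the parameters documented above fit together for one in-place encryption.
 * The 16-byte aligned key schedule and the hash subkey are assumed to have
 * been prepared beforehand (e.g. by aesni_set_key() and
 * rfc4106_set_hash_subkey() below); the salt, IV, AAD and data values here
 * are placeholders only.
 */
static void __maybe_unused aesni_gcm_enc_example(struct crypto_aes_ctx *key_sched,
						 u8 *hash_subkey)
{
	u8 iv[16] __aligned(16);	/* j0 = salt(4) || IV(8) || 0x00000001 */
	u8 aad[8] = {};			/* e.g. an 8 byte ESP sequence number */
	u8 data[64] = {};		/* plaintext, encrypted in place */
	u8 tag[16];			/* authentication tag output */

	memset(iv, 0, 12);		/* placeholder salt and explicit IV */
	*(__be32 *)(iv + 12) = cpu_to_be32(1);

	kernel_fpu_begin();
	aesni_gcm_enc(key_sched, data, data, sizeof(data), iv,
		      hash_subkey, aad, sizeof(aad), tag, sizeof(tag));
	kernel_fpu_end();
}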
134
135 /* asmlinkage void aesni_gcm_dec()
136 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
137 * u8 *out, Plaintext output. Decrypt in-place is allowed.
138 * const u8 *in, Ciphertext input
139 * unsigned long ciphertext_len, Length of data in bytes for decryption.
140 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
141 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
142 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
143 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
144 * const u8 *aad, Additional Authentication Data (AAD)
145 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
146 * to be 8 or 12 bytes
147 * u8 *auth_tag, Authenticated Tag output.
148 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
149 * Valid values are 16 (most likely), 12 or 8.
150 */
151 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
152 const u8 *in, unsigned long ciphertext_len, u8 *iv,
153 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
154 u8 *auth_tag, unsigned long auth_tag_len);
155
156
157 #ifdef CONFIG_AS_AVX
158 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
159 void *keys, u8 *out, unsigned int num_bytes);
160 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
161 void *keys, u8 *out, unsigned int num_bytes);
162 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
163 void *keys, u8 *out, unsigned int num_bytes);
164 /*
165 * asmlinkage void aesni_gcm_precomp_avx_gen2()
166 * gcm_data *my_ctx_data, context data
167 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
168 */
169 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
170
171 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
172 const u8 *in, unsigned long plaintext_len, u8 *iv,
173 const u8 *aad, unsigned long aad_len,
174 u8 *auth_tag, unsigned long auth_tag_len);
175
176 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
177 const u8 *in, unsigned long ciphertext_len, u8 *iv,
178 const u8 *aad, unsigned long aad_len,
179 u8 *auth_tag, unsigned long auth_tag_len);
180
181 static void aesni_gcm_enc_avx(void *ctx, u8 *out,
182 const u8 *in, unsigned long plaintext_len, u8 *iv,
183 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
184 u8 *auth_tag, unsigned long auth_tag_len)
185 {
186 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
187 if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
188 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
189 aad_len, auth_tag, auth_tag_len);
190 } else {
191 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
192 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
193 aad_len, auth_tag, auth_tag_len);
194 }
195 }
196
197 static void aesni_gcm_dec_avx(void *ctx, u8 *out,
198 const u8 *in, unsigned long ciphertext_len, u8 *iv,
199 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
200 u8 *auth_tag, unsigned long auth_tag_len)
201 {
202 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
203 if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
204 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
205 aad_len, auth_tag, auth_tag_len);
206 } else {
207 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
208 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
209 aad_len, auth_tag, auth_tag_len);
210 }
211 }
212 #endif
213
214 #ifdef CONFIG_AS_AVX2
215 /*
216 * asmlinkage void aesni_gcm_precomp_avx_gen4()
217 * gcm_data *my_ctx_data, context data
218 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
219 */
220 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
221
222 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
223 const u8 *in, unsigned long plaintext_len, u8 *iv,
224 const u8 *aad, unsigned long aad_len,
225 u8 *auth_tag, unsigned long auth_tag_len);
226
227 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
228 const u8 *in, unsigned long ciphertext_len, u8 *iv,
229 const u8 *aad, unsigned long aad_len,
230 u8 *auth_tag, unsigned long auth_tag_len);
231
232 static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
233 const u8 *in, unsigned long plaintext_len, u8 *iv,
234 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
235 u8 *auth_tag, unsigned long auth_tag_len)
236 {
237 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
238 if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
239 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
240 aad_len, auth_tag, auth_tag_len);
241 } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
242 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
243 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
244 aad_len, auth_tag, auth_tag_len);
245 } else {
246 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
247 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
248 aad_len, auth_tag, auth_tag_len);
249 }
250 }
251
252 static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
253 const u8 *in, unsigned long ciphertext_len, u8 *iv,
254 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
255 u8 *auth_tag, unsigned long auth_tag_len)
256 {
257 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
258 if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
259 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
260 aad, aad_len, auth_tag, auth_tag_len);
261 } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
262 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
263 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
264 aad_len, auth_tag, auth_tag_len);
265 } else {
266 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
267 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
268 aad_len, auth_tag, auth_tag_len);
269 }
270 }
271 #endif
272
273 static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
274 const u8 *in, unsigned long plaintext_len, u8 *iv,
275 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
276 u8 *auth_tag, unsigned long auth_tag_len);
277
278 static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
279 const u8 *in, unsigned long ciphertext_len, u8 *iv,
280 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
281 u8 *auth_tag, unsigned long auth_tag_len);
282
283 static inline struct
284 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
285 {
286 return
287 (struct aesni_rfc4106_gcm_ctx *)
288 PTR_ALIGN((u8 *)
289 crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
290 }
291 #endif
292
293 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
294 {
295 unsigned long addr = (unsigned long)raw_ctx;
296 unsigned long align = AESNI_ALIGN;
297
298 if (align <= crypto_tfm_ctx_alignment())
299 align = 1;
300 return (struct crypto_aes_ctx *)ALIGN(addr, align);
301 }
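/*
 * Illustrative example: if crypto_tfm_ctx() happens to hand back a pointer
 * ending in ...0x08 and the generic tfm alignment is below 16, ALIGN() above
 * moves it forward to the next ...0x10 boundary.  That is why the raw context
 * buffers in this file are sized sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1.
 */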
302
303 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
304 const u8 *in_key, unsigned int key_len)
305 {
306 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
307 u32 *flags = &tfm->crt_flags;
308 int err;
309
310 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
311 key_len != AES_KEYSIZE_256) {
312 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
313 return -EINVAL;
314 }
315
316 if (!irq_fpu_usable())
317 err = crypto_aes_expand_key(ctx, in_key, key_len);
318 else {
319 kernel_fpu_begin();
320 err = aesni_set_key(ctx, in_key, key_len);
321 kernel_fpu_end();
322 }
323
324 return err;
325 }
326
327 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
328 unsigned int key_len)
329 {
330 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
331 }
332
333 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
334 {
335 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
336
337 if (!irq_fpu_usable())
338 crypto_aes_encrypt_x86(ctx, dst, src);
339 else {
340 kernel_fpu_begin();
341 aesni_enc(ctx, dst, src);
342 kernel_fpu_end();
343 }
344 }
345
346 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
347 {
348 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
349
350 if (!irq_fpu_usable())
351 crypto_aes_decrypt_x86(ctx, dst, src);
352 else {
353 kernel_fpu_begin();
354 aesni_dec(ctx, dst, src);
355 kernel_fpu_end();
356 }
357 }
358
359 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
360 {
361 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
362
363 aesni_enc(ctx, dst, src);
364 }
365
366 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
367 {
368 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
369
370 aesni_dec(ctx, dst, src);
371 }
372
373 static int ecb_encrypt(struct blkcipher_desc *desc,
374 struct scatterlist *dst, struct scatterlist *src,
375 unsigned int nbytes)
376 {
377 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
378 struct blkcipher_walk walk;
379 int err;
380
381 blkcipher_walk_init(&walk, dst, src, nbytes);
382 err = blkcipher_walk_virt(desc, &walk);
383 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
384
385 kernel_fpu_begin();
386 while ((nbytes = walk.nbytes)) {
387 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
388 nbytes & AES_BLOCK_MASK);
389 nbytes &= AES_BLOCK_SIZE - 1;
390 err = blkcipher_walk_done(desc, &walk, nbytes);
391 }
392 kernel_fpu_end();
393
394 return err;
395 }
396
397 static int ecb_decrypt(struct blkcipher_desc *desc,
398 struct scatterlist *dst, struct scatterlist *src,
399 unsigned int nbytes)
400 {
401 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
402 struct blkcipher_walk walk;
403 int err;
404
405 blkcipher_walk_init(&walk, dst, src, nbytes);
406 err = blkcipher_walk_virt(desc, &walk);
407 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
408
409 kernel_fpu_begin();
410 while ((nbytes = walk.nbytes)) {
411 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
412 nbytes & AES_BLOCK_MASK);
413 nbytes &= AES_BLOCK_SIZE - 1;
414 err = blkcipher_walk_done(desc, &walk, nbytes);
415 }
416 kernel_fpu_end();
417
418 return err;
419 }
420
421 static int cbc_encrypt(struct blkcipher_desc *desc,
422 struct scatterlist *dst, struct scatterlist *src,
423 unsigned int nbytes)
424 {
425 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
426 struct blkcipher_walk walk;
427 int err;
428
429 blkcipher_walk_init(&walk, dst, src, nbytes);
430 err = blkcipher_walk_virt(desc, &walk);
431 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
432
433 kernel_fpu_begin();
434 while ((nbytes = walk.nbytes)) {
435 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
436 nbytes & AES_BLOCK_MASK, walk.iv);
437 nbytes &= AES_BLOCK_SIZE - 1;
438 err = blkcipher_walk_done(desc, &walk, nbytes);
439 }
440 kernel_fpu_end();
441
442 return err;
443 }
444
445 static int cbc_decrypt(struct blkcipher_desc *desc,
446 struct scatterlist *dst, struct scatterlist *src,
447 unsigned int nbytes)
448 {
449 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
450 struct blkcipher_walk walk;
451 int err;
452
453 blkcipher_walk_init(&walk, dst, src, nbytes);
454 err = blkcipher_walk_virt(desc, &walk);
455 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
456
457 kernel_fpu_begin();
458 while ((nbytes = walk.nbytes)) {
459 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
460 nbytes & AES_BLOCK_MASK, walk.iv);
461 nbytes &= AES_BLOCK_SIZE - 1;
462 err = blkcipher_walk_done(desc, &walk, nbytes);
463 }
464 kernel_fpu_end();
465
466 return err;
467 }
468
469 #ifdef CONFIG_X86_64
470 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
471 struct blkcipher_walk *walk)
472 {
473 u8 *ctrblk = walk->iv;
474 u8 keystream[AES_BLOCK_SIZE];
475 u8 *src = walk->src.virt.addr;
476 u8 *dst = walk->dst.virt.addr;
477 unsigned int nbytes = walk->nbytes;
478
479 aesni_enc(ctx, keystream, ctrblk);
480 crypto_xor(keystream, src, nbytes);
481 memcpy(dst, keystream, nbytes);
482 crypto_inc(ctrblk, AES_BLOCK_SIZE);
483 }
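/*
 * Example: with 5 trailing bytes, one more counter block is encrypted, only
 * the first 5 keystream bytes are XORed into the output, and the counter in
 * walk->iv is incremented as usual.
 */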
484
485 #ifdef CONFIG_AS_AVX
486 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
487 const u8 *in, unsigned int len, u8 *iv)
488 {
489 /*
490 * based on key length, override with the by8 version
491 * of ctr mode encryption/decryption for improved performance
492 * aes_set_key_common() ensures that key length is one of
493 * {128,192,256}
494 */
495 if (ctx->key_length == AES_KEYSIZE_128)
496 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
497 else if (ctx->key_length == AES_KEYSIZE_192)
498 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
499 else
500 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
501 }
502 #endif
503
504 static int ctr_crypt(struct blkcipher_desc *desc,
505 struct scatterlist *dst, struct scatterlist *src,
506 unsigned int nbytes)
507 {
508 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
509 struct blkcipher_walk walk;
510 int err;
511
512 blkcipher_walk_init(&walk, dst, src, nbytes);
513 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
514 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
515
516 kernel_fpu_begin();
517 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
518 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
519 nbytes & AES_BLOCK_MASK, walk.iv);
520 nbytes &= AES_BLOCK_SIZE - 1;
521 err = blkcipher_walk_done(desc, &walk, nbytes);
522 }
523 if (walk.nbytes) {
524 ctr_crypt_final(ctx, &walk);
525 err = blkcipher_walk_done(desc, &walk, 0);
526 }
527 kernel_fpu_end();
528
529 return err;
530 }
531 #endif
532
533 static int ablk_ecb_init(struct crypto_tfm *tfm)
534 {
535 return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
536 }
537
538 static int ablk_cbc_init(struct crypto_tfm *tfm)
539 {
540 return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
541 }
542
543 #ifdef CONFIG_X86_64
544 static int ablk_ctr_init(struct crypto_tfm *tfm)
545 {
546 return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
547 }
548
549 #endif
550
551 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
552 static int ablk_pcbc_init(struct crypto_tfm *tfm)
553 {
554 return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
555 }
556 #endif
557
558 static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
559 {
560 aesni_ecb_enc(ctx, blks, blks, nbytes);
561 }
562
563 static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
564 {
565 aesni_ecb_dec(ctx, blks, blks, nbytes);
566 }
567
568 static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
569 unsigned int keylen)
570 {
571 struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
572 int err;
573
574 err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
575 keylen - AES_BLOCK_SIZE);
576 if (err)
577 return err;
578
579 return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
580 }
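/*
 * For "lrw(aes)" the supplied key is the AES key followed by one extra
 * 16-byte tweak key, so e.g. a 48-byte key selects AES-256 plus the LRW
 * table key; this matches the AES_*_KEY_SIZE + AES_BLOCK_SIZE key sizes
 * advertised for the algorithm below.
 */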
581
582 static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
583 {
584 struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
585
586 lrw_free_table(&ctx->lrw_table);
587 }
588
589 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
590 struct scatterlist *src, unsigned int nbytes)
591 {
592 struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
593 be128 buf[8];
594 struct lrw_crypt_req req = {
595 .tbuf = buf,
596 .tbuflen = sizeof(buf),
597
598 .table_ctx = &ctx->lrw_table,
599 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
600 .crypt_fn = lrw_xts_encrypt_callback,
601 };
602 int ret;
603
604 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
605
606 kernel_fpu_begin();
607 ret = lrw_crypt(desc, dst, src, nbytes, &req);
608 kernel_fpu_end();
609
610 return ret;
611 }
612
613 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
614 struct scatterlist *src, unsigned int nbytes)
615 {
616 struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
617 be128 buf[8];
618 struct lrw_crypt_req req = {
619 .tbuf = buf,
620 .tbuflen = sizeof(buf),
621
622 .table_ctx = &ctx->lrw_table,
623 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
624 .crypt_fn = lrw_xts_decrypt_callback,
625 };
626 int ret;
627
628 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
629
630 kernel_fpu_begin();
631 ret = lrw_crypt(desc, dst, src, nbytes, &req);
632 kernel_fpu_end();
633
634 return ret;
635 }
636
637 static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
638 unsigned int keylen)
639 {
640 struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
641 u32 *flags = &tfm->crt_flags;
642 int err;
643
644 /* key consists of keys of equal size concatenated, therefore
645 * the length must be even
646 */
647 if (keylen % 2) {
648 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
649 return -EINVAL;
650 }
651
652 /* first half of xts-key is for crypt */
653 err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
654 if (err)
655 return err;
656
657 /* second half of xts-key is for tweak */
658 return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
659 keylen / 2);
660 }
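/*
 * For "xts(aes)" the key is two equally sized AES keys back to back: the
 * first half encrypts the data, the second half encrypts the tweak.  A
 * 64-byte key therefore gives XTS-AES-256, matching the 2 * AES_*_KEY_SIZE
 * limits advertised for the algorithm below.
 */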
661
662
663 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
664 {
665 aesni_enc(ctx, out, in);
666 }
667
668 #ifdef CONFIG_X86_64
669
670 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
671 {
672 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
673 }
674
675 static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
676 {
677 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
678 }
679
680 static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
681 {
682 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
683 }
684
685 static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
686 {
687 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
688 }
689
690 static const struct common_glue_ctx aesni_enc_xts = {
691 .num_funcs = 2,
692 .fpu_blocks_limit = 1,
693
694 .funcs = { {
695 .num_blocks = 8,
696 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
697 }, {
698 .num_blocks = 1,
699 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
700 } }
701 };
702
703 static const struct common_glue_ctx aesni_dec_xts = {
704 .num_funcs = 2,
705 .fpu_blocks_limit = 1,
706
707 .funcs = { {
708 .num_blocks = 8,
709 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
710 }, {
711 .num_blocks = 1,
712 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
713 } }
714 };
715
716 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
717 struct scatterlist *src, unsigned int nbytes)
718 {
719 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
720
721 return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
722 XTS_TWEAK_CAST(aesni_xts_tweak),
723 aes_ctx(ctx->raw_tweak_ctx),
724 aes_ctx(ctx->raw_crypt_ctx));
725 }
726
727 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
728 struct scatterlist *src, unsigned int nbytes)
729 {
730 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
731
732 return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
733 XTS_TWEAK_CAST(aesni_xts_tweak),
734 aes_ctx(ctx->raw_tweak_ctx),
735 aes_ctx(ctx->raw_crypt_ctx));
736 }
737
738 #else
739
740 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
741 struct scatterlist *src, unsigned int nbytes)
742 {
743 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
744 be128 buf[8];
745 struct xts_crypt_req req = {
746 .tbuf = buf,
747 .tbuflen = sizeof(buf),
748
749 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
750 .tweak_fn = aesni_xts_tweak,
751 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
752 .crypt_fn = lrw_xts_encrypt_callback,
753 };
754 int ret;
755
756 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
757
758 kernel_fpu_begin();
759 ret = xts_crypt(desc, dst, src, nbytes, &req);
760 kernel_fpu_end();
761
762 return ret;
763 }
764
765 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
766 struct scatterlist *src, unsigned int nbytes)
767 {
768 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
769 be128 buf[8];
770 struct xts_crypt_req req = {
771 .tbuf = buf,
772 .tbuflen = sizeof(buf),
773
774 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
775 .tweak_fn = aesni_xts_tweak,
776 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
777 .crypt_fn = lrw_xts_decrypt_callback,
778 };
779 int ret;
780
781 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
782
783 kernel_fpu_begin();
784 ret = xts_crypt(desc, dst, src, nbytes, &req);
785 kernel_fpu_end();
786
787 return ret;
788 }
789
790 #endif
791
792 #ifdef CONFIG_X86_64
793 static int rfc4106_init(struct crypto_tfm *tfm)
794 {
795 struct cryptd_aead *cryptd_tfm;
796 struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
797 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
798 struct crypto_aead *cryptd_child;
799 struct aesni_rfc4106_gcm_ctx *child_ctx;
800 cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
801 if (IS_ERR(cryptd_tfm))
802 return PTR_ERR(cryptd_tfm);
803
804 cryptd_child = cryptd_aead_child(cryptd_tfm);
805 child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
806 memcpy(child_ctx, ctx, sizeof(*ctx));
807 ctx->cryptd_tfm = cryptd_tfm;
808 tfm->crt_aead.reqsize = sizeof(struct aead_request)
809 + crypto_aead_reqsize(&cryptd_tfm->base);
810 return 0;
811 }
812
813 static void rfc4106_exit(struct crypto_tfm *tfm)
814 {
815 struct aesni_rfc4106_gcm_ctx *ctx =
816 (struct aesni_rfc4106_gcm_ctx *)
817 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
818 if (!IS_ERR(ctx->cryptd_tfm))
819 cryptd_free_aead(ctx->cryptd_tfm);
820 return;
821 }
822
823 static void
824 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
825 {
826 struct aesni_gcm_set_hash_subkey_result *result = req->data;
827
828 if (err == -EINPROGRESS)
829 return;
830 result->err = err;
831 complete(&result->completion);
832 }
833
834 static int
835 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
836 {
837 struct crypto_ablkcipher *ctr_tfm;
838 struct ablkcipher_request *req;
839 int ret = -EINVAL;
840 struct aesni_hash_subkey_req_data *req_data;
841
842 ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
843 if (IS_ERR(ctr_tfm))
844 return PTR_ERR(ctr_tfm);
845
846 crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
847
848 ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
849 if (ret)
850 goto out_free_ablkcipher;
851
852 ret = -ENOMEM;
853 req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
854 if (!req)
855 goto out_free_ablkcipher;
856
857 req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
858 if (!req_data)
859 goto out_free_request;
860
861 memset(req_data->iv, 0, sizeof(req_data->iv));
862
863 /* Clear the data in the hash sub key container to zero.*/
864 /* We want to cipher all zeros to create the hash sub key. */
865 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
866
867 init_completion(&req_data->result.completion);
868 sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
869 ablkcipher_request_set_tfm(req, ctr_tfm);
870 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
871 CRYPTO_TFM_REQ_MAY_BACKLOG,
872 rfc4106_set_hash_subkey_done,
873 &req_data->result);
874
875 ablkcipher_request_set_crypt(req, &req_data->sg,
876 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
877
878 ret = crypto_ablkcipher_encrypt(req);
879 if (ret == -EINPROGRESS || ret == -EBUSY) {
880 ret = wait_for_completion_interruptible
881 (&req_data->result.completion);
882 if (!ret)
883 ret = req_data->result.err;
884 }
885 kfree(req_data);
886 out_free_request:
887 ablkcipher_request_free(req);
888 out_free_ablkcipher:
889 crypto_free_ablkcipher(ctr_tfm);
890 return ret;
891 }
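/*
 * What the helper above computes is the GHASH key H = E_K(0^128): with an
 * all-zero counter block, CTR-encrypting 16 zero bytes is exactly one AES
 * encryption of the zero block.  When the FPU is usable the same value could
 * be produced directly from the expanded key, roughly as in the illustrative
 * (unused) helper below; going through the async "ctr(aes)" transform
 * presumably keeps setkey independent of FPU availability.
 */
static void __maybe_unused rfc4106_hash_subkey_direct(struct crypto_aes_ctx *key_sched,
						       u8 *hash_subkey)
{
	static const u8 zeroes[16];

	kernel_fpu_begin();
	aesni_enc(key_sched, hash_subkey, zeroes);
	kernel_fpu_end();
}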
892
893 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
894 unsigned int key_len)
895 {
896 int ret = 0;
897 struct crypto_tfm *tfm = crypto_aead_tfm(parent);
898 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
899 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
900 struct aesni_rfc4106_gcm_ctx *child_ctx =
901 aesni_rfc4106_gcm_ctx_get(cryptd_child);
902 u8 *new_key_align, *new_key_mem = NULL;
903
904 if (key_len < 4) {
905 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
906 return -EINVAL;
907 }
908 /*Account for 4 byte nonce at the end.*/
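/* e.g. a 20-byte rfc4106 key is a 16-byte AES-128 key followed by the
 * 4-byte salt that later forms the first 4 bytes of the GCM counter
 * block (see the IV construction in the encrypt/decrypt paths below).
 */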
909 key_len -= 4;
910 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
911 key_len != AES_KEYSIZE_256) {
912 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
913 return -EINVAL;
914 }
915
916 memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
917 /*This must be on a 16 byte boundary!*/
918 if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
919 return -EINVAL;
920
921 if ((unsigned long)key % AESNI_ALIGN) {
922 /* key is not aligned: use an auxiliary aligned pointer */
923 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
924 if (!new_key_mem)
925 return -ENOMEM;
926
927 new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
928 memcpy(new_key_align, key, key_len);
929 key = new_key_align;
930 }
931
932 if (!irq_fpu_usable())
933 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
934 key, key_len);
935 else {
936 kernel_fpu_begin();
937 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
938 kernel_fpu_end();
939 }
940 /*This must be on a 16 byte boundary!*/
941 if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
942 ret = -EINVAL;
943 goto exit;
944 }
945 ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
946 memcpy(child_ctx, ctx, sizeof(*ctx));
947 exit:
948 kfree(new_key_mem);
949 return ret;
950 }
951
952 /* This is the Integrity Check Value (aka the authentication tag) length and can
953 * be 8, 12 or 16 bytes long. */
954 static int rfc4106_set_authsize(struct crypto_aead *parent,
955 unsigned int authsize)
956 {
957 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
958 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
959
960 switch (authsize) {
961 case 8:
962 case 12:
963 case 16:
964 break;
965 default:
966 return -EINVAL;
967 }
968 crypto_aead_crt(parent)->authsize = authsize;
969 crypto_aead_crt(cryptd_child)->authsize = authsize;
970 return 0;
971 }
972
973 static int rfc4106_encrypt(struct aead_request *req)
974 {
975 int ret;
976 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
977 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
978
979 if (!irq_fpu_usable()) {
980 struct aead_request *cryptd_req =
981 (struct aead_request *) aead_request_ctx(req);
982 memcpy(cryptd_req, req, sizeof(*req));
983 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
984 return crypto_aead_encrypt(cryptd_req);
985 } else {
986 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
987 kernel_fpu_begin();
988 ret = cryptd_child->base.crt_aead.encrypt(req);
989 kernel_fpu_end();
990 return ret;
991 }
992 }
993
994 static int rfc4106_decrypt(struct aead_request *req)
995 {
996 int ret;
997 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
998 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
999
1000 if (!irq_fpu_usable()) {
1001 struct aead_request *cryptd_req =
1002 (struct aead_request *) aead_request_ctx(req);
1003 memcpy(cryptd_req, req, sizeof(*req));
1004 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1005 return crypto_aead_decrypt(cryptd_req);
1006 } else {
1007 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1008 kernel_fpu_begin();
1009 ret = cryptd_child->base.crt_aead.decrypt(req);
1010 kernel_fpu_end();
1011 return ret;
1012 }
1013 }
1014
1015 static int __driver_rfc4106_encrypt(struct aead_request *req)
1016 {
1017 u8 one_entry_in_sg = 0;
1018 u8 *src, *dst, *assoc;
1019 __be32 counter = cpu_to_be32(1);
1020 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1021 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1022 u32 key_len = ctx->aes_key_expanded.key_length;
1023 void *aes_ctx = &(ctx->aes_key_expanded);
1024 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1025 u8 iv_tab[16+AESNI_ALIGN];
1026 u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
1027 struct scatter_walk src_sg_walk;
1028 struct scatter_walk assoc_sg_walk;
1029 struct scatter_walk dst_sg_walk;
1030 unsigned int i;
1031
1032 /* Assuming we are supporting rfc4106 64-bit extended
1033 * sequence numbers, we need to have the AAD length equal
1034 * to 8 or 12 bytes. */
1035 if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1036 return -EINVAL;
1037 if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
1038 return -EINVAL;
1039 if (unlikely(key_len != AES_KEYSIZE_128 &&
1040 key_len != AES_KEYSIZE_192 &&
1041 key_len != AES_KEYSIZE_256))
1042 return -EINVAL;
1043
1044 /* The IV is built below. */
1045 for (i = 0; i < 4; i++)
1046 *(iv+i) = ctx->nonce[i];
1047 for (i = 0; i < 8; i++)
1048 *(iv+4+i) = req->iv[i];
1049 *((__be32 *)(iv+12)) = counter;
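/* iv now holds salt(4 bytes) || explicit IV(8 bytes) || 0x00000001,
 * i.e. the pre-counter block j0 expected by the gcm helpers.
 */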
1050
1051 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1052 one_entry_in_sg = 1;
1053 scatterwalk_start(&src_sg_walk, req->src);
1054 scatterwalk_start(&assoc_sg_walk, req->assoc);
1055 src = scatterwalk_map(&src_sg_walk);
1056 assoc = scatterwalk_map(&assoc_sg_walk);
1057 dst = src;
1058 if (unlikely(req->src != req->dst)) {
1059 scatterwalk_start(&dst_sg_walk, req->dst);
1060 dst = scatterwalk_map(&dst_sg_walk);
1061 }
1062
1063 } else {
1064 /* Allocate memory for src, dst, assoc */
1065 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1066 GFP_ATOMIC);
1067 if (unlikely(!src))
1068 return -ENOMEM;
1069 assoc = (src + req->cryptlen + auth_tag_len);
1070 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1071 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1072 req->assoclen, 0);
1073 dst = src;
1074 }
1075
1076 aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1077 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1078 + ((unsigned long)req->cryptlen), auth_tag_len);
1079
1080 /* The authTag (aka the Integrity Check Value) needs to be written
1081 * back to the packet. */
1082 if (one_entry_in_sg) {
1083 if (unlikely(req->src != req->dst)) {
1084 scatterwalk_unmap(dst);
1085 scatterwalk_done(&dst_sg_walk, 0, 0);
1086 }
1087 scatterwalk_unmap(src);
1088 scatterwalk_unmap(assoc);
1089 scatterwalk_done(&src_sg_walk, 0, 0);
1090 scatterwalk_done(&assoc_sg_walk, 0, 0);
1091 } else {
1092 scatterwalk_map_and_copy(dst, req->dst, 0,
1093 req->cryptlen + auth_tag_len, 1);
1094 kfree(src);
1095 }
1096 return 0;
1097 }
1098
1099 static int __driver_rfc4106_decrypt(struct aead_request *req)
1100 {
1101 u8 one_entry_in_sg = 0;
1102 u8 *src, *dst, *assoc;
1103 unsigned long tempCipherLen = 0;
1104 __be32 counter = cpu_to_be32(1);
1105 int retval = 0;
1106 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1107 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1108 u32 key_len = ctx->aes_key_expanded.key_length;
1109 void *aes_ctx = &(ctx->aes_key_expanded);
1110 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1111 u8 iv_and_authTag[32+AESNI_ALIGN];
1112 u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1113 u8 *authTag = iv + 16;
1114 struct scatter_walk src_sg_walk;
1115 struct scatter_walk assoc_sg_walk;
1116 struct scatter_walk dst_sg_walk;
1117 unsigned int i;
1118
1119 if (unlikely((req->cryptlen < auth_tag_len) ||
1120 (req->assoclen != 8 && req->assoclen != 12)))
1121 return -EINVAL;
1122 if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
1123 return -EINVAL;
1124 if (unlikely(key_len != AES_KEYSIZE_128 &&
1125 key_len != AES_KEYSIZE_192 &&
1126 key_len != AES_KEYSIZE_256))
1127 return -EINVAL;
1128
1129 /* Assuming we are supporting rfc4106 64-bit extended
1130 * sequence numbers, we need to have the AAD length
1131 * equal to 8 or 12 bytes. */
1132
1133 tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1134 /* The IV is built below. */
1135 for (i = 0; i < 4; i++)
1136 *(iv+i) = ctx->nonce[i];
1137 for (i = 0; i < 8; i++)
1138 *(iv+4+i) = req->iv[i];
1139 *((__be32 *)(iv+12)) = counter;
1140
1141 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1142 one_entry_in_sg = 1;
1143 scatterwalk_start(&src_sg_walk, req->src);
1144 scatterwalk_start(&assoc_sg_walk, req->assoc);
1145 src = scatterwalk_map(&src_sg_walk);
1146 assoc = scatterwalk_map(&assoc_sg_walk);
1147 dst = src;
1148 if (unlikely(req->src != req->dst)) {
1149 scatterwalk_start(&dst_sg_walk, req->dst);
1150 dst = scatterwalk_map(&dst_sg_walk);
1151 }
1152
1153 } else {
1154 /* Allocate memory for src, dst, assoc */
1155 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1156 if (!src)
1157 return -ENOMEM;
1158 assoc = (src + req->cryptlen);
1159 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1160 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1161 req->assoclen, 0);
1162 dst = src;
1163 }
1164
1165 aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
1166 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1167 authTag, auth_tag_len);
1168
1169 /* Compare generated tag with passed in tag. */
1170 retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
1171 -EBADMSG : 0;
1172
1173 if (one_entry_in_sg) {
1174 if (unlikely(req->src != req->dst)) {
1175 scatterwalk_unmap(dst);
1176 scatterwalk_done(&dst_sg_walk, 0, 0);
1177 }
1178 scatterwalk_unmap(src);
1179 scatterwalk_unmap(assoc);
1180 scatterwalk_done(&src_sg_walk, 0, 0);
1181 scatterwalk_done(&assoc_sg_walk, 0, 0);
1182 } else {
1183 scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
1184 kfree(src);
1185 }
1186 return retval;
1187 }
1188 #endif
1189
1190 static struct crypto_alg aesni_algs[] = { {
1191 .cra_name = "aes",
1192 .cra_driver_name = "aes-aesni",
1193 .cra_priority = 300,
1194 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
1195 .cra_blocksize = AES_BLOCK_SIZE,
1196 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1197 AESNI_ALIGN - 1,
1198 .cra_alignmask = 0,
1199 .cra_module = THIS_MODULE,
1200 .cra_u = {
1201 .cipher = {
1202 .cia_min_keysize = AES_MIN_KEY_SIZE,
1203 .cia_max_keysize = AES_MAX_KEY_SIZE,
1204 .cia_setkey = aes_set_key,
1205 .cia_encrypt = aes_encrypt,
1206 .cia_decrypt = aes_decrypt
1207 }
1208 }
1209 }, {
1210 .cra_name = "__aes-aesni",
1211 .cra_driver_name = "__driver-aes-aesni",
1212 .cra_priority = 0,
1213 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
1214 .cra_blocksize = AES_BLOCK_SIZE,
1215 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1216 AESNI_ALIGN - 1,
1217 .cra_alignmask = 0,
1218 .cra_module = THIS_MODULE,
1219 .cra_u = {
1220 .cipher = {
1221 .cia_min_keysize = AES_MIN_KEY_SIZE,
1222 .cia_max_keysize = AES_MAX_KEY_SIZE,
1223 .cia_setkey = aes_set_key,
1224 .cia_encrypt = __aes_encrypt,
1225 .cia_decrypt = __aes_decrypt
1226 }
1227 }
1228 }, {
1229 .cra_name = "__ecb-aes-aesni",
1230 .cra_driver_name = "__driver-ecb-aes-aesni",
1231 .cra_priority = 0,
1232 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1233 .cra_blocksize = AES_BLOCK_SIZE,
1234 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1235 AESNI_ALIGN - 1,
1236 .cra_alignmask = 0,
1237 .cra_type = &crypto_blkcipher_type,
1238 .cra_module = THIS_MODULE,
1239 .cra_u = {
1240 .blkcipher = {
1241 .min_keysize = AES_MIN_KEY_SIZE,
1242 .max_keysize = AES_MAX_KEY_SIZE,
1243 .setkey = aes_set_key,
1244 .encrypt = ecb_encrypt,
1245 .decrypt = ecb_decrypt,
1246 },
1247 },
1248 }, {
1249 .cra_name = "__cbc-aes-aesni",
1250 .cra_driver_name = "__driver-cbc-aes-aesni",
1251 .cra_priority = 0,
1252 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1253 .cra_blocksize = AES_BLOCK_SIZE,
1254 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1255 AESNI_ALIGN - 1,
1256 .cra_alignmask = 0,
1257 .cra_type = &crypto_blkcipher_type,
1258 .cra_module = THIS_MODULE,
1259 .cra_u = {
1260 .blkcipher = {
1261 .min_keysize = AES_MIN_KEY_SIZE,
1262 .max_keysize = AES_MAX_KEY_SIZE,
1263 .setkey = aes_set_key,
1264 .encrypt = cbc_encrypt,
1265 .decrypt = cbc_decrypt,
1266 },
1267 },
1268 }, {
1269 .cra_name = "ecb(aes)",
1270 .cra_driver_name = "ecb-aes-aesni",
1271 .cra_priority = 400,
1272 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1273 .cra_blocksize = AES_BLOCK_SIZE,
1274 .cra_ctxsize = sizeof(struct async_helper_ctx),
1275 .cra_alignmask = 0,
1276 .cra_type = &crypto_ablkcipher_type,
1277 .cra_module = THIS_MODULE,
1278 .cra_init = ablk_ecb_init,
1279 .cra_exit = ablk_exit,
1280 .cra_u = {
1281 .ablkcipher = {
1282 .min_keysize = AES_MIN_KEY_SIZE,
1283 .max_keysize = AES_MAX_KEY_SIZE,
1284 .setkey = ablk_set_key,
1285 .encrypt = ablk_encrypt,
1286 .decrypt = ablk_decrypt,
1287 },
1288 },
1289 }, {
1290 .cra_name = "cbc(aes)",
1291 .cra_driver_name = "cbc-aes-aesni",
1292 .cra_priority = 400,
1293 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1294 .cra_blocksize = AES_BLOCK_SIZE,
1295 .cra_ctxsize = sizeof(struct async_helper_ctx),
1296 .cra_alignmask = 0,
1297 .cra_type = &crypto_ablkcipher_type,
1298 .cra_module = THIS_MODULE,
1299 .cra_init = ablk_cbc_init,
1300 .cra_exit = ablk_exit,
1301 .cra_u = {
1302 .ablkcipher = {
1303 .min_keysize = AES_MIN_KEY_SIZE,
1304 .max_keysize = AES_MAX_KEY_SIZE,
1305 .ivsize = AES_BLOCK_SIZE,
1306 .setkey = ablk_set_key,
1307 .encrypt = ablk_encrypt,
1308 .decrypt = ablk_decrypt,
1309 },
1310 },
1311 #ifdef CONFIG_X86_64
1312 }, {
1313 .cra_name = "__ctr-aes-aesni",
1314 .cra_driver_name = "__driver-ctr-aes-aesni",
1315 .cra_priority = 0,
1316 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1317 .cra_blocksize = 1,
1318 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1319 AESNI_ALIGN - 1,
1320 .cra_alignmask = 0,
1321 .cra_type = &crypto_blkcipher_type,
1322 .cra_module = THIS_MODULE,
1323 .cra_u = {
1324 .blkcipher = {
1325 .min_keysize = AES_MIN_KEY_SIZE,
1326 .max_keysize = AES_MAX_KEY_SIZE,
1327 .ivsize = AES_BLOCK_SIZE,
1328 .setkey = aes_set_key,
1329 .encrypt = ctr_crypt,
1330 .decrypt = ctr_crypt,
1331 },
1332 },
1333 }, {
1334 .cra_name = "ctr(aes)",
1335 .cra_driver_name = "ctr-aes-aesni",
1336 .cra_priority = 400,
1337 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1338 .cra_blocksize = 1,
1339 .cra_ctxsize = sizeof(struct async_helper_ctx),
1340 .cra_alignmask = 0,
1341 .cra_type = &crypto_ablkcipher_type,
1342 .cra_module = THIS_MODULE,
1343 .cra_init = ablk_ctr_init,
1344 .cra_exit = ablk_exit,
1345 .cra_u = {
1346 .ablkcipher = {
1347 .min_keysize = AES_MIN_KEY_SIZE,
1348 .max_keysize = AES_MAX_KEY_SIZE,
1349 .ivsize = AES_BLOCK_SIZE,
1350 .setkey = ablk_set_key,
1351 .encrypt = ablk_encrypt,
1352 .decrypt = ablk_encrypt,
1353 .geniv = "chainiv",
1354 },
1355 },
1356 }, {
1357 .cra_name = "__gcm-aes-aesni",
1358 .cra_driver_name = "__driver-gcm-aes-aesni",
1359 .cra_priority = 0,
1360 .cra_flags = CRYPTO_ALG_TYPE_AEAD,
1361 .cra_blocksize = 1,
1362 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1363 AESNI_ALIGN,
1364 .cra_alignmask = 0,
1365 .cra_type = &crypto_aead_type,
1366 .cra_module = THIS_MODULE,
1367 .cra_u = {
1368 .aead = {
1369 .encrypt = __driver_rfc4106_encrypt,
1370 .decrypt = __driver_rfc4106_decrypt,
1371 },
1372 },
1373 }, {
1374 .cra_name = "rfc4106(gcm(aes))",
1375 .cra_driver_name = "rfc4106-gcm-aesni",
1376 .cra_priority = 400,
1377 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1378 .cra_blocksize = 1,
1379 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1380 AESNI_ALIGN,
1381 .cra_alignmask = 0,
1382 .cra_type = &crypto_nivaead_type,
1383 .cra_module = THIS_MODULE,
1384 .cra_init = rfc4106_init,
1385 .cra_exit = rfc4106_exit,
1386 .cra_u = {
1387 .aead = {
1388 .setkey = rfc4106_set_key,
1389 .setauthsize = rfc4106_set_authsize,
1390 .encrypt = rfc4106_encrypt,
1391 .decrypt = rfc4106_decrypt,
1392 .geniv = "seqiv",
1393 .ivsize = 8,
1394 .maxauthsize = 16,
1395 },
1396 },
1397 #endif
1398 #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
1399 }, {
1400 .cra_name = "pcbc(aes)",
1401 .cra_driver_name = "pcbc-aes-aesni",
1402 .cra_priority = 400,
1403 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1404 .cra_blocksize = AES_BLOCK_SIZE,
1405 .cra_ctxsize = sizeof(struct async_helper_ctx),
1406 .cra_alignmask = 0,
1407 .cra_type = &crypto_ablkcipher_type,
1408 .cra_module = THIS_MODULE,
1409 .cra_init = ablk_pcbc_init,
1410 .cra_exit = ablk_exit,
1411 .cra_u = {
1412 .ablkcipher = {
1413 .min_keysize = AES_MIN_KEY_SIZE,
1414 .max_keysize = AES_MAX_KEY_SIZE,
1415 .ivsize = AES_BLOCK_SIZE,
1416 .setkey = ablk_set_key,
1417 .encrypt = ablk_encrypt,
1418 .decrypt = ablk_decrypt,
1419 },
1420 },
1421 #endif
1422 }, {
1423 .cra_name = "__lrw-aes-aesni",
1424 .cra_driver_name = "__driver-lrw-aes-aesni",
1425 .cra_priority = 0,
1426 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1427 .cra_blocksize = AES_BLOCK_SIZE,
1428 .cra_ctxsize = sizeof(struct aesni_lrw_ctx),
1429 .cra_alignmask = 0,
1430 .cra_type = &crypto_blkcipher_type,
1431 .cra_module = THIS_MODULE,
1432 .cra_exit = lrw_aesni_exit_tfm,
1433 .cra_u = {
1434 .blkcipher = {
1435 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1436 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1437 .ivsize = AES_BLOCK_SIZE,
1438 .setkey = lrw_aesni_setkey,
1439 .encrypt = lrw_encrypt,
1440 .decrypt = lrw_decrypt,
1441 },
1442 },
1443 }, {
1444 .cra_name = "__xts-aes-aesni",
1445 .cra_driver_name = "__driver-xts-aes-aesni",
1446 .cra_priority = 0,
1447 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1448 .cra_blocksize = AES_BLOCK_SIZE,
1449 .cra_ctxsize = sizeof(struct aesni_xts_ctx),
1450 .cra_alignmask = 0,
1451 .cra_type = &crypto_blkcipher_type,
1452 .cra_module = THIS_MODULE,
1453 .cra_u = {
1454 .blkcipher = {
1455 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1456 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1457 .ivsize = AES_BLOCK_SIZE,
1458 .setkey = xts_aesni_setkey,
1459 .encrypt = xts_encrypt,
1460 .decrypt = xts_decrypt,
1461 },
1462 },
1463 }, {
1464 .cra_name = "lrw(aes)",
1465 .cra_driver_name = "lrw-aes-aesni",
1466 .cra_priority = 400,
1467 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1468 .cra_blocksize = AES_BLOCK_SIZE,
1469 .cra_ctxsize = sizeof(struct async_helper_ctx),
1470 .cra_alignmask = 0,
1471 .cra_type = &crypto_ablkcipher_type,
1472 .cra_module = THIS_MODULE,
1473 .cra_init = ablk_init,
1474 .cra_exit = ablk_exit,
1475 .cra_u = {
1476 .ablkcipher = {
1477 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1478 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1479 .ivsize = AES_BLOCK_SIZE,
1480 .setkey = ablk_set_key,
1481 .encrypt = ablk_encrypt,
1482 .decrypt = ablk_decrypt,
1483 },
1484 },
1485 }, {
1486 .cra_name = "xts(aes)",
1487 .cra_driver_name = "xts-aes-aesni",
1488 .cra_priority = 400,
1489 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1490 .cra_blocksize = AES_BLOCK_SIZE,
1491 .cra_ctxsize = sizeof(struct async_helper_ctx),
1492 .cra_alignmask = 0,
1493 .cra_type = &crypto_ablkcipher_type,
1494 .cra_module = THIS_MODULE,
1495 .cra_init = ablk_init,
1496 .cra_exit = ablk_exit,
1497 .cra_u = {
1498 .ablkcipher = {
1499 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1500 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1501 .ivsize = AES_BLOCK_SIZE,
1502 .setkey = ablk_set_key,
1503 .encrypt = ablk_encrypt,
1504 .decrypt = ablk_decrypt,
1505 },
1506 },
1507 } };
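/*
 * A rough usage sketch (not part of this driver) showing how another kernel
 * component might reach the "cbc(aes)" implementation registered above
 * through the ablkcipher API of this kernel generation.  The key, IV and
 * data values are placeholders; the completion-based wait mirrors the
 * pattern used by rfc4106_set_hash_subkey() above.
 */
struct aesni_example_result {
	struct completion completion;
	int err;
};

static void aesni_example_done(struct crypto_async_request *req, int err)
{
	struct aesni_example_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int __maybe_unused aesni_cbc_usage_example(void)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct aesni_example_result res;
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = {};
	u8 iv[AES_BLOCK_SIZE] = {};
	u8 *buf;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_tfm;
	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	if (!buf)
		goto out_free_req;

	init_completion(&res.completion);
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					aesni_example_done, &res);
	ablkcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	kfree(buf);
out_free_req:
	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}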
1508
1509
1510 static const struct x86_cpu_id aesni_cpu_id[] = {
1511 X86_FEATURE_MATCH(X86_FEATURE_AES),
1512 {}
1513 };
1514 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1515
1516 static int __init aesni_init(void)
1517 {
1518 int err;
1519
1520 if (!x86_match_cpu(aesni_cpu_id))
1521 return -ENODEV;
1522 #ifdef CONFIG_X86_64
1523 #ifdef CONFIG_AS_AVX2
1524 if (boot_cpu_has(X86_FEATURE_AVX2)) {
1525 pr_info("AVX2 version of gcm_enc/dec engaged.\n");
1526 aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
1527 aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
1528 } else
1529 #endif
1530 #ifdef CONFIG_AS_AVX
1531 if (boot_cpu_has(X86_FEATURE_AVX)) {
1532 pr_info("AVX version of gcm_enc/dec engaged.\n");
1533 aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
1534 aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
1535 } else
1536 #endif
1537 {
1538 pr_info("SSE version of gcm_enc/dec engaged.\n");
1539 aesni_gcm_enc_tfm = aesni_gcm_enc;
1540 aesni_gcm_dec_tfm = aesni_gcm_dec;
1541 }
1542 aesni_ctr_enc_tfm = aesni_ctr_enc;
1543 #ifdef CONFIG_AS_AVX
1544 if (cpu_has_avx) {
1545 /* optimize performance of ctr mode encryption transform */
1546 aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
1547 pr_info("AES CTR mode by8 optimization enabled\n");
1548 }
1549 #endif
1550 #endif
1551
1552 err = crypto_fpu_init();
1553 if (err)
1554 return err;
1555
1556 return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1557 }
1558
1559 static void __exit aesni_exit(void)
1560 {
1561 crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1562
1563 crypto_fpu_exit();
1564 }
1565
1566 module_init(aesni_init);
1567 module_exit(aesni_exit);
1568
1569 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1570 MODULE_LICENSE("GPL");
1571 MODULE_ALIAS_CRYPTO("aes");