/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif

struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
	struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)
		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}
#endif

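/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for tfm
 * contexts, but the AES-NI code needs its key schedule 16-byte aligned.
 * Each cra_ctxsize therefore reserves AESNI_ALIGN-1 bytes of slack, and
 * aes_ctx() rounds the raw context pointer up to the next 16-byte
 * boundary when necessary.
 */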
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static struct crypto_alg aesni_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-aesni",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aesni_alg.cra_list),
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
};

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static struct crypto_alg __aesni_alg = {
	.cra_name		= "__aes-aesni",
	.cra_driver_name	= "__driver-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(__aesni_alg.cra_list),
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= __aes_encrypt,
			.cia_decrypt		= __aes_decrypt
		}
	}
};

static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
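	/* Must not sleep while the FPU state is claimed below, so tell
	 * the crypto walk code not to (kernel_fpu_begin()/end() section). */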
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_ecb_alg = {
	.cra_name		= "__ecb-aes-aesni",
	.cra_driver_name	= "__driver-ecb-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
};

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_cbc_alg = {
	.cra_name		= "__cbc-aes-aesni",
	.cra_driver_name	= "__driver-cbc-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
};

#ifdef CONFIG_X86_64
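/*
 * Handle the final partial block: encrypt the counter block on its own
 * and XOR only as many keystream bytes as remain, so CTR requests need
 * not be a multiple of AES_BLOCK_SIZE.
 */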
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_ctr_alg = {
	.cra_name		= "__ctr-aes-aesni",
	.cra_driver_name	= "__driver-ctr-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
};
#endif

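/*
 * The ablk_* helpers expose the synchronous "__driver-*" blkciphers
 * through an async interface: when the FPU is usable the request is
 * handled inline between kernel_fpu_begin()/end(), otherwise it is
 * bounced to a cryptd worker thread, which runs in a context where the
 * FPU may be used.
 */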
static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->encrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

static void ablk_init_common(struct crypto_tfm *tfm,
			     struct cryptd_ablkcipher *cryptd_tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_ecb_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
	.cra_init		= ablk_ecb_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_cbc_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
	.cra_init		= ablk_cbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_ctr_alg = {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
	.cra_init		= ablk_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
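			/* In CTR mode decryption is the same keystream
			 * XOR as encryption, hence ablk_encrypt here too. */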
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
};

#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher(
		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_rfc3686_ctr_alg = {
	.cra_name		= "rfc3686(ctr(aes))",
	.cra_driver_name	= "rfc3686-ctr-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
	.cra_init		= ablk_rfc3686_ctr_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.ivsize	     = CTR_RFC3686_IV_SIZE,
			.setkey	     = ablk_set_key,
			.encrypt     = ablk_encrypt,
			.decrypt     = ablk_decrypt,
			.geniv	     = "seqiv",
		},
	},
};
#endif
#endif

#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_lrw_alg = {
	.cra_name		= "lrw(aes)",
	.cra_driver_name	= "lrw-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
	.cra_init		= ablk_lrw_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_pcbc_alg = {
	.cra_name		= "pcbc(aes)",
	.cra_driver_name	= "pcbc-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
	.cra_init		= ablk_pcbc_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
#endif

#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_xts_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ablk_xts_alg.cra_list),
	.cra_init		= ablk_xts_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
};
#endif

#ifdef CONFIG_X86_64
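/*
 * The rfc4106 tfm delegates the actual GCM work to a cryptd-backed
 * "__driver-gcm-aes-aesni" child; reqsize is grown so that a nested
 * aead_request for the cryptd path fits into the request context.
 */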
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

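/*
 * Derive the GHASH hash subkey H = E_K(0^128): encrypt one all-zero
 * block through "ctr(aes)" with an all-zero initial counter, so the
 * value written back into hash_subkey is exactly AES_K(0^128).
 */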
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero. */
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	u8 *new_key_mem = NULL;
	u8 *new_key_align;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4-byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/* This must be on a 16-byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/* Key is not aligned: copy it into an auxiliary aligned
		 * buffer. Keep the raw kmalloc() pointer in new_key_mem so
		 * that kfree() below frees what was actually allocated. */
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/* This must be on a 16-byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
exit:
	kfree(new_key_mem);
	return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static struct crypto_alg rfc4106_alg = {
	.cra_name		= "rfc4106(gcm(aes))",
	.cra_driver_name	= "rfc4106-gcm-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize	= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_nivaead_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(rfc4106_alg.cra_list),
	.cra_init		= rfc4106_init,
	.cra_exit		= rfc4106_exit,
	.cra_u = {
		.aead = {
			.setkey		= rfc4106_set_key,
			.setauthsize	= rfc4106_set_authsize,
			.encrypt	= rfc4106_encrypt,
			.decrypt	= rfc4106_decrypt,
			.geniv		= "seqiv",
			.ivsize		= 8,
			.maxauthsize	= 16,
		},
	},
};

static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 8 or 12 bytes. */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* Build the pre-counter block j0: salt | IV | 0x00000001. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended sequence
	 * numbers, the AAD length must be 8 or 12 bytes. */
	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* Build the pre-counter block j0: salt | IV | 0x00000001. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		/* assoc follows the ciphertext (which already includes the
		 * tag), so offset by cryptlen to stay inside the buffer. */
		assoc = (src + req->cryptlen);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
			req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}

static struct crypto_alg __rfc4106_alg = {
	.cra_name		= "__gcm-aes-aesni",
	.cra_driver_name	= "__driver-gcm-aes-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize		= 1,
	.cra_ctxsize	= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_alignmask		= 0,
	.cra_type		= &crypto_aead_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(__rfc4106_alg.cra_list),
	.cra_u = {
		.aead = {
			.encrypt	= __driver_rfc4106_encrypt,
			.decrypt	= __driver_rfc4106_decrypt,
		},
	},
};
#endif

static int __init aesni_init(void)
{
	int err;

	if (!cpu_has_aes) {
		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
		return -ENODEV;
	}

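	/* Register all algorithms; on failure, unwind the registrations
	 * already made in reverse order via the goto ladder below. */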
	if ((err = crypto_register_alg(&aesni_alg)))
		goto aes_err;
	if ((err = crypto_register_alg(&__aesni_alg)))
		goto __aes_err;
	if ((err = crypto_register_alg(&blk_ecb_alg)))
		goto blk_ecb_err;
	if ((err = crypto_register_alg(&blk_cbc_alg)))
		goto blk_cbc_err;
	if ((err = crypto_register_alg(&ablk_ecb_alg)))
		goto ablk_ecb_err;
	if ((err = crypto_register_alg(&ablk_cbc_alg)))
		goto ablk_cbc_err;
#ifdef CONFIG_X86_64
	if ((err = crypto_register_alg(&blk_ctr_alg)))
		goto blk_ctr_err;
	if ((err = crypto_register_alg(&ablk_ctr_alg)))
		goto ablk_ctr_err;
	if ((err = crypto_register_alg(&__rfc4106_alg)))
		goto __aead_gcm_err;
	if ((err = crypto_register_alg(&rfc4106_alg)))
		goto aead_gcm_err;
#ifdef HAS_CTR
	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
		goto ablk_rfc3686_ctr_err;
#endif
#endif
#ifdef HAS_LRW
	if ((err = crypto_register_alg(&ablk_lrw_alg)))
		goto ablk_lrw_err;
#endif
#ifdef HAS_PCBC
	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
		goto ablk_pcbc_err;
#endif
#ifdef HAS_XTS
	if ((err = crypto_register_alg(&ablk_xts_alg)))
		goto ablk_xts_err;
#endif
	return err;

#ifdef HAS_XTS
ablk_xts_err:
#endif
#ifdef HAS_PCBC
	crypto_unregister_alg(&ablk_pcbc_alg);
ablk_pcbc_err:
#endif
#ifdef HAS_LRW
	crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
ablk_rfc3686_ctr_err:
#endif
	crypto_unregister_alg(&rfc4106_alg);
aead_gcm_err:
	crypto_unregister_alg(&__rfc4106_alg);
__aead_gcm_err:
	crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
	crypto_unregister_alg(&blk_ctr_alg);
blk_ctr_err:
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
	crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
	crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
	crypto_unregister_alg(&__aesni_alg);
__aes_err:
	crypto_unregister_alg(&aesni_alg);
aes_err:
	return err;
}

static void __exit aesni_exit(void)
{
#ifdef HAS_XTS
	crypto_unregister_alg(&ablk_xts_alg);
#endif
#ifdef HAS_PCBC
	crypto_unregister_alg(&ablk_pcbc_alg);
#endif
#ifdef HAS_LRW
	crypto_unregister_alg(&ablk_lrw_alg);
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
#endif
	crypto_unregister_alg(&rfc4106_alg);
	crypto_unregister_alg(&__rfc4106_alg);
	crypto_unregister_alg(&ablk_ctr_alg);
	crypto_unregister_alg(&blk_ctr_alg);
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
	crypto_unregister_alg(&__aesni_alg);
	crypto_unregister_alg(&aesni_alg);
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");