arch/x86/crypto/aesni-intel_glue.c
1/*
2 * Support for Intel AES-NI instructions. This file contains glue
3 * code, the real AES implementation is in aesni-intel_asm.S.
4 *
5 * Copyright (C) 2008, Intel Corp.
6 * Author: Huang Ying <ying.huang@intel.com>
7 *
8 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9 * interface for 64-bit kernels.
10 * Authors: Adrian Hoban <adrian.hoban@intel.com>
11 * Gabriele Paoloni <gabriele.paoloni@intel.com>
12 * Tadeusz Struk (tadeusz.struk@intel.com)
13 * Aidan O'Mahony (aidan.o.mahony@intel.com)
14 * Copyright (c) 2010, Intel Corporation.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 */
21
22#include <linux/hardirq.h>
23#include <linux/types.h>
24#include <linux/crypto.h>
25#include <linux/err.h>
26#include <crypto/algapi.h>
27#include <crypto/aes.h>
28#include <crypto/cryptd.h>
 29#include <crypto/ctr.h>
30#include <asm/i387.h>
31#include <asm/aes.h>
32#include <crypto/scatterwalk.h>
33#include <crypto/internal/aead.h>
34#include <linux/workqueue.h>
35#include <linux/spinlock.h>
 36
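/*
 * The HAS_* macros below only record whether the corresponding generic
 * mode templates (CTR, LRW, PCBC, XTS) are available, either built in or
 * as modules, so the matching AES-NI backed algorithms are registered
 * only when they can actually be instantiated.
 */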
37#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
38#define HAS_CTR
39#endif
40
41#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
42#define HAS_LRW
43#endif
44
45#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
46#define HAS_PCBC
47#endif
48
49#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
50#define HAS_XTS
51#endif
52
53struct async_aes_ctx {
54 struct cryptd_ablkcipher *cryptd_tfm;
55};
56
57/* This data is stored at the end of the crypto_tfm struct.
 58 * It is a per-session data storage location.
 59 * It needs to be 16 byte aligned.
60 */
61struct aesni_rfc4106_gcm_ctx {
62 u8 hash_subkey[16];
63 struct crypto_aes_ctx aes_key_expanded;
64 u8 nonce[4];
65 struct cryptd_aead *cryptd_tfm;
66};
67
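/*
 * Helper state for deriving the GCM hash subkey in rfc4106_set_key(): the
 * completion lets the (possibly asynchronous) ctr(aes) request used for
 * the derivation be waited for synchronously.
 */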
68struct aesni_gcm_set_hash_subkey_result {
69 int err;
70 struct completion completion;
71};
72
73struct aesni_hash_subkey_req_data {
74 u8 iv[16];
75 struct aesni_gcm_set_hash_subkey_result result;
76 struct scatterlist sg;
77};
78
79#define AESNI_ALIGN (16)
 80#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
 81#define RFC4106_HASH_SUBKEY_SIZE 16
82
83asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
84 unsigned int key_len);
85asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
86 const u8 *in);
87asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
88 const u8 *in);
89asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
90 const u8 *in, unsigned int len);
91asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
92 const u8 *in, unsigned int len);
93asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
94 const u8 *in, unsigned int len, u8 *iv);
95asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
96 const u8 *in, unsigned int len, u8 *iv);
97asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
98 const u8 *in, unsigned int len, u8 *iv);
 99
100/* asmlinkage void aesni_gcm_enc()
101 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
102 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
103 * const u8 *in, Plaintext input
104 * unsigned long plaintext_len, Length of data in bytes for encryption.
105 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
106 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
107 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
108 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
109 * const u8 *aad, Additional Authentication Data (AAD)
110 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
111 * is going to be 8 or 12 bytes
112 * u8 *auth_tag, Authenticated Tag output.
113 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
114 * Valid values are 16 (most likely), 12 or 8.
115 */
116asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
117 const u8 *in, unsigned long plaintext_len, u8 *iv,
118 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
119 u8 *auth_tag, unsigned long auth_tag_len);
120
121/* asmlinkage void aesni_gcm_dec()
122 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
123 * u8 *out, Plaintext output. Decrypt in-place is allowed.
124 * const u8 *in, Ciphertext input
125 * unsigned long ciphertext_len, Length of data in bytes for decryption.
126 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
127 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
128 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
129 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
130 * const u8 *aad, Additional Authentication Data (AAD)
131 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
132 * to be 8 or 12 bytes
133 * u8 *auth_tag, Authenticated Tag output.
134 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
135 * Valid values are 16 (most likely), 12 or 8.
136 */
137asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
138 const u8 *in, unsigned long ciphertext_len, u8 *iv,
139 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
140 u8 *auth_tag, unsigned long auth_tag_len);
141
142static inline struct
143aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
144{
145 return
146 (struct aesni_rfc4106_gcm_ctx *)
147 PTR_ALIGN((u8 *)
148 crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
149}
150
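/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for the tfm
 * context, but the AES-NI routines need the expanded key on a 16 byte
 * boundary.  cra_ctxsize therefore reserves AESNI_ALIGN - 1 bytes of
 * slack and aes_ctx() rounds the raw context pointer up; if the API's
 * guarantee already covers AESNI_ALIGN, no adjustment is needed.
 */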
151static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
152{
153 unsigned long addr = (unsigned long)raw_ctx;
154 unsigned long align = AESNI_ALIGN;
155
156 if (align <= crypto_tfm_ctx_alignment())
157 align = 1;
158 return (struct crypto_aes_ctx *)ALIGN(addr, align);
159}
160
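/*
 * AES-NI uses XMM registers, so it may only run between kernel_fpu_begin()
 * and kernel_fpu_end().  When irq_fpu_usable() says the FPU cannot be
 * touched (e.g. in interrupt context while a task's FPU state is live),
 * fall back to the non-AES-NI x86 AES implementation instead.
 */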
161static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
162 const u8 *in_key, unsigned int key_len)
163{
164 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
165 u32 *flags = &tfm->crt_flags;
166 int err;
167
168 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
169 key_len != AES_KEYSIZE_256) {
170 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
171 return -EINVAL;
172 }
173
174	if (!irq_fpu_usable())
175 err = crypto_aes_expand_key(ctx, in_key, key_len);
176 else {
177 kernel_fpu_begin();
178 err = aesni_set_key(ctx, in_key, key_len);
179 kernel_fpu_end();
180 }
181
182 return err;
183}
184
185static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
186 unsigned int key_len)
187{
188 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
189}
190
191static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
192{
193 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
194
195	if (!irq_fpu_usable())
196 crypto_aes_encrypt_x86(ctx, dst, src);
197 else {
198 kernel_fpu_begin();
199 aesni_enc(ctx, dst, src);
200 kernel_fpu_end();
201 }
202}
203
204static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
205{
206 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
207
208	if (!irq_fpu_usable())
209 crypto_aes_decrypt_x86(ctx, dst, src);
210 else {
211 kernel_fpu_begin();
212 aesni_dec(ctx, dst, src);
213 kernel_fpu_end();
214 }
215}
216
217static struct crypto_alg aesni_alg = {
218 .cra_name = "aes",
219 .cra_driver_name = "aes-aesni",
220 .cra_priority = 300,
221 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
222 .cra_blocksize = AES_BLOCK_SIZE,
223 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
224 .cra_alignmask = 0,
225 .cra_module = THIS_MODULE,
226 .cra_list = LIST_HEAD_INIT(aesni_alg.cra_list),
227 .cra_u = {
228 .cipher = {
229 .cia_min_keysize = AES_MIN_KEY_SIZE,
230 .cia_max_keysize = AES_MAX_KEY_SIZE,
231 .cia_setkey = aes_set_key,
232 .cia_encrypt = aes_encrypt,
233 .cia_decrypt = aes_decrypt
234 }
235 }
236};
237
238static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
239{
240 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
241
242 aesni_enc(ctx, dst, src);
243}
244
245static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
246{
247 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
248
249 aesni_dec(ctx, dst, src);
250}
251
252static struct crypto_alg __aesni_alg = {
253 .cra_name = "__aes-aesni",
254 .cra_driver_name = "__driver-aes-aesni",
255 .cra_priority = 0,
256 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
257 .cra_blocksize = AES_BLOCK_SIZE,
258 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
259 .cra_alignmask = 0,
260 .cra_module = THIS_MODULE,
261 .cra_list = LIST_HEAD_INIT(__aesni_alg.cra_list),
262 .cra_u = {
263 .cipher = {
264 .cia_min_keysize = AES_MIN_KEY_SIZE,
265 .cia_max_keysize = AES_MAX_KEY_SIZE,
266 .cia_setkey = aes_set_key,
267 .cia_encrypt = __aes_encrypt,
268 .cia_decrypt = __aes_decrypt
269 }
270 }
271};
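/*
 * Algorithm names starting with "__" (registered with cra_priority 0) are
 * internal implementations, looked up by name from the cryptd/fpu wrappers
 * further down rather than selected by ordinary users.  "__aes-aesni" in
 * particular calls the AES-NI routines without saving the FPU state, so it
 * relies on a wrapper such as the "fpu" template to do that.
 */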
272
273static int ecb_encrypt(struct blkcipher_desc *desc,
274 struct scatterlist *dst, struct scatterlist *src,
275 unsigned int nbytes)
276{
277 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
278 struct blkcipher_walk walk;
279 int err;
280
281 blkcipher_walk_init(&walk, dst, src, nbytes);
282 err = blkcipher_walk_virt(desc, &walk);
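	/*
	 * The data is processed between kernel_fpu_begin() and
	 * kernel_fpu_end(), where sleeping is not allowed, so make sure
	 * blkcipher_walk_done() never tries to sleep.
	 */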
283	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
284
285 kernel_fpu_begin();
286 while ((nbytes = walk.nbytes)) {
287 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
288 nbytes & AES_BLOCK_MASK);
289 nbytes &= AES_BLOCK_SIZE - 1;
290 err = blkcipher_walk_done(desc, &walk, nbytes);
291 }
292 kernel_fpu_end();
293
294 return err;
295}
296
297static int ecb_decrypt(struct blkcipher_desc *desc,
298 struct scatterlist *dst, struct scatterlist *src,
299 unsigned int nbytes)
300{
301 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
302 struct blkcipher_walk walk;
303 int err;
304
305 blkcipher_walk_init(&walk, dst, src, nbytes);
306 err = blkcipher_walk_virt(desc, &walk);
307	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
308
309 kernel_fpu_begin();
310 while ((nbytes = walk.nbytes)) {
311 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
312 nbytes & AES_BLOCK_MASK);
313 nbytes &= AES_BLOCK_SIZE - 1;
314 err = blkcipher_walk_done(desc, &walk, nbytes);
315 }
316 kernel_fpu_end();
317
318 return err;
319}
320
321static struct crypto_alg blk_ecb_alg = {
322 .cra_name = "__ecb-aes-aesni",
323 .cra_driver_name = "__driver-ecb-aes-aesni",
324 .cra_priority = 0,
325 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
326 .cra_blocksize = AES_BLOCK_SIZE,
327 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
328 .cra_alignmask = 0,
329 .cra_type = &crypto_blkcipher_type,
330 .cra_module = THIS_MODULE,
331 .cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
332 .cra_u = {
333 .blkcipher = {
334 .min_keysize = AES_MIN_KEY_SIZE,
335 .max_keysize = AES_MAX_KEY_SIZE,
336 .setkey = aes_set_key,
337 .encrypt = ecb_encrypt,
338 .decrypt = ecb_decrypt,
339 },
340 },
341};
342
343static int cbc_encrypt(struct blkcipher_desc *desc,
344 struct scatterlist *dst, struct scatterlist *src,
345 unsigned int nbytes)
346{
347 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
348 struct blkcipher_walk walk;
349 int err;
350
351 blkcipher_walk_init(&walk, dst, src, nbytes);
352 err = blkcipher_walk_virt(desc, &walk);
353	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
354
355 kernel_fpu_begin();
356 while ((nbytes = walk.nbytes)) {
357 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
358 nbytes & AES_BLOCK_MASK, walk.iv);
359 nbytes &= AES_BLOCK_SIZE - 1;
360 err = blkcipher_walk_done(desc, &walk, nbytes);
361 }
362 kernel_fpu_end();
363
364 return err;
365}
366
367static int cbc_decrypt(struct blkcipher_desc *desc,
368 struct scatterlist *dst, struct scatterlist *src,
369 unsigned int nbytes)
370{
371 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
372 struct blkcipher_walk walk;
373 int err;
374
375 blkcipher_walk_init(&walk, dst, src, nbytes);
376 err = blkcipher_walk_virt(desc, &walk);
377	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
378
379 kernel_fpu_begin();
380 while ((nbytes = walk.nbytes)) {
381 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
382 nbytes & AES_BLOCK_MASK, walk.iv);
383 nbytes &= AES_BLOCK_SIZE - 1;
384 err = blkcipher_walk_done(desc, &walk, nbytes);
385 }
386 kernel_fpu_end();
387
388 return err;
389}
390
391static struct crypto_alg blk_cbc_alg = {
392 .cra_name = "__cbc-aes-aesni",
393 .cra_driver_name = "__driver-cbc-aes-aesni",
394 .cra_priority = 0,
395 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
396 .cra_blocksize = AES_BLOCK_SIZE,
397 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
398 .cra_alignmask = 0,
399 .cra_type = &crypto_blkcipher_type,
400 .cra_module = THIS_MODULE,
401 .cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
402 .cra_u = {
403 .blkcipher = {
404 .min_keysize = AES_MIN_KEY_SIZE,
405 .max_keysize = AES_MAX_KEY_SIZE,
406 .setkey = aes_set_key,
407 .encrypt = cbc_encrypt,
408 .decrypt = cbc_decrypt,
409 },
410 },
411};
412
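/*
 * Handle the trailing partial block of a CTR request: encrypt the current
 * counter block to get one block of keystream, XOR only the remaining
 * nbytes of it into the output and advance the counter.
 */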
413static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
414 struct blkcipher_walk *walk)
415{
416 u8 *ctrblk = walk->iv;
417 u8 keystream[AES_BLOCK_SIZE];
418 u8 *src = walk->src.virt.addr;
419 u8 *dst = walk->dst.virt.addr;
420 unsigned int nbytes = walk->nbytes;
421
422 aesni_enc(ctx, keystream, ctrblk);
423 crypto_xor(keystream, src, nbytes);
424 memcpy(dst, keystream, nbytes);
425 crypto_inc(ctrblk, AES_BLOCK_SIZE);
426}
427
428static int ctr_crypt(struct blkcipher_desc *desc,
429 struct scatterlist *dst, struct scatterlist *src,
430 unsigned int nbytes)
431{
432 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
433 struct blkcipher_walk walk;
434 int err;
435
436 blkcipher_walk_init(&walk, dst, src, nbytes);
437 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
438 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
439
440 kernel_fpu_begin();
441 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
442 aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
443 nbytes & AES_BLOCK_MASK, walk.iv);
444 nbytes &= AES_BLOCK_SIZE - 1;
445 err = blkcipher_walk_done(desc, &walk, nbytes);
446 }
447 if (walk.nbytes) {
448 ctr_crypt_final(ctx, &walk);
449 err = blkcipher_walk_done(desc, &walk, 0);
450 }
451 kernel_fpu_end();
452
453 return err;
454}
455
456static struct crypto_alg blk_ctr_alg = {
457 .cra_name = "__ctr-aes-aesni",
458 .cra_driver_name = "__driver-ctr-aes-aesni",
459 .cra_priority = 0,
460 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
461 .cra_blocksize = 1,
462 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
463 .cra_alignmask = 0,
464 .cra_type = &crypto_blkcipher_type,
465 .cra_module = THIS_MODULE,
466 .cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
467 .cra_u = {
468 .blkcipher = {
469 .min_keysize = AES_MIN_KEY_SIZE,
470 .max_keysize = AES_MAX_KEY_SIZE,
471 .ivsize = AES_BLOCK_SIZE,
472 .setkey = aes_set_key,
473 .encrypt = ctr_crypt,
474 .decrypt = ctr_crypt,
475 },
476 },
477};
478
479static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
480 unsigned int key_len)
481{
482 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
483 struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
484 int err;
485
486 crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
487 crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
488 & CRYPTO_TFM_REQ_MASK);
489 err = crypto_ablkcipher_setkey(child, key, key_len);
490 crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
491 & CRYPTO_TFM_RES_MASK);
492 return err;
493}
494
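/*
 * Async wrappers: when the FPU cannot be used in the caller's context the
 * request is handed to the cryptd workqueue thread and completes
 * asynchronously; otherwise it is served synchronously by calling the
 * underlying "__driver-*" blkcipher directly.
 */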
495static int ablk_encrypt(struct ablkcipher_request *req)
496{
497 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
498 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
499
500	if (!irq_fpu_usable()) {
501 struct ablkcipher_request *cryptd_req =
502 ablkcipher_request_ctx(req);
503 memcpy(cryptd_req, req, sizeof(*req));
504 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
505 return crypto_ablkcipher_encrypt(cryptd_req);
506 } else {
507 struct blkcipher_desc desc;
508 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
509 desc.info = req->info;
510 desc.flags = 0;
511 return crypto_blkcipher_crt(desc.tfm)->encrypt(
512 &desc, req->dst, req->src, req->nbytes);
513 }
514}
515
516static int ablk_decrypt(struct ablkcipher_request *req)
517{
518 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
519 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
520
521	if (!irq_fpu_usable()) {
522 struct ablkcipher_request *cryptd_req =
523 ablkcipher_request_ctx(req);
524 memcpy(cryptd_req, req, sizeof(*req));
525 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
526 return crypto_ablkcipher_decrypt(cryptd_req);
527 } else {
528 struct blkcipher_desc desc;
529 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
530 desc.info = req->info;
531 desc.flags = 0;
532 return crypto_blkcipher_crt(desc.tfm)->decrypt(
533 &desc, req->dst, req->src, req->nbytes);
534 }
535}
536
537static void ablk_exit(struct crypto_tfm *tfm)
538{
539 struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
540
541 cryptd_free_ablkcipher(ctx->cryptd_tfm);
542}
543
544static void ablk_init_common(struct crypto_tfm *tfm,
545 struct cryptd_ablkcipher *cryptd_tfm)
546{
547 struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
548
549 ctx->cryptd_tfm = cryptd_tfm;
550 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
551 crypto_ablkcipher_reqsize(&cryptd_tfm->base);
552}
553
554static int ablk_ecb_init(struct crypto_tfm *tfm)
555{
556 struct cryptd_ablkcipher *cryptd_tfm;
557
558 cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
559 if (IS_ERR(cryptd_tfm))
560 return PTR_ERR(cryptd_tfm);
561 ablk_init_common(tfm, cryptd_tfm);
562 return 0;
563}
564
565static struct crypto_alg ablk_ecb_alg = {
566 .cra_name = "ecb(aes)",
567 .cra_driver_name = "ecb-aes-aesni",
568 .cra_priority = 400,
569 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
570 .cra_blocksize = AES_BLOCK_SIZE,
571 .cra_ctxsize = sizeof(struct async_aes_ctx),
572 .cra_alignmask = 0,
573 .cra_type = &crypto_ablkcipher_type,
574 .cra_module = THIS_MODULE,
575 .cra_list = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
576 .cra_init = ablk_ecb_init,
577 .cra_exit = ablk_exit,
578 .cra_u = {
579 .ablkcipher = {
580 .min_keysize = AES_MIN_KEY_SIZE,
581 .max_keysize = AES_MAX_KEY_SIZE,
582 .setkey = ablk_set_key,
583 .encrypt = ablk_encrypt,
584 .decrypt = ablk_decrypt,
585 },
586 },
587};
588
589static int ablk_cbc_init(struct crypto_tfm *tfm)
590{
591 struct cryptd_ablkcipher *cryptd_tfm;
592
593 cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
594 if (IS_ERR(cryptd_tfm))
595 return PTR_ERR(cryptd_tfm);
596 ablk_init_common(tfm, cryptd_tfm);
597 return 0;
598}
599
600static struct crypto_alg ablk_cbc_alg = {
601 .cra_name = "cbc(aes)",
602 .cra_driver_name = "cbc-aes-aesni",
603 .cra_priority = 400,
604 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
605 .cra_blocksize = AES_BLOCK_SIZE,
606 .cra_ctxsize = sizeof(struct async_aes_ctx),
607 .cra_alignmask = 0,
608 .cra_type = &crypto_ablkcipher_type,
609 .cra_module = THIS_MODULE,
610 .cra_list = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
611 .cra_init = ablk_cbc_init,
612 .cra_exit = ablk_exit,
613 .cra_u = {
614 .ablkcipher = {
615 .min_keysize = AES_MIN_KEY_SIZE,
616 .max_keysize = AES_MAX_KEY_SIZE,
617 .ivsize = AES_BLOCK_SIZE,
618 .setkey = ablk_set_key,
619 .encrypt = ablk_encrypt,
620 .decrypt = ablk_decrypt,
621 },
622 },
623};
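/*
 * Illustrative sketch (not part of this driver): a caller that asks the
 * crypto API for "cbc(aes)" ends up with "cbc-aes-aesni" above (priority
 * 400 beats the generic implementation) when AES-NI is available.  The
 * key, src_sg, dst_sg, nbytes and iv names are made up for the example
 * and error handling is omitted.
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);      ...may return -EINPROGRESS
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 */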
624
625static int ablk_ctr_init(struct crypto_tfm *tfm)
626{
627 struct cryptd_ablkcipher *cryptd_tfm;
628
629	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
630 if (IS_ERR(cryptd_tfm))
631 return PTR_ERR(cryptd_tfm);
632 ablk_init_common(tfm, cryptd_tfm);
633 return 0;
634}
635
636static struct crypto_alg ablk_ctr_alg = {
637 .cra_name = "ctr(aes)",
638 .cra_driver_name = "ctr-aes-aesni",
639 .cra_priority = 400,
640 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
641 .cra_blocksize = 1,
642 .cra_ctxsize = sizeof(struct async_aes_ctx),
643 .cra_alignmask = 0,
644 .cra_type = &crypto_ablkcipher_type,
645 .cra_module = THIS_MODULE,
646 .cra_list = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
647 .cra_init = ablk_ctr_init,
648 .cra_exit = ablk_exit,
649 .cra_u = {
650 .ablkcipher = {
651 .min_keysize = AES_MIN_KEY_SIZE,
652 .max_keysize = AES_MAX_KEY_SIZE,
653 .ivsize = AES_BLOCK_SIZE,
654 .setkey = ablk_set_key,
655 .encrypt = ablk_encrypt,
656			.decrypt	= ablk_encrypt, /* CTR decryption == encryption */
657 .geniv = "chainiv",
658 },
659 },
660};
661
662#ifdef HAS_CTR
663static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
664{
665 struct cryptd_ablkcipher *cryptd_tfm;
666
667 cryptd_tfm = cryptd_alloc_ablkcipher(
668 "rfc3686(__driver-ctr-aes-aesni)", 0, 0);
669 if (IS_ERR(cryptd_tfm))
670 return PTR_ERR(cryptd_tfm);
671 ablk_init_common(tfm, cryptd_tfm);
672 return 0;
673}
674
675static struct crypto_alg ablk_rfc3686_ctr_alg = {
676 .cra_name = "rfc3686(ctr(aes))",
677 .cra_driver_name = "rfc3686-ctr-aes-aesni",
678 .cra_priority = 400,
679 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
680 .cra_blocksize = 1,
681 .cra_ctxsize = sizeof(struct async_aes_ctx),
682 .cra_alignmask = 0,
683 .cra_type = &crypto_ablkcipher_type,
684 .cra_module = THIS_MODULE,
685 .cra_list = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
686 .cra_init = ablk_rfc3686_ctr_init,
687 .cra_exit = ablk_exit,
688 .cra_u = {
689 .ablkcipher = {
690 .min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
691 .max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
692 .ivsize = CTR_RFC3686_IV_SIZE,
693 .setkey = ablk_set_key,
694 .encrypt = ablk_encrypt,
695 .decrypt = ablk_decrypt,
696 .geniv = "seqiv",
697 },
698 },
699};
700#endif
701
702#ifdef HAS_LRW
703static int ablk_lrw_init(struct crypto_tfm *tfm)
704{
705 struct cryptd_ablkcipher *cryptd_tfm;
706
707 cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
708 0, 0);
709 if (IS_ERR(cryptd_tfm))
710 return PTR_ERR(cryptd_tfm);
711 ablk_init_common(tfm, cryptd_tfm);
712 return 0;
713}
714
715static struct crypto_alg ablk_lrw_alg = {
716 .cra_name = "lrw(aes)",
717 .cra_driver_name = "lrw-aes-aesni",
718 .cra_priority = 400,
719 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
720 .cra_blocksize = AES_BLOCK_SIZE,
721 .cra_ctxsize = sizeof(struct async_aes_ctx),
722 .cra_alignmask = 0,
723 .cra_type = &crypto_ablkcipher_type,
724 .cra_module = THIS_MODULE,
725 .cra_list = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
726 .cra_init = ablk_lrw_init,
727 .cra_exit = ablk_exit,
728 .cra_u = {
729 .ablkcipher = {
730 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
731 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
732 .ivsize = AES_BLOCK_SIZE,
733 .setkey = ablk_set_key,
734 .encrypt = ablk_encrypt,
735 .decrypt = ablk_decrypt,
736 },
737 },
738};
739#endif
740
741#ifdef HAS_PCBC
742static int ablk_pcbc_init(struct crypto_tfm *tfm)
743{
744 struct cryptd_ablkcipher *cryptd_tfm;
745
746 cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
747 0, 0);
748 if (IS_ERR(cryptd_tfm))
749 return PTR_ERR(cryptd_tfm);
750 ablk_init_common(tfm, cryptd_tfm);
751 return 0;
752}
753
754static struct crypto_alg ablk_pcbc_alg = {
755 .cra_name = "pcbc(aes)",
756 .cra_driver_name = "pcbc-aes-aesni",
757 .cra_priority = 400,
758 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
759 .cra_blocksize = AES_BLOCK_SIZE,
760 .cra_ctxsize = sizeof(struct async_aes_ctx),
761 .cra_alignmask = 0,
762 .cra_type = &crypto_ablkcipher_type,
763 .cra_module = THIS_MODULE,
764 .cra_list = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
765 .cra_init = ablk_pcbc_init,
766 .cra_exit = ablk_exit,
767 .cra_u = {
768 .ablkcipher = {
769 .min_keysize = AES_MIN_KEY_SIZE,
770 .max_keysize = AES_MAX_KEY_SIZE,
771 .ivsize = AES_BLOCK_SIZE,
772 .setkey = ablk_set_key,
773 .encrypt = ablk_encrypt,
774 .decrypt = ablk_decrypt,
775 },
776 },
777};
778#endif
779
780#ifdef HAS_XTS
781static int ablk_xts_init(struct crypto_tfm *tfm)
782{
783 struct cryptd_ablkcipher *cryptd_tfm;
784
785 cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
786 0, 0);
787 if (IS_ERR(cryptd_tfm))
788 return PTR_ERR(cryptd_tfm);
789 ablk_init_common(tfm, cryptd_tfm);
790 return 0;
791}
792
793static struct crypto_alg ablk_xts_alg = {
794 .cra_name = "xts(aes)",
795 .cra_driver_name = "xts-aes-aesni",
796 .cra_priority = 400,
797 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
798 .cra_blocksize = AES_BLOCK_SIZE,
799 .cra_ctxsize = sizeof(struct async_aes_ctx),
800 .cra_alignmask = 0,
801 .cra_type = &crypto_ablkcipher_type,
802 .cra_module = THIS_MODULE,
803 .cra_list = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
804 .cra_init = ablk_xts_init,
805 .cra_exit = ablk_exit,
806 .cra_u = {
807 .ablkcipher = {
808 .min_keysize = 2 * AES_MIN_KEY_SIZE,
809 .max_keysize = 2 * AES_MAX_KEY_SIZE,
810 .ivsize = AES_BLOCK_SIZE,
811 .setkey = ablk_set_key,
812 .encrypt = ablk_encrypt,
813 .decrypt = ablk_decrypt,
814 },
815 },
816};
817#endif
818
819static int rfc4106_init(struct crypto_tfm *tfm)
820{
821 struct cryptd_aead *cryptd_tfm;
822 struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
823 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
824 cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
825 if (IS_ERR(cryptd_tfm))
826 return PTR_ERR(cryptd_tfm);
827 ctx->cryptd_tfm = cryptd_tfm;
828 tfm->crt_aead.reqsize = sizeof(struct aead_request)
829 + crypto_aead_reqsize(&cryptd_tfm->base);
830 return 0;
831}
832
833static void rfc4106_exit(struct crypto_tfm *tfm)
834{
835 struct aesni_rfc4106_gcm_ctx *ctx =
836 (struct aesni_rfc4106_gcm_ctx *)
837 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
838 if (!IS_ERR(ctx->cryptd_tfm))
839 cryptd_free_aead(ctx->cryptd_tfm);
840 return;
841}
842
843static void
844rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
845{
846 struct aesni_gcm_set_hash_subkey_result *result = req->data;
847
848 if (err == -EINPROGRESS)
849 return;
850 result->err = err;
851 complete(&result->completion);
852}
853
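/*
 * GCM's GHASH key is H = E_K(0^128).  Derive it by running ctr(aes) over a
 * 16 byte all-zero buffer with an all-zero counter block: the first block
 * of keystream XORed with zero plaintext is exactly E_K(0^128).
 */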
854static int
855rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
856{
857 struct crypto_ablkcipher *ctr_tfm;
858 struct ablkcipher_request *req;
859 int ret = -EINVAL;
860 struct aesni_hash_subkey_req_data *req_data;
861
862 ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
863 if (IS_ERR(ctr_tfm))
864 return PTR_ERR(ctr_tfm);
865
866 crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
867
868 ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
869 if (ret) {
870 crypto_free_ablkcipher(ctr_tfm);
871 return ret;
872 }
873
874 req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
875 if (!req) {
876 crypto_free_ablkcipher(ctr_tfm);
877 return -EINVAL;
878 }
879
880 req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
881 if (!req_data) {
882 crypto_free_ablkcipher(ctr_tfm);
883 return -ENOMEM;
884 }
885 memset(req_data->iv, 0, sizeof(req_data->iv));
886
887 /* Clear the data in the hash sub key container to zero.*/
888 /* We want to cipher all zeros to create the hash sub key. */
889 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
890
891 init_completion(&req_data->result.completion);
892 sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
893 ablkcipher_request_set_tfm(req, ctr_tfm);
894 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
895 CRYPTO_TFM_REQ_MAY_BACKLOG,
896 rfc4106_set_hash_subkey_done,
897 &req_data->result);
898
899 ablkcipher_request_set_crypt(req, &req_data->sg,
900 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
901
902 ret = crypto_ablkcipher_encrypt(req);
903 if (ret == -EINPROGRESS || ret == -EBUSY) {
904 ret = wait_for_completion_interruptible
905 (&req_data->result.completion);
906 if (!ret)
907 ret = req_data->result.err;
908 }
909 ablkcipher_request_free(req);
910 kfree(req_data);
911 crypto_free_ablkcipher(ctr_tfm);
912 return ret;
913}
914
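/*
 * With rfc4106(gcm(aes)) the key material supplied by the caller is the
 * AES key followed by a 4 byte salt (RFC 4106, section 8.1).  The salt is
 * kept in ctx->nonce and later forms the first four bytes of the
 * pre-counter block; only AES-128 cipher keys are accepted here.
 */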
915static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
916 unsigned int key_len)
917{
918 int ret = 0;
919 struct crypto_tfm *tfm = crypto_aead_tfm(parent);
920 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
921 u8 *new_key_mem = NULL;
922
923 if (key_len < 4) {
924 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
925 return -EINVAL;
926 }
927 /*Account for 4 byte nonce at the end.*/
928 key_len -= 4;
929 if (key_len != AES_KEYSIZE_128) {
930 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
931 return -EINVAL;
932 }
933
934 memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
935 /*This must be on a 16 byte boundary!*/
936 if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
937 return -EINVAL;
938
939 if ((unsigned long)key % AESNI_ALIGN) {
940		/* key is not aligned: use an auxiliary aligned buffer */
941 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
942 if (!new_key_mem)
943 return -ENOMEM;
944
945 new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
946 memcpy(new_key_mem, key, key_len);
947 key = new_key_mem;
948 }
949
950 if (!irq_fpu_usable())
951 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
952 key, key_len);
953 else {
954 kernel_fpu_begin();
955 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
956 kernel_fpu_end();
957 }
958 /*This must be on a 16 byte boundary!*/
959 if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
960 ret = -EINVAL;
961 goto exit;
962 }
963 ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
964exit:
965 kfree(new_key_mem);
966 return ret;
967}
968
969/* This is the Integrity Check Value (aka the authentication tag) length and can
970 * be 8, 12 or 16 bytes long. */
971static int rfc4106_set_authsize(struct crypto_aead *parent,
972 unsigned int authsize)
973{
974 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
975 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
976
977 switch (authsize) {
978 case 8:
979 case 12:
980 case 16:
981 break;
982 default:
983 return -EINVAL;
984 }
985 crypto_aead_crt(parent)->authsize = authsize;
986 crypto_aead_crt(cryptd_child)->authsize = authsize;
987 return 0;
988}
989
990static int rfc4106_encrypt(struct aead_request *req)
991{
992 int ret;
993 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
994 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
995 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
996
997 if (!irq_fpu_usable()) {
998 struct aead_request *cryptd_req =
999 (struct aead_request *) aead_request_ctx(req);
1000 memcpy(cryptd_req, req, sizeof(*req));
1001 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1002 return crypto_aead_encrypt(cryptd_req);
1003 } else {
1004 kernel_fpu_begin();
1005 ret = cryptd_child->base.crt_aead.encrypt(req);
1006 kernel_fpu_end();
1007 return ret;
1008 }
1009}
1010
1011static int rfc4106_decrypt(struct aead_request *req)
1012{
1013 int ret;
1014 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1015 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1016 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1017
1018 if (!irq_fpu_usable()) {
1019 struct aead_request *cryptd_req =
1020 (struct aead_request *) aead_request_ctx(req);
1021 memcpy(cryptd_req, req, sizeof(*req));
1022 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1023 return crypto_aead_decrypt(cryptd_req);
1024 } else {
1025 kernel_fpu_begin();
1026 ret = cryptd_child->base.crt_aead.decrypt(req);
1027 kernel_fpu_end();
1028 return ret;
1029 }
1030}
1031
1032static struct crypto_alg rfc4106_alg = {
1033 .cra_name = "rfc4106(gcm(aes))",
1034 .cra_driver_name = "rfc4106-gcm-aesni",
1035 .cra_priority = 400,
1036 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1037 .cra_blocksize = 1,
1038 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1039 .cra_alignmask = 0,
1040 .cra_type = &crypto_nivaead_type,
1041 .cra_module = THIS_MODULE,
1042 .cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
1043 .cra_init = rfc4106_init,
1044 .cra_exit = rfc4106_exit,
1045 .cra_u = {
1046 .aead = {
1047 .setkey = rfc4106_set_key,
1048 .setauthsize = rfc4106_set_authsize,
1049 .encrypt = rfc4106_encrypt,
1050 .decrypt = rfc4106_decrypt,
1051 .geniv = "seqiv",
1052 .ivsize = 8,
1053 .maxauthsize = 16,
1054 },
1055 },
1056};
1057
1058static int __driver_rfc4106_encrypt(struct aead_request *req)
1059{
1060 u8 one_entry_in_sg = 0;
1061 u8 *src, *dst, *assoc;
1062 __be32 counter = cpu_to_be32(1);
1063 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1064 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1065 void *aes_ctx = &(ctx->aes_key_expanded);
1066 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1067 u8 iv_tab[16+AESNI_ALIGN];
1068 u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
1069 struct scatter_walk src_sg_walk;
1070 struct scatter_walk assoc_sg_walk;
1071 struct scatter_walk dst_sg_walk;
1072 unsigned int i;
1073
1074	/* Assuming we are supporting rfc4106 64-bit extended */
1075	/* sequence numbers, the AAD length must be equal */
1076	/* to 8 or 12 bytes. */
1077 if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1078 return -EINVAL;
1079	/* Build the pre-counter block j0: 4 byte salt | 8 byte IV | 0x00000001 */
1080 for (i = 0; i < 4; i++)
1081 *(iv+i) = ctx->nonce[i];
1082 for (i = 0; i < 8; i++)
1083 *(iv+4+i) = req->iv[i];
1084 *((__be32 *)(iv+12)) = counter;
1085
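	/*
	 * The assembler routine needs linear buffers: if the data and the
	 * AAD each fit in a single scatterlist entry they are mapped and
	 * used in place, otherwise everything is copied into a temporary
	 * buffer first and copied back afterwards.
	 */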
1086 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1087 one_entry_in_sg = 1;
1088 scatterwalk_start(&src_sg_walk, req->src);
1089 scatterwalk_start(&assoc_sg_walk, req->assoc);
1090 src = scatterwalk_map(&src_sg_walk, 0);
1091 assoc = scatterwalk_map(&assoc_sg_walk, 0);
1092 dst = src;
1093 if (unlikely(req->src != req->dst)) {
1094 scatterwalk_start(&dst_sg_walk, req->dst);
1095 dst = scatterwalk_map(&dst_sg_walk, 0);
1096 }
1097
1098 } else {
1099 /* Allocate memory for src, dst, assoc */
1100 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1101 GFP_ATOMIC);
1102 if (unlikely(!src))
1103 return -ENOMEM;
1104 assoc = (src + req->cryptlen + auth_tag_len);
1105 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1106 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1107 req->assoclen, 0);
1108 dst = src;
1109 }
1110
1111 aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1112 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1113 + ((unsigned long)req->cryptlen), auth_tag_len);
1114
1115 /* The authTag (aka the Integrity Check Value) needs to be written
1116 * back to the packet. */
1117 if (one_entry_in_sg) {
1118 if (unlikely(req->src != req->dst)) {
1119 scatterwalk_unmap(dst, 0);
1120 scatterwalk_done(&dst_sg_walk, 0, 0);
1121 }
1122 scatterwalk_unmap(src, 0);
1123 scatterwalk_unmap(assoc, 0);
1124 scatterwalk_done(&src_sg_walk, 0, 0);
1125 scatterwalk_done(&assoc_sg_walk, 0, 0);
1126 } else {
1127 scatterwalk_map_and_copy(dst, req->dst, 0,
1128 req->cryptlen + auth_tag_len, 1);
1129 kfree(src);
1130 }
1131 return 0;
1132}
1133
1134static int __driver_rfc4106_decrypt(struct aead_request *req)
1135{
1136 u8 one_entry_in_sg = 0;
1137 u8 *src, *dst, *assoc;
1138 unsigned long tempCipherLen = 0;
1139 __be32 counter = cpu_to_be32(1);
1140 int retval = 0;
1141 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1142 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1143 void *aes_ctx = &(ctx->aes_key_expanded);
1144 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1145 u8 iv_and_authTag[32+AESNI_ALIGN];
1146 u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1147 u8 *authTag = iv + 16;
1148 struct scatter_walk src_sg_walk;
1149 struct scatter_walk assoc_sg_walk;
1150 struct scatter_walk dst_sg_walk;
1151 unsigned int i;
1152
1153 if (unlikely((req->cryptlen < auth_tag_len) ||
1154 (req->assoclen != 8 && req->assoclen != 12)))
1155 return -EINVAL;
1156	/* Assuming we are supporting rfc4106 64-bit extended */
1157	/* sequence numbers, the AAD length (checked above) must be */
1158	/* equal to 8 or 12 bytes. */
1159
1160 tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1161	/* Build the pre-counter block j0: 4 byte salt | 8 byte IV | 0x00000001 */
1162 for (i = 0; i < 4; i++)
1163 *(iv+i) = ctx->nonce[i];
1164 for (i = 0; i < 8; i++)
1165 *(iv+4+i) = req->iv[i];
1166 *((__be32 *)(iv+12)) = counter;
1167
1168 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1169 one_entry_in_sg = 1;
1170 scatterwalk_start(&src_sg_walk, req->src);
1171 scatterwalk_start(&assoc_sg_walk, req->assoc);
1172 src = scatterwalk_map(&src_sg_walk, 0);
1173 assoc = scatterwalk_map(&assoc_sg_walk, 0);
1174 dst = src;
1175 if (unlikely(req->src != req->dst)) {
1176 scatterwalk_start(&dst_sg_walk, req->dst);
1177 dst = scatterwalk_map(&dst_sg_walk, 0);
1178 }
1179
1180 } else {
1181 /* Allocate memory for src, dst, assoc */
		/* assoc is placed auth_tag_len bytes past the ciphertext
		 * below, so reserve room for that gap as well (mirrors
		 * the encrypt path). */
1182		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen, GFP_ATOMIC);
1183 if (!src)
1184 return -ENOMEM;
1185 assoc = (src + req->cryptlen + auth_tag_len);
1186 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1187 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1188 req->assoclen, 0);
1189 dst = src;
1190 }
1191
1192 aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
1193 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1194 authTag, auth_tag_len);
1195
1196 /* Compare generated tag with passed in tag. */
1197 retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
1198 -EBADMSG : 0;
1199
1200 if (one_entry_in_sg) {
1201 if (unlikely(req->src != req->dst)) {
1202 scatterwalk_unmap(dst, 0);
1203 scatterwalk_done(&dst_sg_walk, 0, 0);
1204 }
1205 scatterwalk_unmap(src, 0);
1206 scatterwalk_unmap(assoc, 0);
1207 scatterwalk_done(&src_sg_walk, 0, 0);
1208 scatterwalk_done(&assoc_sg_walk, 0, 0);
1209 } else {
1210 scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
1211 kfree(src);
1212 }
1213 return retval;
1214}
1215
1216static struct crypto_alg __rfc4106_alg = {
1217 .cra_name = "__gcm-aes-aesni",
1218 .cra_driver_name = "__driver-gcm-aes-aesni",
1219 .cra_priority = 0,
1220 .cra_flags = CRYPTO_ALG_TYPE_AEAD,
1221 .cra_blocksize = 1,
1222 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1223 .cra_alignmask = 0,
1224 .cra_type = &crypto_aead_type,
1225 .cra_module = THIS_MODULE,
1226 .cra_list = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
1227 .cra_u = {
1228 .aead = {
1229 .encrypt = __driver_rfc4106_encrypt,
1230 .decrypt = __driver_rfc4106_decrypt,
1231 },
1232 },
1233};
1234
1235static int __init aesni_init(void)
1236{
1237 int err;
1238
1239 if (!cpu_has_aes) {
1240		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
1241 return -ENODEV;
1242 }
1243
1244 if ((err = crypto_register_alg(&aesni_alg)))
1245 goto aes_err;
1246 if ((err = crypto_register_alg(&__aesni_alg)))
1247 goto __aes_err;
1248 if ((err = crypto_register_alg(&blk_ecb_alg)))
1249 goto blk_ecb_err;
1250 if ((err = crypto_register_alg(&blk_cbc_alg)))
1251 goto blk_cbc_err;
1252 if ((err = crypto_register_alg(&blk_ctr_alg)))
1253 goto blk_ctr_err;
1254 if ((err = crypto_register_alg(&ablk_ecb_alg)))
1255 goto ablk_ecb_err;
1256 if ((err = crypto_register_alg(&ablk_cbc_alg)))
1257 goto ablk_cbc_err;
1258 if ((err = crypto_register_alg(&ablk_ctr_alg)))
1259 goto ablk_ctr_err;
1260#ifdef HAS_CTR
1261 if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
1262 goto ablk_rfc3686_ctr_err;
1263#endif
1264#ifdef HAS_LRW
1265 if ((err = crypto_register_alg(&ablk_lrw_alg)))
1266 goto ablk_lrw_err;
1267#endif
1268#ifdef HAS_PCBC
1269 if ((err = crypto_register_alg(&ablk_pcbc_alg)))
1270 goto ablk_pcbc_err;
1271#endif
1272#ifdef HAS_XTS
1273 if ((err = crypto_register_alg(&ablk_xts_alg)))
1274 goto ablk_xts_err;
1275#endif
1276 err = crypto_register_alg(&__rfc4106_alg);
1277 if (err)
1278 goto __aead_gcm_err;
1279 err = crypto_register_alg(&rfc4106_alg);
1280 if (err)
1281 goto aead_gcm_err;
1282 return err;
1283
1284aead_gcm_err:
1285 crypto_unregister_alg(&__rfc4106_alg);
1286__aead_gcm_err:
1287#ifdef HAS_XTS
1288	crypto_unregister_alg(&ablk_xts_alg);
1289ablk_xts_err:
1290#endif
1291#ifdef HAS_PCBC
1292 crypto_unregister_alg(&ablk_pcbc_alg);
1293ablk_pcbc_err:
1294#endif
1295#ifdef HAS_LRW
1296 crypto_unregister_alg(&ablk_lrw_alg);
1297ablk_lrw_err:
1298#endif
1299#ifdef HAS_CTR
1300 crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1301ablk_rfc3686_ctr_err:
1302#endif
1303 crypto_unregister_alg(&ablk_ctr_alg);
1304ablk_ctr_err:
1305	crypto_unregister_alg(&ablk_cbc_alg);
1306ablk_cbc_err:
1307 crypto_unregister_alg(&ablk_ecb_alg);
1308ablk_ecb_err:
1309 crypto_unregister_alg(&blk_ctr_alg);
1310blk_ctr_err:
1311 crypto_unregister_alg(&blk_cbc_alg);
1312blk_cbc_err:
1313 crypto_unregister_alg(&blk_ecb_alg);
1314blk_ecb_err:
1315 crypto_unregister_alg(&__aesni_alg);
1316__aes_err:
1317 crypto_unregister_alg(&aesni_alg);
1318aes_err:
1319 return err;
1320}
1321
1322static void __exit aesni_exit(void)
1323{
1324 crypto_unregister_alg(&__rfc4106_alg);
1325 crypto_unregister_alg(&rfc4106_alg);
1326#ifdef HAS_XTS
1327 crypto_unregister_alg(&ablk_xts_alg);
1328#endif
1329#ifdef HAS_PCBC
1330 crypto_unregister_alg(&ablk_pcbc_alg);
1331#endif
1332#ifdef HAS_LRW
1333 crypto_unregister_alg(&ablk_lrw_alg);
1334#endif
1335#ifdef HAS_CTR
1336	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1337#endif
1338	crypto_unregister_alg(&ablk_ctr_alg);
1339 crypto_unregister_alg(&ablk_cbc_alg);
1340 crypto_unregister_alg(&ablk_ecb_alg);
1341	crypto_unregister_alg(&blk_ctr_alg);
1342 crypto_unregister_alg(&blk_cbc_alg);
1343 crypto_unregister_alg(&blk_ecb_alg);
1344	crypto_unregister_alg(&__aesni_alg);
1345 crypto_unregister_alg(&aesni_alg);
1346}
1347
1348module_init(aesni_init);
1349module_exit(aesni_exit);
1350
1351MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1352MODULE_LICENSE("GPL");
1353MODULE_ALIAS("aes");