arch/x86/crypto/aesni-intel_glue.c
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 * Authors: Adrian Hoban <adrian.hoban@intel.com>
 *          Gabriele Paoloni <gabriele.paoloni@intel.com>
 *          Tadeusz Struk (tadeusz.struk@intel.com)
 *          Aidan O'Mahony (aidan.o.mahony@intel.com)
 * Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/i387.h>
#include <asm/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif

struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};

/* This data is stored at the end of the crypto_tfm struct.
 * It is a per-"session" data storage location.
 * It needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
	struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

#define AESNI_ALIGN (16)
#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(crypto_aead_tfm(tfm)),
			  AESNI_ALIGN);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
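
/*
 * Example: cra_ctxsize reserves sizeof(struct crypto_aes_ctx) +
 * AESNI_ALIGN - 1 bytes, so rounding the raw context up with ALIGN()
 * always stays in bounds; a raw context at, say, 0x1008 is shifted to
 * 0x1010, giving the 16 byte alignment the AES-NI instructions require.
 * When the crypto API already guarantees at least AESNI_ALIGN alignment,
 * align drops to 1 and the pointer is used as-is.
 */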

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}
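
/*
 * Note: aesni_set_key() uses XMM registers, so it may only run where the
 * FPU state can be taken (irq_fpu_usable()); otherwise the generic C key
 * expansion is used instead. The same pattern guards every AES-NI call in
 * this file that can be reached from atomic context.
 */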

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static struct crypto_alg aesni_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-aesni",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(aesni_alg.cra_list),
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt
		}
	}
};

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

static struct crypto_alg __aesni_alg = {
	.cra_name = "__aes-aesni",
	.cra_driver_name = "__driver-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(__aesni_alg.cra_list),
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = __aes_encrypt,
			.cia_decrypt = __aes_decrypt
		}
	}
};
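
/*
 * The "__" variants above call the AES-NI primitives without
 * kernel_fpu_begin()/kernel_fpu_end(). They are meant to be reached only
 * through wrappers such as "fpu(lrw(__driver-aes-aesni))", where the fpu
 * template has already taken the FPU; cra_priority 0 keeps them from
 * being selected directly.
 */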

static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}
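
/*
 * CRYPTO_TFM_REQ_MAY_SLEEP is cleared before kernel_fpu_begin(): the walk
 * must not sleep while the FPU context is held. Each pass processes the
 * largest whole number of blocks (nbytes & AES_BLOCK_MASK) and hands any
 * remainder back to blkcipher_walk_done(). The same loop shape is used by
 * the ECB/CBC helpers below.
 */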

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_ecb_alg = {
	.cra_name = "__ecb-aes-aesni",
	.cra_driver_name = "__driver-ecb-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
};

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_cbc_alg = {
	.cra_name = "__cbc-aes-aesni",
	.cra_driver_name = "__driver-cbc-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
};

#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
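
/*
 * CTR needs no padding: for a trailing partial block the counter block is
 * encrypted on its own and only the remaining nbytes of keystream are
 * XORed over the tail, e.g. a 5 byte tail consumes 5 keystream bytes and
 * one counter increment.
 */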

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}

static struct crypto_alg blk_ctr_alg = {
	.cra_name = "__ctr-aes-aesni",
	.cra_driver_name = "__driver-ctr-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = aes_set_key,
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
};
#endif

static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->encrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}
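
/*
 * Dispatch sketch: with the FPU usable the request is handled
 * synchronously on the underlying blkcipher; otherwise it is re-targeted
 * at the cryptd ablkcipher, which queues it to a workqueue where
 * kernel_fpu_begin() is safe. The request is copied first because cryptd
 * needs its own request context, reserved via crt_ablkcipher.reqsize in
 * ablk_init_common() below.
 */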

static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

static void ablk_init_common(struct crypto_tfm *tfm,
			     struct cryptd_ablkcipher *cryptd_tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_ecb_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
	.cra_init = ablk_ecb_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
	.cra_init = ablk_cbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_ctr_alg = {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
	.cra_init = ablk_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_encrypt,
			.geniv = "chainiv",
		},
	},
};
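
/*
 * CTR applies the same keystream in both directions, so .decrypt can
 * legitimately point at ablk_encrypt here; both paths end up in
 * ctr_crypt() via the underlying "__driver-ctr-aes-aesni" blkcipher.
 */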

#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher(
		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_rfc3686_ctr_alg = {
	.cra_name = "rfc3686(ctr(aes))",
	.cra_driver_name = "rfc3686-ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
	.cra_init = ablk_rfc3686_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
			.geniv = "seqiv",
		},
	},
};
#endif
#endif

#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_lrw_alg = {
	.cra_name = "lrw(aes)",
	.cra_driver_name = "lrw-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
	.cra_init = ablk_lrw_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_pcbc_alg = {
	.cra_name = "pcbc(aes)",
	.cra_driver_name = "pcbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
	.cra_init = ablk_pcbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};
#endif

#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static struct crypto_alg ablk_xts_alg = {
	.cra_name = "xts(aes)",
	.cra_driver_name = "xts-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
	.cra_init = ablk_xts_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};
#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out;

	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req) {
		ret = -EINVAL;
		goto out_free_ablkcipher;
	}

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ret = -ENOMEM;
		goto out_free_request;
	}
	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Zero the hash sub key container; we cipher all zeros to
	 * create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
out_free_request:
	ablkcipher_request_free(req);
	kfree(req_data);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
out:
	return ret;
}
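
/*
 * Background: the GHASH hash subkey H is defined as the encryption of the
 * all-zero block under the AES key. Running one block of "ctr(aes)" with a
 * zero IV over zeroed data yields exactly E_K(0^128), so the helper above
 * computes H without needing a separate synchronous AES transform.
 */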

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/* The key is not aligned: use an auxiliary aligned copy.
		 * Keep the original kmalloc() pointer in new_key_mem so it
		 * can be handed to kfree() below. */
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
exit:
	kfree(new_key_mem);
	return ret;
}
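
/*
 * RFC 4106 key layout: the last 4 bytes of the key material are the salt
 * carried into every per-packet IV, and the 16 bytes before them are the
 * AES-128 key itself; hence the key_len -= 4 above before the key size
 * check and the hash subkey derivation.
 */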

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static struct crypto_alg rfc4106_alg = {
	.cra_name = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_nivaead_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
	.cra_init = rfc4106_init,
	.cra_exit = rfc4106_exit,
	.cra_u = {
		.aead = {
			.setkey = rfc4106_set_key,
			.setauthsize = rfc4106_set_authsize,
			.encrypt = rfc4106_encrypt,
			.decrypt = rfc4106_decrypt,
			.geniv = "seqiv",
			.ivsize = 8,
			.maxauthsize = 16,
		},
	},
};

static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length must be 8 or 12 bytes. */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* Build the IV: 4 byte salt, 8 byte per-packet IV, counter = 1. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					 req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}
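
/*
 * Note on the two paths above: when src, dst and assoc each occupy a
 * single scatterlist entry, the buffers are mapped and processed in
 * place; otherwise everything is linearised into a single GFP_ATOMIC
 * allocation first, because the assembler routines expect contiguous
 * buffers.
 */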

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, the AAD length must be 8 or 12 bytes. */
	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* Build the IV: 4 byte salt, 8 byte per-packet IV, counter = 1. */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk, 0);
		assoc = scatterwalk_map(&assoc_sg_walk, 0);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk, 0);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		assoc = (src + req->cryptlen);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					 req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst, 0);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src, 0);
		scatterwalk_unmap(assoc, 0);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}
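
/*
 * The computed tag is checked against the last auth_tag_len bytes of the
 * ciphertext. Note that the plaintext is still copied back to req->dst
 * before -EBADMSG is returned on a mismatch; callers must treat the
 * output as invalid whenever the return value is non-zero.
 */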

static struct crypto_alg __rfc4106_alg = {
	.cra_name = "__gcm-aes-aesni",
	.cra_driver_name = "__driver-gcm-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
	.cra_u = {
		.aead = {
			.encrypt = __driver_rfc4106_encrypt,
			.decrypt = __driver_rfc4106_decrypt,
		},
	},
};
#endif

static int __init aesni_init(void)
{
	int err;

	if (!cpu_has_aes) {
		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
		return -ENODEV;
	}

	if ((err = crypto_register_alg(&aesni_alg)))
		goto aes_err;
	if ((err = crypto_register_alg(&__aesni_alg)))
		goto __aes_err;
	if ((err = crypto_register_alg(&blk_ecb_alg)))
		goto blk_ecb_err;
	if ((err = crypto_register_alg(&blk_cbc_alg)))
		goto blk_cbc_err;
	if ((err = crypto_register_alg(&ablk_ecb_alg)))
		goto ablk_ecb_err;
	if ((err = crypto_register_alg(&ablk_cbc_alg)))
		goto ablk_cbc_err;
#ifdef CONFIG_X86_64
	if ((err = crypto_register_alg(&blk_ctr_alg)))
		goto blk_ctr_err;
	if ((err = crypto_register_alg(&ablk_ctr_alg)))
		goto ablk_ctr_err;
	if ((err = crypto_register_alg(&__rfc4106_alg)))
		goto __aead_gcm_err;
	if ((err = crypto_register_alg(&rfc4106_alg)))
		goto aead_gcm_err;
#ifdef HAS_CTR
	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
		goto ablk_rfc3686_ctr_err;
#endif
#endif
#ifdef HAS_LRW
	if ((err = crypto_register_alg(&ablk_lrw_alg)))
		goto ablk_lrw_err;
#endif
#ifdef HAS_PCBC
	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
		goto ablk_pcbc_err;
#endif
#ifdef HAS_XTS
	if ((err = crypto_register_alg(&ablk_xts_alg)))
		goto ablk_xts_err;
#endif
	return err;

#ifdef HAS_XTS
ablk_xts_err:
#endif
#ifdef HAS_PCBC
	crypto_unregister_alg(&ablk_pcbc_alg);
ablk_pcbc_err:
#endif
#ifdef HAS_LRW
	crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
ablk_rfc3686_ctr_err:
#endif
	crypto_unregister_alg(&rfc4106_alg);
aead_gcm_err:
	crypto_unregister_alg(&__rfc4106_alg);
__aead_gcm_err:
	crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
	crypto_unregister_alg(&blk_ctr_alg);
blk_ctr_err:
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
	crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
	crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
	crypto_unregister_alg(&__aesni_alg);
__aes_err:
	crypto_unregister_alg(&aesni_alg);
aes_err:
	return err;
}
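
/*
 * The error labels above unwind in exactly the reverse order of the
 * crypto_register_alg() calls: a failure jumps to the label that then
 * falls through and unregisters every algorithm registered before the
 * one that failed, so a partial init never leaves algorithms behind.
 */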

static void __exit aesni_exit(void)
{
#ifdef HAS_XTS
	crypto_unregister_alg(&ablk_xts_alg);
#endif
#ifdef HAS_PCBC
	crypto_unregister_alg(&ablk_pcbc_alg);
#endif
#ifdef HAS_LRW
	crypto_unregister_alg(&ablk_lrw_alg);
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
#endif
	crypto_unregister_alg(&rfc4106_alg);
	crypto_unregister_alg(&__rfc4106_alg);
	crypto_unregister_alg(&ablk_ctr_alg);
	crypto_unregister_alg(&blk_ctr_alg);
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
	crypto_unregister_alg(&__aesni_alg);
	crypto_unregister_alg(&aesni_alg);
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");