arch/x86/crypto/aesni-intel_glue.c
1/*
2 * Support for Intel AES-NI instructions. This file contains glue
3 * code, the real AES implementation is in aesni-intel_asm.S.
4 *
5 * Copyright (C) 2008, Intel Corp.
6 * Author: Huang Ying <ying.huang@intel.com>
7 *
8 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9 * interface for 64-bit kernels.
10 * Authors: Adrian Hoban <adrian.hoban@intel.com>
11 * Gabriele Paoloni <gabriele.paoloni@intel.com>
12 * Tadeusz Struk (tadeusz.struk@intel.com)
13 * Aidan O'Mahony (aidan.o.mahony@intel.com)
14 * Copyright (c) 2010, Intel Corporation.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 */
21
22#include <linux/hardirq.h>
23#include <linux/types.h>
24#include <linux/crypto.h>
25#include <linux/module.h>
26#include <linux/err.h>
27#include <crypto/algapi.h>
28#include <crypto/aes.h>
29#include <crypto/cryptd.h>
30#include <crypto/ctr.h>
31#include <asm/cpu_device_id.h>
32#include <asm/i387.h>
33#include <asm/aes.h>
34#include <crypto/scatterwalk.h>
35#include <crypto/internal/aead.h>
36#include <linux/workqueue.h>
37#include <linux/spinlock.h>
38
39#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
40#define HAS_CTR
41#endif
42
43#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
44#define HAS_LRW
45#endif
46
47#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
48#define HAS_PCBC
49#endif
50
51#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
52#define HAS_XTS
53#endif
54
55struct async_aes_ctx {
56 struct cryptd_ablkcipher *cryptd_tfm;
57};
58
59/* This data is stored at the end of the crypto_tfm struct.
60 * It's a type of per "session" data storage location.
61 * This needs to be 16 byte aligned.
62 */
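/*
 * hash_subkey holds the GHASH key H = AES-encrypt(key, 0^128),
 * aes_key_expanded the expanded AES key, nonce the 4-byte RFC4106 salt
 * carried at the end of the key material, and cryptd_tfm the async
 * (cryptd) fallback used when the FPU cannot be used in this context.
 */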
63struct aesni_rfc4106_gcm_ctx {
64 u8 hash_subkey[16];
65 struct crypto_aes_ctx aes_key_expanded;
66 u8 nonce[4];
67 struct cryptd_aead *cryptd_tfm;
68};
69
70struct aesni_gcm_set_hash_subkey_result {
71 int err;
72 struct completion completion;
73};
74
75struct aesni_hash_subkey_req_data {
76 u8 iv[16];
77 struct aesni_gcm_set_hash_subkey_result result;
78 struct scatterlist sg;
79};
80
81#define AESNI_ALIGN (16)
82#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
83#define RFC4106_HASH_SUBKEY_SIZE 16
84
85asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
86 unsigned int key_len);
87asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
88 const u8 *in);
89asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
90 const u8 *in);
91asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
92 const u8 *in, unsigned int len);
93asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
94 const u8 *in, unsigned int len);
95asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
96 const u8 *in, unsigned int len, u8 *iv);
97asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
98 const u8 *in, unsigned int len, u8 *iv);
99
100int crypto_fpu_init(void);
101void crypto_fpu_exit(void);
102
103#ifdef CONFIG_X86_64
104asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
105 const u8 *in, unsigned int len, u8 *iv);
106
107/* asmlinkage void aesni_gcm_enc()
108 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
109 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
110 * const u8 *in, Plaintext input
111 * unsigned long plaintext_len, Length of data in bytes for encryption.
112 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
113 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
114 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
115 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
116 * const u8 *aad, Additional Authentication Data (AAD)
117 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
118 * is going to be 8 or 12 bytes
119 * u8 *auth_tag, Authenticated Tag output.
120 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
121 * Valid values are 16 (most likely), 12 or 8.
122 */
123asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
124 const u8 *in, unsigned long plaintext_len, u8 *iv,
125 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
126 u8 *auth_tag, unsigned long auth_tag_len);
127
128/* asmlinkage void aesni_gcm_dec()
129 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
130 * u8 *out, Plaintext output. Decrypt in-place is allowed.
131 * const u8 *in, Ciphertext input
132 * unsigned long ciphertext_len, Length of data in bytes for decryption.
133 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
134 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
135 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
136 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
137 * const u8 *aad, Additional Authentication Data (AAD)
138 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
139 * to be 8 or 12 bytes
140 * u8 *auth_tag, Authenticated Tag output.
141 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
142 * Valid values are 16 (most likely), 12 or 8.
143 */
144asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
145 const u8 *in, unsigned long ciphertext_len, u8 *iv,
146 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
147 u8 *auth_tag, unsigned long auth_tag_len);
148
149static inline struct
150aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
151{
152 return
153 (struct aesni_rfc4106_gcm_ctx *)
154 PTR_ALIGN((u8 *)
155 crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
156}
157#endif
158
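/*
 * The raw tfm context is not guaranteed to be 16-byte aligned (cra_ctxsize
 * reserves AESNI_ALIGN - 1 spare bytes for exactly this reason), so round
 * the pointer up to AESNI_ALIGN before treating it as a crypto_aes_ctx.
 */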
159static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
160{
161 unsigned long addr = (unsigned long)raw_ctx;
162 unsigned long align = AESNI_ALIGN;
163
164 if (align <= crypto_tfm_ctx_alignment())
165 align = 1;
166 return (struct crypto_aes_ctx *)ALIGN(addr, align);
167}
168
169static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
170 const u8 *in_key, unsigned int key_len)
171{
172 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
173 u32 *flags = &tfm->crt_flags;
174 int err;
175
176 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
177 key_len != AES_KEYSIZE_256) {
178 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
179 return -EINVAL;
180 }
181
182	if (!irq_fpu_usable())
183 err = crypto_aes_expand_key(ctx, in_key, key_len);
184 else {
185 kernel_fpu_begin();
186 err = aesni_set_key(ctx, in_key, key_len);
187 kernel_fpu_end();
188 }
189
190 return err;
191}
192
193static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
194 unsigned int key_len)
195{
196 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
197}
198
199static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
200{
201 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
202
203	if (!irq_fpu_usable())
204 crypto_aes_encrypt_x86(ctx, dst, src);
205 else {
206 kernel_fpu_begin();
207 aesni_enc(ctx, dst, src);
208 kernel_fpu_end();
209 }
210}
211
212static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
213{
214 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
215
216	if (!irq_fpu_usable())
217 crypto_aes_decrypt_x86(ctx, dst, src);
218 else {
219 kernel_fpu_begin();
220 aesni_dec(ctx, dst, src);
221 kernel_fpu_end();
222 }
223}
224
225static struct crypto_alg aesni_alg = {
226 .cra_name = "aes",
227 .cra_driver_name = "aes-aesni",
228 .cra_priority = 300,
229 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
230 .cra_blocksize = AES_BLOCK_SIZE,
231 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
232 .cra_alignmask = 0,
233 .cra_module = THIS_MODULE,
234 .cra_list = LIST_HEAD_INIT(aesni_alg.cra_list),
235 .cra_u = {
236 .cipher = {
237 .cia_min_keysize = AES_MIN_KEY_SIZE,
238 .cia_max_keysize = AES_MAX_KEY_SIZE,
239 .cia_setkey = aes_set_key,
240 .cia_encrypt = aes_encrypt,
241 .cia_decrypt = aes_decrypt
242 }
243 }
244};
245
246static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
247{
248 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
249
250 aesni_enc(ctx, dst, src);
251}
252
253static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
254{
255 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
256
257 aesni_dec(ctx, dst, src);
258}
259
260static struct crypto_alg __aesni_alg = {
261 .cra_name = "__aes-aesni",
262 .cra_driver_name = "__driver-aes-aesni",
263 .cra_priority = 0,
264 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
265 .cra_blocksize = AES_BLOCK_SIZE,
266 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
267 .cra_alignmask = 0,
268 .cra_module = THIS_MODULE,
269 .cra_list = LIST_HEAD_INIT(__aesni_alg.cra_list),
270 .cra_u = {
271 .cipher = {
272 .cia_min_keysize = AES_MIN_KEY_SIZE,
273 .cia_max_keysize = AES_MAX_KEY_SIZE,
274 .cia_setkey = aes_set_key,
275 .cia_encrypt = __aes_encrypt,
276 .cia_decrypt = __aes_decrypt
277 }
278 }
279};
280
281static int ecb_encrypt(struct blkcipher_desc *desc,
282 struct scatterlist *dst, struct scatterlist *src,
283 unsigned int nbytes)
284{
285 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
286 struct blkcipher_walk walk;
287 int err;
288
289 blkcipher_walk_init(&walk, dst, src, nbytes);
290 err = blkcipher_walk_virt(desc, &walk);
291	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
292
293 kernel_fpu_begin();
294 while ((nbytes = walk.nbytes)) {
295 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
296 nbytes & AES_BLOCK_MASK);
297 nbytes &= AES_BLOCK_SIZE - 1;
298 err = blkcipher_walk_done(desc, &walk, nbytes);
299 }
300 kernel_fpu_end();
301
302 return err;
303}
304
305static int ecb_decrypt(struct blkcipher_desc *desc,
306 struct scatterlist *dst, struct scatterlist *src,
307 unsigned int nbytes)
308{
309 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
310 struct blkcipher_walk walk;
311 int err;
312
313 blkcipher_walk_init(&walk, dst, src, nbytes);
314 err = blkcipher_walk_virt(desc, &walk);
315	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
316
317 kernel_fpu_begin();
318 while ((nbytes = walk.nbytes)) {
319 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
320 nbytes & AES_BLOCK_MASK);
321 nbytes &= AES_BLOCK_SIZE - 1;
322 err = blkcipher_walk_done(desc, &walk, nbytes);
323 }
324 kernel_fpu_end();
325
326 return err;
327}
328
329static struct crypto_alg blk_ecb_alg = {
330 .cra_name = "__ecb-aes-aesni",
331 .cra_driver_name = "__driver-ecb-aes-aesni",
332 .cra_priority = 0,
333 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
334 .cra_blocksize = AES_BLOCK_SIZE,
335 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
336 .cra_alignmask = 0,
337 .cra_type = &crypto_blkcipher_type,
338 .cra_module = THIS_MODULE,
339 .cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
340 .cra_u = {
341 .blkcipher = {
342 .min_keysize = AES_MIN_KEY_SIZE,
343 .max_keysize = AES_MAX_KEY_SIZE,
344 .setkey = aes_set_key,
345 .encrypt = ecb_encrypt,
346 .decrypt = ecb_decrypt,
347 },
348 },
349};
350
351static int cbc_encrypt(struct blkcipher_desc *desc,
352 struct scatterlist *dst, struct scatterlist *src,
353 unsigned int nbytes)
354{
355 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
356 struct blkcipher_walk walk;
357 int err;
358
359 blkcipher_walk_init(&walk, dst, src, nbytes);
360 err = blkcipher_walk_virt(desc, &walk);
361	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
362
363 kernel_fpu_begin();
364 while ((nbytes = walk.nbytes)) {
365 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
366 nbytes & AES_BLOCK_MASK, walk.iv);
367 nbytes &= AES_BLOCK_SIZE - 1;
368 err = blkcipher_walk_done(desc, &walk, nbytes);
369 }
370 kernel_fpu_end();
371
372 return err;
373}
374
375static int cbc_decrypt(struct blkcipher_desc *desc,
376 struct scatterlist *dst, struct scatterlist *src,
377 unsigned int nbytes)
378{
379 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
380 struct blkcipher_walk walk;
381 int err;
382
383 blkcipher_walk_init(&walk, dst, src, nbytes);
384 err = blkcipher_walk_virt(desc, &walk);
385	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
386
387 kernel_fpu_begin();
388 while ((nbytes = walk.nbytes)) {
389 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
390 nbytes & AES_BLOCK_MASK, walk.iv);
391 nbytes &= AES_BLOCK_SIZE - 1;
392 err = blkcipher_walk_done(desc, &walk, nbytes);
393 }
394 kernel_fpu_end();
395
396 return err;
397}
398
399static struct crypto_alg blk_cbc_alg = {
400 .cra_name = "__cbc-aes-aesni",
401 .cra_driver_name = "__driver-cbc-aes-aesni",
402 .cra_priority = 0,
403 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
404 .cra_blocksize = AES_BLOCK_SIZE,
405 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
406 .cra_alignmask = 0,
407 .cra_type = &crypto_blkcipher_type,
408 .cra_module = THIS_MODULE,
409 .cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
410 .cra_u = {
411 .blkcipher = {
412 .min_keysize = AES_MIN_KEY_SIZE,
413 .max_keysize = AES_MAX_KEY_SIZE,
414 .setkey = aes_set_key,
415 .encrypt = cbc_encrypt,
416 .decrypt = cbc_decrypt,
417 },
418 },
419};
420
421#ifdef CONFIG_X86_64
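/*
 * Handle the final partial block of a CTR request: encrypt the current
 * counter block into a keystream buffer, XOR only the remaining bytes of
 * input against it, and advance the counter.
 */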
422static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
423 struct blkcipher_walk *walk)
424{
425 u8 *ctrblk = walk->iv;
426 u8 keystream[AES_BLOCK_SIZE];
427 u8 *src = walk->src.virt.addr;
428 u8 *dst = walk->dst.virt.addr;
429 unsigned int nbytes = walk->nbytes;
430
431 aesni_enc(ctx, keystream, ctrblk);
432 crypto_xor(keystream, src, nbytes);
433 memcpy(dst, keystream, nbytes);
434 crypto_inc(ctrblk, AES_BLOCK_SIZE);
435}
436
437static int ctr_crypt(struct blkcipher_desc *desc,
438 struct scatterlist *dst, struct scatterlist *src,
439 unsigned int nbytes)
440{
441 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
442 struct blkcipher_walk walk;
443 int err;
444
445 blkcipher_walk_init(&walk, dst, src, nbytes);
446 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
447 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
448
449 kernel_fpu_begin();
450 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
451 aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
452 nbytes & AES_BLOCK_MASK, walk.iv);
453 nbytes &= AES_BLOCK_SIZE - 1;
454 err = blkcipher_walk_done(desc, &walk, nbytes);
455 }
456 if (walk.nbytes) {
457 ctr_crypt_final(ctx, &walk);
458 err = blkcipher_walk_done(desc, &walk, 0);
459 }
460 kernel_fpu_end();
461
462 return err;
463}
464
465static struct crypto_alg blk_ctr_alg = {
466 .cra_name = "__ctr-aes-aesni",
467 .cra_driver_name = "__driver-ctr-aes-aesni",
468 .cra_priority = 0,
469 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
470 .cra_blocksize = 1,
471 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
472 .cra_alignmask = 0,
473 .cra_type = &crypto_blkcipher_type,
474 .cra_module = THIS_MODULE,
475 .cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
476 .cra_u = {
477 .blkcipher = {
478 .min_keysize = AES_MIN_KEY_SIZE,
479 .max_keysize = AES_MAX_KEY_SIZE,
480 .ivsize = AES_BLOCK_SIZE,
481 .setkey = aes_set_key,
482 .encrypt = ctr_crypt,
483 .decrypt = ctr_crypt,
484 },
485 },
486};
487#endif
488
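/*
 * Asynchronous wrappers around the "__driver-*-aes-aesni" blkciphers.
 * When the FPU is usable the request is handled synchronously between
 * kernel_fpu_begin()/kernel_fpu_end(); otherwise it is queued to cryptd,
 * which runs the same inner algorithm later from process context.
 */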
489static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
490 unsigned int key_len)
491{
492 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
493 struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
494 int err;
495
496 crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
497 crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
498 & CRYPTO_TFM_REQ_MASK);
499 err = crypto_ablkcipher_setkey(child, key, key_len);
500 crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
501 & CRYPTO_TFM_RES_MASK);
502 return err;
503}
504
505static int ablk_encrypt(struct ablkcipher_request *req)
506{
507 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
508 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
509
510	if (!irq_fpu_usable()) {
511 struct ablkcipher_request *cryptd_req =
512 ablkcipher_request_ctx(req);
513 memcpy(cryptd_req, req, sizeof(*req));
514 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
515 return crypto_ablkcipher_encrypt(cryptd_req);
516 } else {
517 struct blkcipher_desc desc;
518 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
519 desc.info = req->info;
520 desc.flags = 0;
521 return crypto_blkcipher_crt(desc.tfm)->encrypt(
522 &desc, req->dst, req->src, req->nbytes);
523 }
524}
525
526static int ablk_decrypt(struct ablkcipher_request *req)
527{
528 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
529 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
530
531	if (!irq_fpu_usable()) {
532 struct ablkcipher_request *cryptd_req =
533 ablkcipher_request_ctx(req);
534 memcpy(cryptd_req, req, sizeof(*req));
535 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
536 return crypto_ablkcipher_decrypt(cryptd_req);
537 } else {
538 struct blkcipher_desc desc;
539 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
540 desc.info = req->info;
541 desc.flags = 0;
542 return crypto_blkcipher_crt(desc.tfm)->decrypt(
543 &desc, req->dst, req->src, req->nbytes);
544 }
545}
546
547static void ablk_exit(struct crypto_tfm *tfm)
548{
549 struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
550
551 cryptd_free_ablkcipher(ctx->cryptd_tfm);
552}
553
554static void ablk_init_common(struct crypto_tfm *tfm,
555 struct cryptd_ablkcipher *cryptd_tfm)
556{
557 struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
558
559 ctx->cryptd_tfm = cryptd_tfm;
560 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
561 crypto_ablkcipher_reqsize(&cryptd_tfm->base);
562}
563
564static int ablk_ecb_init(struct crypto_tfm *tfm)
565{
566 struct cryptd_ablkcipher *cryptd_tfm;
567
568 cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
569 if (IS_ERR(cryptd_tfm))
570 return PTR_ERR(cryptd_tfm);
571 ablk_init_common(tfm, cryptd_tfm);
572 return 0;
573}
574
575static struct crypto_alg ablk_ecb_alg = {
576 .cra_name = "ecb(aes)",
577 .cra_driver_name = "ecb-aes-aesni",
578 .cra_priority = 400,
579 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
580 .cra_blocksize = AES_BLOCK_SIZE,
581 .cra_ctxsize = sizeof(struct async_aes_ctx),
582 .cra_alignmask = 0,
583 .cra_type = &crypto_ablkcipher_type,
584 .cra_module = THIS_MODULE,
585 .cra_list = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
586 .cra_init = ablk_ecb_init,
587 .cra_exit = ablk_exit,
588 .cra_u = {
589 .ablkcipher = {
590 .min_keysize = AES_MIN_KEY_SIZE,
591 .max_keysize = AES_MAX_KEY_SIZE,
592 .setkey = ablk_set_key,
593 .encrypt = ablk_encrypt,
594 .decrypt = ablk_decrypt,
595 },
596 },
597};
598
599static int ablk_cbc_init(struct crypto_tfm *tfm)
600{
601 struct cryptd_ablkcipher *cryptd_tfm;
602
603 cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
604 if (IS_ERR(cryptd_tfm))
605 return PTR_ERR(cryptd_tfm);
606 ablk_init_common(tfm, cryptd_tfm);
607 return 0;
608}
609
610static struct crypto_alg ablk_cbc_alg = {
611 .cra_name = "cbc(aes)",
612 .cra_driver_name = "cbc-aes-aesni",
613 .cra_priority = 400,
614 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
615 .cra_blocksize = AES_BLOCK_SIZE,
616 .cra_ctxsize = sizeof(struct async_aes_ctx),
617 .cra_alignmask = 0,
618 .cra_type = &crypto_ablkcipher_type,
619 .cra_module = THIS_MODULE,
620 .cra_list = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
621 .cra_init = ablk_cbc_init,
622 .cra_exit = ablk_exit,
623 .cra_u = {
624 .ablkcipher = {
625 .min_keysize = AES_MIN_KEY_SIZE,
626 .max_keysize = AES_MAX_KEY_SIZE,
627 .ivsize = AES_BLOCK_SIZE,
628 .setkey = ablk_set_key,
629 .encrypt = ablk_encrypt,
630 .decrypt = ablk_decrypt,
631 },
632 },
633};
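/*
 * Illustrative sketch (not part of this driver): kernel users reach these
 * implementations through the generic crypto API by algorithm name, e.g.
 *
 *	struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	...
 *	crypto_free_ablkcipher(tfm);
 *
 * With this module loaded, "cbc(aes)" resolves to the higher-priority
 * "cbc-aes-aesni" instance registered above.
 */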
634
635#ifdef CONFIG_X86_64
636static int ablk_ctr_init(struct crypto_tfm *tfm)
637{
638 struct cryptd_ablkcipher *cryptd_tfm;
639
640	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
641 if (IS_ERR(cryptd_tfm))
642 return PTR_ERR(cryptd_tfm);
643 ablk_init_common(tfm, cryptd_tfm);
644 return 0;
645}
646
647static struct crypto_alg ablk_ctr_alg = {
648 .cra_name = "ctr(aes)",
649 .cra_driver_name = "ctr-aes-aesni",
650 .cra_priority = 400,
651 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
652 .cra_blocksize = 1,
653 .cra_ctxsize = sizeof(struct async_aes_ctx),
654 .cra_alignmask = 0,
655 .cra_type = &crypto_ablkcipher_type,
656 .cra_module = THIS_MODULE,
657 .cra_list = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
658 .cra_init = ablk_ctr_init,
659 .cra_exit = ablk_exit,
660 .cra_u = {
661 .ablkcipher = {
662 .min_keysize = AES_MIN_KEY_SIZE,
663 .max_keysize = AES_MAX_KEY_SIZE,
664 .ivsize = AES_BLOCK_SIZE,
665 .setkey = ablk_set_key,
666 .encrypt = ablk_encrypt,
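			/* CTR mode is its own inverse, so decryption reuses ablk_encrypt */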
667			.decrypt	= ablk_encrypt,
668 .geniv = "chainiv",
669 },
670 },
671};
672
673#ifdef HAS_CTR
674static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
675{
676 struct cryptd_ablkcipher *cryptd_tfm;
677
678 cryptd_tfm = cryptd_alloc_ablkcipher(
679 "rfc3686(__driver-ctr-aes-aesni)", 0, 0);
680 if (IS_ERR(cryptd_tfm))
681 return PTR_ERR(cryptd_tfm);
682 ablk_init_common(tfm, cryptd_tfm);
683 return 0;
684}
685
686static struct crypto_alg ablk_rfc3686_ctr_alg = {
687 .cra_name = "rfc3686(ctr(aes))",
688 .cra_driver_name = "rfc3686-ctr-aes-aesni",
689 .cra_priority = 400,
690 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
691 .cra_blocksize = 1,
692 .cra_ctxsize = sizeof(struct async_aes_ctx),
693 .cra_alignmask = 0,
694 .cra_type = &crypto_ablkcipher_type,
695 .cra_module = THIS_MODULE,
696 .cra_list = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
697 .cra_init = ablk_rfc3686_ctr_init,
698 .cra_exit = ablk_exit,
699 .cra_u = {
700 .ablkcipher = {
701 .min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
702 .max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
703 .ivsize = CTR_RFC3686_IV_SIZE,
704 .setkey = ablk_set_key,
705 .encrypt = ablk_encrypt,
706 .decrypt = ablk_decrypt,
707 .geniv = "seqiv",
708 },
709 },
710};
711#endif
712#endif
713
714#ifdef HAS_LRW
715static int ablk_lrw_init(struct crypto_tfm *tfm)
716{
717 struct cryptd_ablkcipher *cryptd_tfm;
718
719 cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
720 0, 0);
721 if (IS_ERR(cryptd_tfm))
722 return PTR_ERR(cryptd_tfm);
723 ablk_init_common(tfm, cryptd_tfm);
724 return 0;
725}
726
727static struct crypto_alg ablk_lrw_alg = {
728 .cra_name = "lrw(aes)",
729 .cra_driver_name = "lrw-aes-aesni",
730 .cra_priority = 400,
731 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
732 .cra_blocksize = AES_BLOCK_SIZE,
733 .cra_ctxsize = sizeof(struct async_aes_ctx),
734 .cra_alignmask = 0,
735 .cra_type = &crypto_ablkcipher_type,
736 .cra_module = THIS_MODULE,
737 .cra_list = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
738 .cra_init = ablk_lrw_init,
739 .cra_exit = ablk_exit,
740 .cra_u = {
741 .ablkcipher = {
742 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
743 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
744 .ivsize = AES_BLOCK_SIZE,
745 .setkey = ablk_set_key,
746 .encrypt = ablk_encrypt,
747 .decrypt = ablk_decrypt,
748 },
749 },
750};
751#endif
752
753#ifdef HAS_PCBC
754static int ablk_pcbc_init(struct crypto_tfm *tfm)
755{
756 struct cryptd_ablkcipher *cryptd_tfm;
757
758 cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
759 0, 0);
760 if (IS_ERR(cryptd_tfm))
761 return PTR_ERR(cryptd_tfm);
762 ablk_init_common(tfm, cryptd_tfm);
763 return 0;
764}
765
766static struct crypto_alg ablk_pcbc_alg = {
767 .cra_name = "pcbc(aes)",
768 .cra_driver_name = "pcbc-aes-aesni",
769 .cra_priority = 400,
770 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
771 .cra_blocksize = AES_BLOCK_SIZE,
772 .cra_ctxsize = sizeof(struct async_aes_ctx),
773 .cra_alignmask = 0,
774 .cra_type = &crypto_ablkcipher_type,
775 .cra_module = THIS_MODULE,
776 .cra_list = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
777 .cra_init = ablk_pcbc_init,
778 .cra_exit = ablk_exit,
779 .cra_u = {
780 .ablkcipher = {
781 .min_keysize = AES_MIN_KEY_SIZE,
782 .max_keysize = AES_MAX_KEY_SIZE,
783 .ivsize = AES_BLOCK_SIZE,
784 .setkey = ablk_set_key,
785 .encrypt = ablk_encrypt,
786 .decrypt = ablk_decrypt,
787 },
788 },
789};
790#endif
791
792#ifdef HAS_XTS
793static int ablk_xts_init(struct crypto_tfm *tfm)
794{
795 struct cryptd_ablkcipher *cryptd_tfm;
796
797 cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
798 0, 0);
799 if (IS_ERR(cryptd_tfm))
800 return PTR_ERR(cryptd_tfm);
801 ablk_init_common(tfm, cryptd_tfm);
802 return 0;
803}
804
805static struct crypto_alg ablk_xts_alg = {
806 .cra_name = "xts(aes)",
807 .cra_driver_name = "xts-aes-aesni",
808 .cra_priority = 400,
809 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
810 .cra_blocksize = AES_BLOCK_SIZE,
811 .cra_ctxsize = sizeof(struct async_aes_ctx),
812 .cra_alignmask = 0,
813 .cra_type = &crypto_ablkcipher_type,
814 .cra_module = THIS_MODULE,
815 .cra_list = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
816 .cra_init = ablk_xts_init,
817 .cra_exit = ablk_exit,
818 .cra_u = {
819 .ablkcipher = {
820 .min_keysize = 2 * AES_MIN_KEY_SIZE,
821 .max_keysize = 2 * AES_MAX_KEY_SIZE,
822 .ivsize = AES_BLOCK_SIZE,
823 .setkey = ablk_set_key,
824 .encrypt = ablk_encrypt,
825 .decrypt = ablk_decrypt,
826 },
827 },
828};
829#endif
830
831#ifdef CONFIG_X86_64
832static int rfc4106_init(struct crypto_tfm *tfm)
833{
834 struct cryptd_aead *cryptd_tfm;
835 struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
836 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
837 struct crypto_aead *cryptd_child;
838 struct aesni_rfc4106_gcm_ctx *child_ctx;
839 cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
840 if (IS_ERR(cryptd_tfm))
841 return PTR_ERR(cryptd_tfm);
842
843 cryptd_child = cryptd_aead_child(cryptd_tfm);
844 child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
845 memcpy(child_ctx, ctx, sizeof(*ctx));
846 ctx->cryptd_tfm = cryptd_tfm;
847 tfm->crt_aead.reqsize = sizeof(struct aead_request)
848 + crypto_aead_reqsize(&cryptd_tfm->base);
849 return 0;
850}
851
852static void rfc4106_exit(struct crypto_tfm *tfm)
853{
854 struct aesni_rfc4106_gcm_ctx *ctx =
855 (struct aesni_rfc4106_gcm_ctx *)
856 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
857 if (!IS_ERR(ctx->cryptd_tfm))
858 cryptd_free_aead(ctx->cryptd_tfm);
859 return;
860}
861
862static void
863rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
864{
865 struct aesni_gcm_set_hash_subkey_result *result = req->data;
866
867 if (err == -EINPROGRESS)
868 return;
869 result->err = err;
870 complete(&result->completion);
871}
872
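/*
 * Derive the GHASH hash subkey H = AES-encrypt(key, 0^128). This is done by
 * running the generic "ctr(aes)" transform over a zeroed 16-byte buffer with
 * an all-zero counter block and waiting for the (possibly asynchronous)
 * completion.
 */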
873static int
874rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
875{
876 struct crypto_ablkcipher *ctr_tfm;
877 struct ablkcipher_request *req;
878 int ret = -EINVAL;
879 struct aesni_hash_subkey_req_data *req_data;
880
881 ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
882 if (IS_ERR(ctr_tfm))
883 return PTR_ERR(ctr_tfm);
884
885 crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
886
887 ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
888	if (ret)
889		goto out_free_ablkcipher;
890
891	ret = -ENOMEM;
892	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
893	if (!req)
894		goto out_free_ablkcipher;
895
896 req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
897	if (!req_data)
898		goto out_free_request;
899
900 memset(req_data->iv, 0, sizeof(req_data->iv));
901
902 /* Clear the data in the hash sub key container to zero.*/
903 /* We want to cipher all zeros to create the hash sub key. */
904 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
905
906 init_completion(&req_data->result.completion);
907 sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
908 ablkcipher_request_set_tfm(req, ctr_tfm);
909 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
910 CRYPTO_TFM_REQ_MAY_BACKLOG,
911 rfc4106_set_hash_subkey_done,
912 &req_data->result);
913
914 ablkcipher_request_set_crypt(req, &req_data->sg,
915 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
916
917 ret = crypto_ablkcipher_encrypt(req);
918 if (ret == -EINPROGRESS || ret == -EBUSY) {
919 ret = wait_for_completion_interruptible
920 (&req_data->result.completion);
921 if (!ret)
922 ret = req_data->result.err;
923 }
924	kfree(req_data);
925out_free_request:
926	ablkcipher_request_free(req);
927out_free_ablkcipher:
928 crypto_free_ablkcipher(ctr_tfm);
929 return ret;
930}
931
932static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
933 unsigned int key_len)
934{
935 int ret = 0;
936 struct crypto_tfm *tfm = crypto_aead_tfm(parent);
937 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
938 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
939 struct aesni_rfc4106_gcm_ctx *child_ctx =
940 aesni_rfc4106_gcm_ctx_get(cryptd_child);
941 u8 *new_key_mem = NULL;
942
943 if (key_len < 4) {
944 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
945 return -EINVAL;
946 }
947 /*Account for 4 byte nonce at the end.*/
948 key_len -= 4;
949 if (key_len != AES_KEYSIZE_128) {
950 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
951 return -EINVAL;
952 }
953
954 memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
955 /*This must be on a 16 byte boundary!*/
956 if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
957 return -EINVAL;
958
959 if ((unsigned long)key % AESNI_ALIGN) {
960		/* key is not aligned: use an auxiliary aligned pointer */
961 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
962 if (!new_key_mem)
963 return -ENOMEM;
964
965 new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
966 memcpy(new_key_mem, key, key_len);
967 key = new_key_mem;
968 }
969
970 if (!irq_fpu_usable())
971 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
972 key, key_len);
973 else {
974 kernel_fpu_begin();
975 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
976 kernel_fpu_end();
977 }
978 /*This must be on a 16 byte boundary!*/
979 if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
980 ret = -EINVAL;
981 goto exit;
982 }
983 ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
984	memcpy(child_ctx, ctx, sizeof(*ctx));
985exit:
986 kfree(new_key_mem);
987 return ret;
988}
989
990/* This is the Integrity Check Value (aka the authentication tag) length and
991 * can be 8, 12 or 16 bytes long. */
992static int rfc4106_set_authsize(struct crypto_aead *parent,
993 unsigned int authsize)
994{
995 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
996 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
997
998 switch (authsize) {
999 case 8:
1000 case 12:
1001 case 16:
1002 break;
1003 default:
1004 return -EINVAL;
1005 }
1006 crypto_aead_crt(parent)->authsize = authsize;
1007 crypto_aead_crt(cryptd_child)->authsize = authsize;
1008 return 0;
1009}
1010
1011static int rfc4106_encrypt(struct aead_request *req)
1012{
1013 int ret;
1014 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1015 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1016
1017 if (!irq_fpu_usable()) {
1018 struct aead_request *cryptd_req =
1019 (struct aead_request *) aead_request_ctx(req);
1020 memcpy(cryptd_req, req, sizeof(*req));
1021 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1022 return crypto_aead_encrypt(cryptd_req);
1023 } else {
1024		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1025 kernel_fpu_begin();
1026 ret = cryptd_child->base.crt_aead.encrypt(req);
1027 kernel_fpu_end();
1028 return ret;
1029 }
1030}
1031
1032static int rfc4106_decrypt(struct aead_request *req)
1033{
1034 int ret;
1035 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1036 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1037
1038 if (!irq_fpu_usable()) {
1039 struct aead_request *cryptd_req =
1040 (struct aead_request *) aead_request_ctx(req);
1041 memcpy(cryptd_req, req, sizeof(*req));
1042 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
1043 return crypto_aead_decrypt(cryptd_req);
1044 } else {
1045		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
1046 kernel_fpu_begin();
1047 ret = cryptd_child->base.crt_aead.decrypt(req);
1048 kernel_fpu_end();
1049 return ret;
1050 }
1051}
1052
1053static struct crypto_alg rfc4106_alg = {
1054 .cra_name = "rfc4106(gcm(aes))",
1055 .cra_driver_name = "rfc4106-gcm-aesni",
1056 .cra_priority = 400,
1057 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1058 .cra_blocksize = 1,
1059 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1060 .cra_alignmask = 0,
1061 .cra_type = &crypto_nivaead_type,
1062 .cra_module = THIS_MODULE,
1063 .cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
1064 .cra_init = rfc4106_init,
1065 .cra_exit = rfc4106_exit,
1066 .cra_u = {
1067 .aead = {
1068 .setkey = rfc4106_set_key,
1069 .setauthsize = rfc4106_set_authsize,
1070 .encrypt = rfc4106_encrypt,
1071 .decrypt = rfc4106_decrypt,
1072 .geniv = "seqiv",
1073 .ivsize = 8,
1074 .maxauthsize = 16,
1075 },
1076 },
1077};
1078
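/*
 * Inner GCM encrypt path backing "__driver-gcm-aes-aesni". The 16-byte
 * pre-counter block (j0) is built as: 4-byte salt from the key material ||
 * 8-byte explicit IV from the request || the 32-bit counter 0x00000001.
 * Single-entry scatterlists are mapped directly; anything else is copied
 * through a kmalloc'd bounce buffer first.
 */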
1079static int __driver_rfc4106_encrypt(struct aead_request *req)
1080{
1081 u8 one_entry_in_sg = 0;
1082 u8 *src, *dst, *assoc;
1083 __be32 counter = cpu_to_be32(1);
1084 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1085 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1086 void *aes_ctx = &(ctx->aes_key_expanded);
1087 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1088 u8 iv_tab[16+AESNI_ALIGN];
1089 u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
1090 struct scatter_walk src_sg_walk;
1091 struct scatter_walk assoc_sg_walk;
1092 struct scatter_walk dst_sg_walk;
1093 unsigned int i;
1094
1095	/* Assuming we are supporting rfc4106 64-bit extended */
1096	/* sequence numbers, we need the AAD length to be equal */
1097	/* to 8 or 12 bytes. */
1098 if (unlikely(req->assoclen != 8 && req->assoclen != 12))
1099 return -EINVAL;
1100	/* Build the IV below */
1101 for (i = 0; i < 4; i++)
1102 *(iv+i) = ctx->nonce[i];
1103 for (i = 0; i < 8; i++)
1104 *(iv+4+i) = req->iv[i];
1105 *((__be32 *)(iv+12)) = counter;
1106
1107 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1108 one_entry_in_sg = 1;
1109 scatterwalk_start(&src_sg_walk, req->src);
1110 scatterwalk_start(&assoc_sg_walk, req->assoc);
1111 src = scatterwalk_map(&src_sg_walk);
1112 assoc = scatterwalk_map(&assoc_sg_walk);
1113 dst = src;
1114 if (unlikely(req->src != req->dst)) {
1115 scatterwalk_start(&dst_sg_walk, req->dst);
1116			dst = scatterwalk_map(&dst_sg_walk);
1117 }
1118
1119 } else {
1120 /* Allocate memory for src, dst, assoc */
1121 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
1122 GFP_ATOMIC);
1123 if (unlikely(!src))
1124 return -ENOMEM;
1125 assoc = (src + req->cryptlen + auth_tag_len);
1126 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1127 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1128 req->assoclen, 0);
1129 dst = src;
1130 }
1131
1132 aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
1133 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
1134 + ((unsigned long)req->cryptlen), auth_tag_len);
1135
1136 /* The authTag (aka the Integrity Check Value) needs to be written
1137 * back to the packet. */
1138 if (one_entry_in_sg) {
1139 if (unlikely(req->src != req->dst)) {
1140			scatterwalk_unmap(dst);
1141 scatterwalk_done(&dst_sg_walk, 0, 0);
1142 }
1143 scatterwalk_unmap(src);
1144 scatterwalk_unmap(assoc);
1145 scatterwalk_done(&src_sg_walk, 0, 0);
1146 scatterwalk_done(&assoc_sg_walk, 0, 0);
1147 } else {
1148 scatterwalk_map_and_copy(dst, req->dst, 0,
1149 req->cryptlen + auth_tag_len, 1);
1150 kfree(src);
1151 }
1152 return 0;
1153}
1154
1155static int __driver_rfc4106_decrypt(struct aead_request *req)
1156{
1157 u8 one_entry_in_sg = 0;
1158 u8 *src, *dst, *assoc;
1159 unsigned long tempCipherLen = 0;
1160 __be32 counter = cpu_to_be32(1);
1161 int retval = 0;
1162 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1163 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1164 void *aes_ctx = &(ctx->aes_key_expanded);
1165 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
1166 u8 iv_and_authTag[32+AESNI_ALIGN];
1167 u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
1168 u8 *authTag = iv + 16;
1169 struct scatter_walk src_sg_walk;
1170 struct scatter_walk assoc_sg_walk;
1171 struct scatter_walk dst_sg_walk;
1172 unsigned int i;
1173
1174 if (unlikely((req->cryptlen < auth_tag_len) ||
1175 (req->assoclen != 8 && req->assoclen != 12)))
1176 return -EINVAL;
1177	/* Assuming we are supporting rfc4106 64-bit extended */
1178	/* sequence numbers, we need the AAD length to be */
1179	/* equal to 8 or 12 bytes. */
1180
1181 tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1182	/* Build the IV below */
1183 for (i = 0; i < 4; i++)
1184 *(iv+i) = ctx->nonce[i];
1185 for (i = 0; i < 8; i++)
1186 *(iv+4+i) = req->iv[i];
1187 *((__be32 *)(iv+12)) = counter;
1188
1189 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
1190 one_entry_in_sg = 1;
1191 scatterwalk_start(&src_sg_walk, req->src);
1192 scatterwalk_start(&assoc_sg_walk, req->assoc);
1193 src = scatterwalk_map(&src_sg_walk);
1194 assoc = scatterwalk_map(&assoc_sg_walk);
1195 dst = src;
1196 if (unlikely(req->src != req->dst)) {
1197 scatterwalk_start(&dst_sg_walk, req->dst);
1198			dst = scatterwalk_map(&dst_sg_walk);
1199 }
1200
1201 } else {
1202 /* Allocate memory for src, dst, assoc */
1203 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1204 if (!src)
1205 return -ENOMEM;
1206		assoc = (src + req->cryptlen);
1207 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
1208 scatterwalk_map_and_copy(assoc, req->assoc, 0,
1209 req->assoclen, 0);
1210 dst = src;
1211 }
1212
1213 aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
1214 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
1215 authTag, auth_tag_len);
1216
1217 /* Compare generated tag with passed in tag. */
1218 retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
1219 -EBADMSG : 0;
1220
1221 if (one_entry_in_sg) {
1222 if (unlikely(req->src != req->dst)) {
1223			scatterwalk_unmap(dst);
1224 scatterwalk_done(&dst_sg_walk, 0, 0);
1225 }
1226 scatterwalk_unmap(src);
1227 scatterwalk_unmap(assoc);
1228 scatterwalk_done(&src_sg_walk, 0, 0);
1229 scatterwalk_done(&assoc_sg_walk, 0, 0);
1230 } else {
1231 scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
1232 kfree(src);
1233 }
1234 return retval;
1235}
1236
1237static struct crypto_alg __rfc4106_alg = {
1238 .cra_name = "__gcm-aes-aesni",
1239 .cra_driver_name = "__driver-gcm-aes-aesni",
1240 .cra_priority = 0,
1241 .cra_flags = CRYPTO_ALG_TYPE_AEAD,
1242 .cra_blocksize = 1,
1243 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
1244 .cra_alignmask = 0,
1245 .cra_type = &crypto_aead_type,
1246 .cra_module = THIS_MODULE,
1247 .cra_list = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
1248 .cra_u = {
1249 .aead = {
1250 .encrypt = __driver_rfc4106_encrypt,
1251 .decrypt = __driver_rfc4106_decrypt,
1252 },
1253 },
1254};
1255#endif
1256
1257
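/*
 * Matching on the AES CPU feature flag (and exporting the table below via
 * MODULE_DEVICE_TABLE) lets udev/modprobe autoload this module only on
 * processors that advertise the AES-NI instruction set extension.
 */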
1258static const struct x86_cpu_id aesni_cpu_id[] = {
1259 X86_FEATURE_MATCH(X86_FEATURE_AES),
1260 {}
1261};
1262MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1263
1264static int __init aesni_init(void)
1265{
1266 int err;
1267
1268	if (!x86_match_cpu(aesni_cpu_id))
1269		return -ENODEV;
1270
1271 if ((err = crypto_fpu_init()))
1272 goto fpu_err;
1273 if ((err = crypto_register_alg(&aesni_alg)))
1274 goto aes_err;
1275 if ((err = crypto_register_alg(&__aesni_alg)))
1276 goto __aes_err;
1277 if ((err = crypto_register_alg(&blk_ecb_alg)))
1278 goto blk_ecb_err;
1279 if ((err = crypto_register_alg(&blk_cbc_alg)))
1280 goto blk_cbc_err;
1281 if ((err = crypto_register_alg(&ablk_ecb_alg)))
1282 goto ablk_ecb_err;
1283 if ((err = crypto_register_alg(&ablk_cbc_alg)))
1284 goto ablk_cbc_err;
1285#ifdef CONFIG_X86_64
1286 if ((err = crypto_register_alg(&blk_ctr_alg)))
1287 goto blk_ctr_err;
1288 if ((err = crypto_register_alg(&ablk_ctr_alg)))
1289 goto ablk_ctr_err;
1290 if ((err = crypto_register_alg(&__rfc4106_alg)))
1291 goto __aead_gcm_err;
1292 if ((err = crypto_register_alg(&rfc4106_alg)))
1293 goto aead_gcm_err;
1294#ifdef HAS_CTR
1295 if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
1296 goto ablk_rfc3686_ctr_err;
1297#endif
1298#endif
1299#ifdef HAS_LRW
1300 if ((err = crypto_register_alg(&ablk_lrw_alg)))
1301 goto ablk_lrw_err;
1302#endif
1303#ifdef HAS_PCBC
1304 if ((err = crypto_register_alg(&ablk_pcbc_alg)))
1305 goto ablk_pcbc_err;
1306#endif
1307#ifdef HAS_XTS
1308 if ((err = crypto_register_alg(&ablk_xts_alg)))
1309 goto ablk_xts_err;
1310#endif
1311 return err;
1312
1313#ifdef HAS_XTS
1314ablk_xts_err:
1315#endif
1316#ifdef HAS_PCBC
1317 crypto_unregister_alg(&ablk_pcbc_alg);
1318ablk_pcbc_err:
1319#endif
1320#ifdef HAS_LRW
1321 crypto_unregister_alg(&ablk_lrw_alg);
1322ablk_lrw_err:
1323#endif
1324#ifdef CONFIG_X86_64
1325#ifdef HAS_CTR
1326 crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1327ablk_rfc3686_ctr_err:
1328#endif
1329 crypto_unregister_alg(&rfc4106_alg);
1330aead_gcm_err:
1331 crypto_unregister_alg(&__rfc4106_alg);
1332__aead_gcm_err:
1333 crypto_unregister_alg(&ablk_ctr_alg);
1334ablk_ctr_err:
1335 crypto_unregister_alg(&blk_ctr_alg);
1336blk_ctr_err:
1337#endif
1338	crypto_unregister_alg(&ablk_cbc_alg);
1339ablk_cbc_err:
1340 crypto_unregister_alg(&ablk_ecb_alg);
1341ablk_ecb_err:
1342 crypto_unregister_alg(&blk_cbc_alg);
1343blk_cbc_err:
1344 crypto_unregister_alg(&blk_ecb_alg);
1345blk_ecb_err:
1346 crypto_unregister_alg(&__aesni_alg);
1347__aes_err:
1348 crypto_unregister_alg(&aesni_alg);
1349aes_err:
1350fpu_err:
1351 return err;
1352}
1353
1354static void __exit aesni_exit(void)
1355{
1356#ifdef HAS_XTS
1357 crypto_unregister_alg(&ablk_xts_alg);
1358#endif
1359#ifdef HAS_PCBC
1360 crypto_unregister_alg(&ablk_pcbc_alg);
1361#endif
1362#ifdef HAS_LRW
1363 crypto_unregister_alg(&ablk_lrw_alg);
1364#endif
1365#ifdef CONFIG_X86_64
1366#ifdef HAS_CTR
1367	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
1368#endif
1369 crypto_unregister_alg(&rfc4106_alg);
1370 crypto_unregister_alg(&__rfc4106_alg);
1371	crypto_unregister_alg(&ablk_ctr_alg);
1372 crypto_unregister_alg(&blk_ctr_alg);
1373#endif
1374 crypto_unregister_alg(&ablk_cbc_alg);
1375 crypto_unregister_alg(&ablk_ecb_alg);
1376 crypto_unregister_alg(&blk_cbc_alg);
1377 crypto_unregister_alg(&blk_ecb_alg);
1378	crypto_unregister_alg(&__aesni_alg);
1379	crypto_unregister_alg(&aesni_alg);
1380
1381 crypto_fpu_exit();
1382}
1383
1384module_init(aesni_init);
1385module_exit(aesni_exit);
1386
1387MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1388MODULE_LICENSE("GPL");
1389MODULE_ALIAS("aes");