crypto: aesni-intel - use crypto_[un]register_algs
arch/x86/crypto/aesni-intel_glue.c
/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code, the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 *    Authors: Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif
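
/*
 * The HAS_* flags above record whether the corresponding generic mode
 * templates (ctr, lrw, pcbc, xts) are available, built in or as modules;
 * the matching aesni-backed wrappers further down are only compiled and
 * registered when the template they rely on exists.
 */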

struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;
};

/* This data is stored at the end of the crypto_tfm struct.
 * It is a per-"session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];
	struct crypto_aes_ctx aes_key_expanded;
	u8 nonce[4];
	struct cryptd_aead *cryptd_tfm;
};
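
/*
 * Key layout note: for rfc4106(gcm(aes)) the key passed to setkey is the
 * AES key followed by the 4-byte nonce (salt), e.g. 16 + 4 = 20 bytes for
 * AES-128; rfc4106_set_key() below strips the trailing nonce before
 * expanding the AES key.
 */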

struct aesni_gcm_set_hash_subkey_result {
	int err;
	struct completion completion;
};

struct aesni_hash_subkey_req_data {
	u8 iv[16];
	struct aesni_gcm_set_hash_subkey_result result;
	struct scatterlist sg;
};

#define AESNI_ALIGN	(16)
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
			  const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

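/*
 * crypto_fpu_init()/crypto_fpu_exit() register and unregister the "fpu"
 * template (arch/x86/crypto/fpu.c), which runs a wrapped blkcipher inside
 * kernel_fpu_begin()/kernel_fpu_end(); the lrw/pcbc/xts wrappers below are
 * instantiated as fpu(...) so the underlying __driver-aes-aesni cipher can
 * rely on the SSE state being available.
 */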
#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
			const u8 *in, unsigned long plaintext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
			const u8 *in, unsigned long ciphertext_len, u8 *iv,
			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
			u8 *auth_tag, unsigned long auth_tag_len);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)
		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
}
#endif

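/*
 * The tfm context is only guaranteed to be aligned to
 * crypto_tfm_ctx_alignment(), which can be smaller than the 16 bytes the
 * AES-NI code needs, so cra_ctxsize reserves AESNI_ALIGN extra bytes and
 * the helpers here re-align the raw context pointer before using it.
 */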
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

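/*
 * The AES-NI instructions clobber XMM state, so they may only run when
 * irq_fpu_usable() says the FPU can be used in the current context; the
 * synchronous helpers below fall back to the generic x86 assembler
 * implementation (crypto_aes_expand_key / crypto_aes_*_x86) otherwise.
 */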
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (!irq_fpu_usable())
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!irq_fpu_usable())
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

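/*
 * The __aes_encrypt/__aes_decrypt variants skip the FPU checks: they back
 * the internal "__aes-aesni"/"__driver-aes-aesni" cipher, which is meant to
 * be used only beneath the fpu() template (or another caller that has
 * already done kernel_fpu_begin()).
 */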
static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	aesni_dec(ctx, dst, src);
}

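/*
 * The blkcipher helpers below clear CRYPTO_TFM_REQ_MAY_SLEEP before walking
 * the data: the whole walk runs between kernel_fpu_begin() and
 * kernel_fpu_end(), where sleeping is not allowed, so blkcipher_walk_done()
 * must not attempt sleeping allocations.
 */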
static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	return err;
}

#ifdef CONFIG_X86_64
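/*
 * Handle the final partial block of a CTR request: encrypt the current
 * counter block to get one keystream block, XOR just the remaining nbytes
 * into the destination, then bump the counter.
 */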
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
			    struct blkcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[AES_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	aesni_enc(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
	crypto_inc(ctrblk, AES_BLOCK_SIZE);
}

static int ctr_crypt(struct blkcipher_desc *desc,
		     struct scatterlist *dst, struct scatterlist *src,
		     unsigned int nbytes)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	if (walk.nbytes) {
		ctr_crypt_final(ctx, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	kernel_fpu_end();

	return err;
}
#endif

static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}

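/*
 * ablk_encrypt/ablk_decrypt: if the FPU is usable, run the underlying
 * synchronous blkcipher directly; otherwise copy the request and hand it to
 * cryptd, whose worker will redo it in process context where
 * kernel_fpu_begin() is allowed.
 */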
static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->encrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}

static void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}

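/*
 * Each ablk_*_init() below binds the exported async algorithm to one of the
 * internal "__driver-*" implementations through cryptd, and sizes the
 * request context so a complete nested ablkcipher_request can be stashed
 * there when a request has to be deferred.
 */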
static void ablk_init_common(struct crypto_tfm *tfm,
			     struct cryptd_ablkcipher *cryptd_tfm)
{
	struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
}

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}

#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher(
		"rfc3686(__driver-ctr-aes-aesni)", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
#endif
#endif

#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
#endif

#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
					     0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
	struct cryptd_aead *cryptd_tfm;
	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	struct crypto_aead *cryptd_child;
	struct aesni_rfc4106_gcm_ctx *child_ctx;
	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	cryptd_child = cryptd_aead_child(cryptd_tfm);
	child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
	memcpy(child_ctx, ctx, sizeof(*ctx));
	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_aead.reqsize = sizeof(struct aead_request)
		+ crypto_aead_reqsize(&cryptd_tfm->base);
	return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
	return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
	struct aesni_gcm_set_hash_subkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;
	result->err = err;
	complete(&result->completion);
}

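/*
 * GCM's hash subkey is H = E_K(0^128). This helper computes it by
 * allocating a ctr(aes) transform, setting the AES key on it, and
 * encrypting a 16-byte all-zero buffer with an all-zero counter block,
 * which produces exactly E_K(0^128).
 */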
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_ablkcipher *ctr_tfm;
	struct ablkcipher_request *req;
	int ret = -EINVAL;
	struct aesni_hash_subkey_req_data *req_data;

	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	if (IS_ERR(ctr_tfm))
		return PTR_ERR(ctr_tfm);

	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
	if (ret)
		goto out_free_ablkcipher;

	ret = -ENOMEM;
	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ablkcipher;

	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data)
		goto out_free_request;

	memset(req_data->iv, 0, sizeof(req_data->iv));

	/* Clear the data in the hash sub key container to zero.*/
	/* We want to cipher all zeros to create the hash sub key. */
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	init_completion(&req_data->result.completion);
	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
	ablkcipher_request_set_tfm(req, ctr_tfm);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					rfc4106_set_hash_subkey_done,
					&req_data->result);

	ablkcipher_request_set_crypt(req, &req_data->sg,
		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible
			(&req_data->result.completion);
		if (!ret)
			ret = req_data->result.err;
	}
	kfree(req_data);
out_free_request:
	ablkcipher_request_free(req);
out_free_ablkcipher:
	crypto_free_ablkcipher(ctr_tfm);
	return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
		aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/* key is not aligned: use an auxiliary aligned pointer */
		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_mem, key, key_len);
		key = new_key_mem;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}
	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}
	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
	kfree(new_key_mem);
	return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
				unsigned int authsize)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	crypto_aead_crt(parent)->authsize = authsize;
	crypto_aead_crt(cryptd_child)->authsize = authsize;
	return 0;
}

static int rfc4106_encrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_encrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.encrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

static int rfc4106_decrypt(struct aead_request *req)
{
	int ret;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

	if (!irq_fpu_usable()) {
		struct aead_request *cryptd_req =
			(struct aead_request *) aead_request_ctx(req);
		memcpy(cryptd_req, req, sizeof(*req));
		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_aead_decrypt(cryptd_req);
	} else {
		struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
		kernel_fpu_begin();
		ret = cryptd_child->base.crt_aead.decrypt(req);
		kernel_fpu_end();
		return ret;
	}
}

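/*
 * The __driver_rfc4106_* routines below take a fast path when both the data
 * and the AAD scatterlists have a single entry: the pages are mapped and the
 * GCM assembler works on them directly (in place when src == dst).
 * Otherwise the input is linearised into a kmalloc'd bounce buffer and the
 * result is copied back out afterwards.
 */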
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_tab[16+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, we need to have the AAD length equal
	 * to 8 or 12 bytes. */
	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
		return -EINVAL;
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
			GFP_ATOMIC);
		if (unlikely(!src))
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
					req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
		+ ((unsigned long)req->cryptlen), auth_tag_len);

	/* The authTag (aka the Integrity Check Value) needs to be written
	 * back to the packet. */
	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0,
			req->cryptlen + auth_tag_len, 1);
		kfree(src);
	}
	return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
	u8 one_entry_in_sg = 0;
	u8 *src, *dst, *assoc;
	unsigned long tempCipherLen = 0;
	__be32 counter = cpu_to_be32(1);
	int retval = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 iv_and_authTag[32+AESNI_ALIGN];
	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
	u8 *authTag = iv + 16;
	struct scatter_walk src_sg_walk;
	struct scatter_walk assoc_sg_walk;
	struct scatter_walk dst_sg_walk;
	unsigned int i;

	if (unlikely((req->cryptlen < auth_tag_len) ||
		(req->assoclen != 8 && req->assoclen != 12)))
		return -EINVAL;
	/* Assuming we are supporting rfc4106 64-bit extended
	 * sequence numbers, we need to have the AAD length
	 * equal to 8 or 12 bytes. */

	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
	/* IV below built */
	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
		one_entry_in_sg = 1;
		scatterwalk_start(&src_sg_walk, req->src);
		scatterwalk_start(&assoc_sg_walk, req->assoc);
		src = scatterwalk_map(&src_sg_walk);
		assoc = scatterwalk_map(&assoc_sg_walk);
		dst = src;
		if (unlikely(req->src != req->dst)) {
			scatterwalk_start(&dst_sg_walk, req->dst);
			dst = scatterwalk_map(&dst_sg_walk);
		}

	} else {
		/* Allocate memory for src, dst, assoc */
		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
		if (!src)
			return -ENOMEM;
		assoc = (src + req->cryptlen + auth_tag_len);
		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
		scatterwalk_map_and_copy(assoc, req->assoc, 0,
			req->assoclen, 0);
		dst = src;
	}

	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
		authTag, auth_tag_len);

	/* Compare generated tag with passed in tag. */
	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
		-EBADMSG : 0;

	if (one_entry_in_sg) {
		if (unlikely(req->src != req->dst)) {
			scatterwalk_unmap(dst);
			scatterwalk_done(&dst_sg_walk, 0, 0);
		}
		scatterwalk_unmap(src);
		scatterwalk_unmap(assoc);
		scatterwalk_done(&src_sg_walk, 0, 0);
		scatterwalk_done(&assoc_sg_walk, 0, 0);
	} else {
		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
		kfree(src);
	}
	return retval;
}
#endif

static struct crypto_alg aesni_algs[] = { {
	.cra_name = "aes",
	.cra_driver_name = "aes-aesni",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt
		}
	}
}, {
	.cra_name = "__aes-aesni",
	.cra_driver_name = "__driver-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = __aes_encrypt,
			.cia_decrypt = __aes_decrypt
		}
	}
}, {
	.cra_name = "__ecb-aes-aesni",
	.cra_driver_name = "__driver-ecb-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
}, {
	.cra_name = "__cbc-aes-aesni",
	.cra_driver_name = "__driver-cbc-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
}, {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ecb_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
}, {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_cbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#ifdef CONFIG_X86_64
}, {
	.cra_name = "__ctr-aes-aesni",
	.cra_driver_name = "__driver-ctr-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx) +
		       AESNI_ALIGN - 1,
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = aes_set_key,
			.encrypt = ctr_crypt,
			.decrypt = ctr_crypt,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_encrypt,
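			/*
			 * CTR mode decryption is the same operation as
			 * encryption (XOR with the identical keystream),
			 * so .decrypt can point at ablk_encrypt here.
			 */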
			.geniv = "chainiv",
		},
	},
}, {
	.cra_name = "__gcm-aes-aesni",
	.cra_driver_name = "__driver-gcm-aes-aesni",
	.cra_priority = 0,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
		       AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.aead = {
			.encrypt = __driver_rfc4106_encrypt,
			.decrypt = __driver_rfc4106_decrypt,
		},
	},
}, {
	.cra_name = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
		       AESNI_ALIGN,
	.cra_alignmask = 0,
	.cra_type = &crypto_nivaead_type,
	.cra_module = THIS_MODULE,
	.cra_init = rfc4106_init,
	.cra_exit = rfc4106_exit,
	.cra_u = {
		.aead = {
			.setkey = rfc4106_set_key,
			.setauthsize = rfc4106_set_authsize,
			.encrypt = rfc4106_encrypt,
			.decrypt = rfc4106_decrypt,
			.geniv = "seqiv",
			.ivsize = 8,
			.maxauthsize = 16,
		},
	},
#ifdef HAS_CTR
}, {
	.cra_name = "rfc3686(ctr(aes))",
	.cra_driver_name = "rfc3686-ctr-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_rfc3686_ctr_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
			.geniv = "seqiv",
		},
	},
#endif
#endif
#ifdef HAS_LRW
}, {
	.cra_name = "lrw(aes)",
	.cra_driver_name = "lrw-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_lrw_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#endif
#ifdef HAS_PCBC
}, {
	.cra_name = "pcbc(aes)",
	.cra_driver_name = "pcbc-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_pcbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#endif
#ifdef HAS_XTS
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "xts-aes-aesni",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = ablk_xts_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
#endif
} };

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_AES),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

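/*
 * The device table above lets udev/modprobe autoload this module on CPUs
 * that advertise the AES feature flag, and aesni_init() re-checks the same
 * flag with x86_match_cpu() before registering anything.
 */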
static int __init aesni_init(void)
{
	int err, i;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;

	err = crypto_fpu_init();
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(aesni_algs); i++)
		INIT_LIST_HEAD(&aesni_algs[i].cra_list);

	return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

static void __exit aesni_exit(void)
{
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");