crypto: twofish-avx - change to use shared ablk_* functions
[deliverable/linux.git] / arch / x86 / crypto / aesni-intel_glue.c
CommitLineData
54b6a1bd
HY
1/*
2 * Support for Intel AES-NI instructions. This file contains glue
3 * code, the real AES implementation is in intel-aes_asm.S.
4 *
5 * Copyright (C) 2008, Intel Corp.
6 * Author: Huang Ying <ying.huang@intel.com>
7 *
0bd82f5f
TS
8 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9 * interface for 64-bit kernels.
10 * Authors: Adrian Hoban <adrian.hoban@intel.com>
11 * Gabriele Paoloni <gabriele.paoloni@intel.com>
12 * Tadeusz Struk (tadeusz.struk@intel.com)
13 * Aidan O'Mahony (aidan.o.mahony@intel.com)
14 * Copyright (c) 2010, Intel Corporation.
15 *
54b6a1bd
HY
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 */
21
22#include <linux/hardirq.h>
23#include <linux/types.h>
24#include <linux/crypto.h>
7c52d551 25#include <linux/module.h>
54b6a1bd
HY
26#include <linux/err.h>
27#include <crypto/algapi.h>
28#include <crypto/aes.h>
29#include <crypto/cryptd.h>
12387a46 30#include <crypto/ctr.h>
3bd391f0 31#include <asm/cpu_device_id.h>
54b6a1bd
HY
32#include <asm/i387.h>
33#include <asm/aes.h>
0bd82f5f
TS
34#include <crypto/scatterwalk.h>
35#include <crypto/internal/aead.h>
36#include <linux/workqueue.h>
37#include <linux/spinlock.h>
54b6a1bd 38
2cf4ac8b
HY
39#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
40#define HAS_CTR
41#endif
42
43#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
44#define HAS_LRW
45#endif
46
47#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
48#define HAS_PCBC
49#endif
50
51#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
52#define HAS_XTS
53#endif
54
54b6a1bd
HY
55struct async_aes_ctx {
56 struct cryptd_ablkcipher *cryptd_tfm;
57};
58
0bd82f5f
TS
59/* This data is stored at the end of the crypto_tfm struct.
60 * It's a type of per "session" data storage location.
61 * This needs to be 16 byte aligned.
62 */
63struct aesni_rfc4106_gcm_ctx {
64 u8 hash_subkey[16];
65 struct crypto_aes_ctx aes_key_expanded;
66 u8 nonce[4];
67 struct cryptd_aead *cryptd_tfm;
68};
69
70struct aesni_gcm_set_hash_subkey_result {
71 int err;
72 struct completion completion;
73};
74
75struct aesni_hash_subkey_req_data {
76 u8 iv[16];
77 struct aesni_gcm_set_hash_subkey_result result;
78 struct scatterlist sg;
79};
80
81#define AESNI_ALIGN (16)
54b6a1bd 82#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
0bd82f5f 83#define RFC4106_HASH_SUBKEY_SIZE 16
54b6a1bd
HY
84
85asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
86 unsigned int key_len);
87asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
88 const u8 *in);
89asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
90 const u8 *in);
91asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
92 const u8 *in, unsigned int len);
93asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
94 const u8 *in, unsigned int len);
95asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
96 const u8 *in, unsigned int len, u8 *iv);
97asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
98 const u8 *in, unsigned int len, u8 *iv);
9bed4aca
RD
99
100int crypto_fpu_init(void);
101void crypto_fpu_exit(void);
102
0d258efb 103#ifdef CONFIG_X86_64
12387a46
HY
104asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
105 const u8 *in, unsigned int len, u8 *iv);
54b6a1bd 106
0bd82f5f
TS
107/* asmlinkage void aesni_gcm_enc()
108 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
109 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
110 * const u8 *in, Plaintext input
111 * unsigned long plaintext_len, Length of data in bytes for encryption.
112 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
113 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
114 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
115 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
116 * const u8 *aad, Additional Authentication Data (AAD)
117 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
118 * is going to be 8 or 12 bytes
119 * u8 *auth_tag, Authenticated Tag output.
120 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
121 * Valid values are 16 (most likely), 12 or 8.
122 */
123asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
124 const u8 *in, unsigned long plaintext_len, u8 *iv,
125 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
126 u8 *auth_tag, unsigned long auth_tag_len);
127
128/* asmlinkage void aesni_gcm_dec()
129 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
130 * u8 *out, Plaintext output. Decrypt in-place is allowed.
131 * const u8 *in, Ciphertext input
132 * unsigned long ciphertext_len, Length of data in bytes for decryption.
133 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
134 * concatenated with 8 byte Initialisation Vector (from IPSec ESP
135 * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
136 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
137 * const u8 *aad, Additional Authentication Data (AAD)
138 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
139 * to be 8 or 12 bytes
140 * u8 *auth_tag, Authenticated Tag output.
141 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
142 * Valid values are 16 (most likely), 12 or 8.
143 */
144asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
145 const u8 *in, unsigned long ciphertext_len, u8 *iv,
146 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
147 u8 *auth_tag, unsigned long auth_tag_len);
148
149static inline struct
150aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
151{
152 return
153 (struct aesni_rfc4106_gcm_ctx *)
154 PTR_ALIGN((u8 *)
155 crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
156}
559ad0ff 157#endif
0bd82f5f 158
54b6a1bd
HY
159static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
160{
161 unsigned long addr = (unsigned long)raw_ctx;
162 unsigned long align = AESNI_ALIGN;
163
164 if (align <= crypto_tfm_ctx_alignment())
165 align = 1;
166 return (struct crypto_aes_ctx *)ALIGN(addr, align);
167}
168
169static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
170 const u8 *in_key, unsigned int key_len)
171{
172 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
173 u32 *flags = &tfm->crt_flags;
174 int err;
175
176 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
177 key_len != AES_KEYSIZE_256) {
178 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
179 return -EINVAL;
180 }
181
13b79b97 182 if (!irq_fpu_usable())
54b6a1bd
HY
183 err = crypto_aes_expand_key(ctx, in_key, key_len);
184 else {
185 kernel_fpu_begin();
186 err = aesni_set_key(ctx, in_key, key_len);
187 kernel_fpu_end();
188 }
189
190 return err;
191}
192
193static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
194 unsigned int key_len)
195{
196 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
197}
198
199static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
200{
201 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
202
13b79b97 203 if (!irq_fpu_usable())
54b6a1bd
HY
204 crypto_aes_encrypt_x86(ctx, dst, src);
205 else {
206 kernel_fpu_begin();
207 aesni_enc(ctx, dst, src);
208 kernel_fpu_end();
209 }
210}
211
212static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
213{
214 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
215
13b79b97 216 if (!irq_fpu_usable())
54b6a1bd
HY
217 crypto_aes_decrypt_x86(ctx, dst, src);
218 else {
219 kernel_fpu_begin();
220 aesni_dec(ctx, dst, src);
221 kernel_fpu_end();
222 }
223}
224
2cf4ac8b
HY
225static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
226{
227 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
228
229 aesni_enc(ctx, dst, src);
230}
231
232static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
233{
234 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
235
236 aesni_dec(ctx, dst, src);
237}
238
54b6a1bd
HY
239static int ecb_encrypt(struct blkcipher_desc *desc,
240 struct scatterlist *dst, struct scatterlist *src,
241 unsigned int nbytes)
242{
243 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
244 struct blkcipher_walk walk;
245 int err;
246
247 blkcipher_walk_init(&walk, dst, src, nbytes);
248 err = blkcipher_walk_virt(desc, &walk);
9251b64f 249 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
54b6a1bd
HY
250
251 kernel_fpu_begin();
252 while ((nbytes = walk.nbytes)) {
253 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
254 nbytes & AES_BLOCK_MASK);
255 nbytes &= AES_BLOCK_SIZE - 1;
256 err = blkcipher_walk_done(desc, &walk, nbytes);
257 }
258 kernel_fpu_end();
259
260 return err;
261}
262
263static int ecb_decrypt(struct blkcipher_desc *desc,
264 struct scatterlist *dst, struct scatterlist *src,
265 unsigned int nbytes)
266{
267 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
268 struct blkcipher_walk walk;
269 int err;
270
271 blkcipher_walk_init(&walk, dst, src, nbytes);
272 err = blkcipher_walk_virt(desc, &walk);
9251b64f 273 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
54b6a1bd
HY
274
275 kernel_fpu_begin();
276 while ((nbytes = walk.nbytes)) {
277 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
278 nbytes & AES_BLOCK_MASK);
279 nbytes &= AES_BLOCK_SIZE - 1;
280 err = blkcipher_walk_done(desc, &walk, nbytes);
281 }
282 kernel_fpu_end();
283
284 return err;
285}
286
54b6a1bd
HY
287static int cbc_encrypt(struct blkcipher_desc *desc,
288 struct scatterlist *dst, struct scatterlist *src,
289 unsigned int nbytes)
290{
291 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
292 struct blkcipher_walk walk;
293 int err;
294
295 blkcipher_walk_init(&walk, dst, src, nbytes);
296 err = blkcipher_walk_virt(desc, &walk);
9251b64f 297 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
54b6a1bd
HY
298
299 kernel_fpu_begin();
300 while ((nbytes = walk.nbytes)) {
301 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
302 nbytes & AES_BLOCK_MASK, walk.iv);
303 nbytes &= AES_BLOCK_SIZE - 1;
304 err = blkcipher_walk_done(desc, &walk, nbytes);
305 }
306 kernel_fpu_end();
307
308 return err;
309}
310
311static int cbc_decrypt(struct blkcipher_desc *desc,
312 struct scatterlist *dst, struct scatterlist *src,
313 unsigned int nbytes)
314{
315 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
316 struct blkcipher_walk walk;
317 int err;
318
319 blkcipher_walk_init(&walk, dst, src, nbytes);
320 err = blkcipher_walk_virt(desc, &walk);
9251b64f 321 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
54b6a1bd
HY
322
323 kernel_fpu_begin();
324 while ((nbytes = walk.nbytes)) {
325 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
326 nbytes & AES_BLOCK_MASK, walk.iv);
327 nbytes &= AES_BLOCK_SIZE - 1;
328 err = blkcipher_walk_done(desc, &walk, nbytes);
329 }
330 kernel_fpu_end();
331
332 return err;
333}
334
0d258efb 335#ifdef CONFIG_X86_64
12387a46
HY
336static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
337 struct blkcipher_walk *walk)
338{
339 u8 *ctrblk = walk->iv;
340 u8 keystream[AES_BLOCK_SIZE];
341 u8 *src = walk->src.virt.addr;
342 u8 *dst = walk->dst.virt.addr;
343 unsigned int nbytes = walk->nbytes;
344
345 aesni_enc(ctx, keystream, ctrblk);
346 crypto_xor(keystream, src, nbytes);
347 memcpy(dst, keystream, nbytes);
348 crypto_inc(ctrblk, AES_BLOCK_SIZE);
349}
350
351static int ctr_crypt(struct blkcipher_desc *desc,
352 struct scatterlist *dst, struct scatterlist *src,
353 unsigned int nbytes)
354{
355 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
356 struct blkcipher_walk walk;
357 int err;
358
359 blkcipher_walk_init(&walk, dst, src, nbytes);
360 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
361 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
362
363 kernel_fpu_begin();
364 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
365 aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
366 nbytes & AES_BLOCK_MASK, walk.iv);
367 nbytes &= AES_BLOCK_SIZE - 1;
368 err = blkcipher_walk_done(desc, &walk, nbytes);
369 }
370 if (walk.nbytes) {
371 ctr_crypt_final(ctx, &walk);
372 err = blkcipher_walk_done(desc, &walk, 0);
373 }
374 kernel_fpu_end();
375
376 return err;
377}
0d258efb 378#endif
12387a46 379
54b6a1bd
HY
380static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
381 unsigned int key_len)
382{
383 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
2cf4ac8b
HY
384 struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
385 int err;
54b6a1bd 386
2cf4ac8b
HY
387 crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
388 crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
389 & CRYPTO_TFM_REQ_MASK);
390 err = crypto_ablkcipher_setkey(child, key, key_len);
391 crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
392 & CRYPTO_TFM_RES_MASK);
393 return err;
54b6a1bd
HY
394}
395
396static int ablk_encrypt(struct ablkcipher_request *req)
397{
398 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
399 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
400
13b79b97 401 if (!irq_fpu_usable()) {
54b6a1bd
HY
402 struct ablkcipher_request *cryptd_req =
403 ablkcipher_request_ctx(req);
404 memcpy(cryptd_req, req, sizeof(*req));
405 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
406 return crypto_ablkcipher_encrypt(cryptd_req);
407 } else {
408 struct blkcipher_desc desc;
409 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
410 desc.info = req->info;
411 desc.flags = 0;
412 return crypto_blkcipher_crt(desc.tfm)->encrypt(
413 &desc, req->dst, req->src, req->nbytes);
414 }
415}
416
417static int ablk_decrypt(struct ablkcipher_request *req)
418{
419 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
420 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
421
13b79b97 422 if (!irq_fpu_usable()) {
54b6a1bd
HY
423 struct ablkcipher_request *cryptd_req =
424 ablkcipher_request_ctx(req);
425 memcpy(cryptd_req, req, sizeof(*req));
426 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
427 return crypto_ablkcipher_decrypt(cryptd_req);
428 } else {
429 struct blkcipher_desc desc;
430 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
431 desc.info = req->info;
432 desc.flags = 0;
433 return crypto_blkcipher_crt(desc.tfm)->decrypt(
434 &desc, req->dst, req->src, req->nbytes);
435 }
436}
437
438static void ablk_exit(struct crypto_tfm *tfm)
439{
440 struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
441
442 cryptd_free_ablkcipher(ctx->cryptd_tfm);
443}
444
ef45b834 445static int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
54b6a1bd
HY
446{
447 struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
ef45b834
JK
448 struct cryptd_ablkcipher *cryptd_tfm;
449
450 cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
451 if (IS_ERR(cryptd_tfm))
452 return PTR_ERR(cryptd_tfm);
54b6a1bd
HY
453
454 ctx->cryptd_tfm = cryptd_tfm;
455 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
456 crypto_ablkcipher_reqsize(&cryptd_tfm->base);
ef45b834
JK
457
458 return 0;
54b6a1bd
HY
459}
460
461static int ablk_ecb_init(struct crypto_tfm *tfm)
462{
ef45b834 463 return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
54b6a1bd
HY
464}
465
54b6a1bd
HY
466static int ablk_cbc_init(struct crypto_tfm *tfm)
467{
ef45b834 468 return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
54b6a1bd
HY
469}
470
0d258efb 471#ifdef CONFIG_X86_64
2cf4ac8b
HY
472static int ablk_ctr_init(struct crypto_tfm *tfm)
473{
ef45b834 474 return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
2cf4ac8b
HY
475}
476
12387a46
HY
477#ifdef HAS_CTR
478static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
479{
ef45b834 480 return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
12387a46 481}
2cf4ac8b 482#endif
0d258efb 483#endif
2cf4ac8b
HY
484
485#ifdef HAS_LRW
486static int ablk_lrw_init(struct crypto_tfm *tfm)
487{
ef45b834 488 return ablk_init_common(tfm, "fpu(lrw(__driver-aes-aesni))");
2cf4ac8b 489}
2cf4ac8b
HY
490#endif
491
492#ifdef HAS_PCBC
493static int ablk_pcbc_init(struct crypto_tfm *tfm)
494{
ef45b834 495 return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
2cf4ac8b 496}
2cf4ac8b
HY
497#endif
498
499#ifdef HAS_XTS
500static int ablk_xts_init(struct crypto_tfm *tfm)
501{
ef45b834 502 return ablk_init_common(tfm, "fpu(xts(__driver-aes-aesni))");
2cf4ac8b 503}
2cf4ac8b
HY
504#endif
505
559ad0ff 506#ifdef CONFIG_X86_64
0bd82f5f
TS
507static int rfc4106_init(struct crypto_tfm *tfm)
508{
509 struct cryptd_aead *cryptd_tfm;
510 struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
511 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
60af520c
TS
512 struct crypto_aead *cryptd_child;
513 struct aesni_rfc4106_gcm_ctx *child_ctx;
0bd82f5f
TS
514 cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
515 if (IS_ERR(cryptd_tfm))
516 return PTR_ERR(cryptd_tfm);
60af520c
TS
517
518 cryptd_child = cryptd_aead_child(cryptd_tfm);
519 child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
520 memcpy(child_ctx, ctx, sizeof(*ctx));
0bd82f5f
TS
521 ctx->cryptd_tfm = cryptd_tfm;
522 tfm->crt_aead.reqsize = sizeof(struct aead_request)
523 + crypto_aead_reqsize(&cryptd_tfm->base);
524 return 0;
525}
526
527static void rfc4106_exit(struct crypto_tfm *tfm)
528{
529 struct aesni_rfc4106_gcm_ctx *ctx =
530 (struct aesni_rfc4106_gcm_ctx *)
531 PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
532 if (!IS_ERR(ctx->cryptd_tfm))
533 cryptd_free_aead(ctx->cryptd_tfm);
534 return;
535}
536
537static void
538rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
539{
540 struct aesni_gcm_set_hash_subkey_result *result = req->data;
541
542 if (err == -EINPROGRESS)
543 return;
544 result->err = err;
545 complete(&result->completion);
546}
547
548static int
549rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
550{
551 struct crypto_ablkcipher *ctr_tfm;
552 struct ablkcipher_request *req;
553 int ret = -EINVAL;
554 struct aesni_hash_subkey_req_data *req_data;
555
556 ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
557 if (IS_ERR(ctr_tfm))
558 return PTR_ERR(ctr_tfm);
559
560 crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
561
562 ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
7efd95f6 563 if (ret)
fc9044e2 564 goto out_free_ablkcipher;
0bd82f5f 565
fc9044e2 566 ret = -ENOMEM;
0bd82f5f 567 req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
fc9044e2 568 if (!req)
7efd95f6 569 goto out_free_ablkcipher;
0bd82f5f
TS
570
571 req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
fc9044e2 572 if (!req_data)
7efd95f6 573 goto out_free_request;
fc9044e2 574
0bd82f5f
TS
575 memset(req_data->iv, 0, sizeof(req_data->iv));
576
577 /* Clear the data in the hash sub key container to zero.*/
578 /* We want to cipher all zeros to create the hash sub key. */
579 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
580
581 init_completion(&req_data->result.completion);
582 sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
583 ablkcipher_request_set_tfm(req, ctr_tfm);
584 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
585 CRYPTO_TFM_REQ_MAY_BACKLOG,
586 rfc4106_set_hash_subkey_done,
587 &req_data->result);
588
589 ablkcipher_request_set_crypt(req, &req_data->sg,
590 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
591
592 ret = crypto_ablkcipher_encrypt(req);
593 if (ret == -EINPROGRESS || ret == -EBUSY) {
594 ret = wait_for_completion_interruptible
595 (&req_data->result.completion);
596 if (!ret)
597 ret = req_data->result.err;
598 }
fc9044e2 599 kfree(req_data);
7efd95f6 600out_free_request:
0bd82f5f 601 ablkcipher_request_free(req);
7efd95f6 602out_free_ablkcipher:
0bd82f5f
TS
603 crypto_free_ablkcipher(ctr_tfm);
604 return ret;
605}
606
607static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
608 unsigned int key_len)
609{
610 int ret = 0;
611 struct crypto_tfm *tfm = crypto_aead_tfm(parent);
612 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
60af520c
TS
613 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
614 struct aesni_rfc4106_gcm_ctx *child_ctx =
615 aesni_rfc4106_gcm_ctx_get(cryptd_child);
0bd82f5f
TS
616 u8 *new_key_mem = NULL;
617
618 if (key_len < 4) {
619 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
620 return -EINVAL;
621 }
622 /*Account for 4 byte nonce at the end.*/
623 key_len -= 4;
624 if (key_len != AES_KEYSIZE_128) {
625 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
626 return -EINVAL;
627 }
628
629 memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
630 /*This must be on a 16 byte boundary!*/
631 if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
632 return -EINVAL;
633
634 if ((unsigned long)key % AESNI_ALIGN) {
635 /*key is not aligned: use an auxuliar aligned pointer*/
636 new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
637 if (!new_key_mem)
638 return -ENOMEM;
639
640 new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
641 memcpy(new_key_mem, key, key_len);
642 key = new_key_mem;
643 }
644
645 if (!irq_fpu_usable())
646 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
647 key, key_len);
648 else {
649 kernel_fpu_begin();
650 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
651 kernel_fpu_end();
652 }
653 /*This must be on a 16 byte boundary!*/
654 if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
655 ret = -EINVAL;
656 goto exit;
657 }
658 ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
60af520c 659 memcpy(child_ctx, ctx, sizeof(*ctx));
0bd82f5f
TS
660exit:
661 kfree(new_key_mem);
662 return ret;
663}
664
665/* This is the Integrity Check Value (aka the authentication tag length and can
666 * be 8, 12 or 16 bytes long. */
667static int rfc4106_set_authsize(struct crypto_aead *parent,
668 unsigned int authsize)
669{
670 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
671 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
672
673 switch (authsize) {
674 case 8:
675 case 12:
676 case 16:
677 break;
678 default:
679 return -EINVAL;
680 }
681 crypto_aead_crt(parent)->authsize = authsize;
682 crypto_aead_crt(cryptd_child)->authsize = authsize;
683 return 0;
684}
685
686static int rfc4106_encrypt(struct aead_request *req)
687{
688 int ret;
689 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
690 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
0bd82f5f
TS
691
692 if (!irq_fpu_usable()) {
693 struct aead_request *cryptd_req =
694 (struct aead_request *) aead_request_ctx(req);
695 memcpy(cryptd_req, req, sizeof(*req));
696 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
697 return crypto_aead_encrypt(cryptd_req);
698 } else {
60af520c 699 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
0bd82f5f
TS
700 kernel_fpu_begin();
701 ret = cryptd_child->base.crt_aead.encrypt(req);
702 kernel_fpu_end();
703 return ret;
704 }
705}
706
707static int rfc4106_decrypt(struct aead_request *req)
708{
709 int ret;
710 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
711 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
0bd82f5f
TS
712
713 if (!irq_fpu_usable()) {
714 struct aead_request *cryptd_req =
715 (struct aead_request *) aead_request_ctx(req);
716 memcpy(cryptd_req, req, sizeof(*req));
717 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
718 return crypto_aead_decrypt(cryptd_req);
719 } else {
60af520c 720 struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
0bd82f5f
TS
721 kernel_fpu_begin();
722 ret = cryptd_child->base.crt_aead.decrypt(req);
723 kernel_fpu_end();
724 return ret;
725 }
726}
727
0bd82f5f
TS
728static int __driver_rfc4106_encrypt(struct aead_request *req)
729{
730 u8 one_entry_in_sg = 0;
731 u8 *src, *dst, *assoc;
732 __be32 counter = cpu_to_be32(1);
733 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
734 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
735 void *aes_ctx = &(ctx->aes_key_expanded);
736 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
737 u8 iv_tab[16+AESNI_ALIGN];
738 u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
739 struct scatter_walk src_sg_walk;
740 struct scatter_walk assoc_sg_walk;
741 struct scatter_walk dst_sg_walk;
742 unsigned int i;
743
744 /* Assuming we are supporting rfc4106 64-bit extended */
745 /* sequence numbers We need to have the AAD length equal */
746 /* to 8 or 12 bytes */
747 if (unlikely(req->assoclen != 8 && req->assoclen != 12))
748 return -EINVAL;
749 /* IV below built */
750 for (i = 0; i < 4; i++)
751 *(iv+i) = ctx->nonce[i];
752 for (i = 0; i < 8; i++)
753 *(iv+4+i) = req->iv[i];
754 *((__be32 *)(iv+12)) = counter;
755
756 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
757 one_entry_in_sg = 1;
758 scatterwalk_start(&src_sg_walk, req->src);
759 scatterwalk_start(&assoc_sg_walk, req->assoc);
8fd75e12
CW
760 src = scatterwalk_map(&src_sg_walk);
761 assoc = scatterwalk_map(&assoc_sg_walk);
0bd82f5f
TS
762 dst = src;
763 if (unlikely(req->src != req->dst)) {
764 scatterwalk_start(&dst_sg_walk, req->dst);
8fd75e12 765 dst = scatterwalk_map(&dst_sg_walk);
0bd82f5f
TS
766 }
767
768 } else {
769 /* Allocate memory for src, dst, assoc */
770 src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
771 GFP_ATOMIC);
772 if (unlikely(!src))
773 return -ENOMEM;
774 assoc = (src + req->cryptlen + auth_tag_len);
775 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
776 scatterwalk_map_and_copy(assoc, req->assoc, 0,
777 req->assoclen, 0);
778 dst = src;
779 }
780
781 aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
782 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
783 + ((unsigned long)req->cryptlen), auth_tag_len);
784
785 /* The authTag (aka the Integrity Check Value) needs to be written
786 * back to the packet. */
787 if (one_entry_in_sg) {
788 if (unlikely(req->src != req->dst)) {
8fd75e12 789 scatterwalk_unmap(dst);
0bd82f5f
TS
790 scatterwalk_done(&dst_sg_walk, 0, 0);
791 }
8fd75e12
CW
792 scatterwalk_unmap(src);
793 scatterwalk_unmap(assoc);
0bd82f5f
TS
794 scatterwalk_done(&src_sg_walk, 0, 0);
795 scatterwalk_done(&assoc_sg_walk, 0, 0);
796 } else {
797 scatterwalk_map_and_copy(dst, req->dst, 0,
798 req->cryptlen + auth_tag_len, 1);
799 kfree(src);
800 }
801 return 0;
802}
803
804static int __driver_rfc4106_decrypt(struct aead_request *req)
805{
806 u8 one_entry_in_sg = 0;
807 u8 *src, *dst, *assoc;
808 unsigned long tempCipherLen = 0;
809 __be32 counter = cpu_to_be32(1);
810 int retval = 0;
811 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
812 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
813 void *aes_ctx = &(ctx->aes_key_expanded);
814 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
815 u8 iv_and_authTag[32+AESNI_ALIGN];
816 u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
817 u8 *authTag = iv + 16;
818 struct scatter_walk src_sg_walk;
819 struct scatter_walk assoc_sg_walk;
820 struct scatter_walk dst_sg_walk;
821 unsigned int i;
822
823 if (unlikely((req->cryptlen < auth_tag_len) ||
824 (req->assoclen != 8 && req->assoclen != 12)))
825 return -EINVAL;
826 /* Assuming we are supporting rfc4106 64-bit extended */
827 /* sequence numbers We need to have the AAD length */
828 /* equal to 8 or 12 bytes */
829
830 tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
831 /* IV below built */
832 for (i = 0; i < 4; i++)
833 *(iv+i) = ctx->nonce[i];
834 for (i = 0; i < 8; i++)
835 *(iv+4+i) = req->iv[i];
836 *((__be32 *)(iv+12)) = counter;
837
838 if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
839 one_entry_in_sg = 1;
840 scatterwalk_start(&src_sg_walk, req->src);
841 scatterwalk_start(&assoc_sg_walk, req->assoc);
8fd75e12
CW
842 src = scatterwalk_map(&src_sg_walk);
843 assoc = scatterwalk_map(&assoc_sg_walk);
0bd82f5f
TS
844 dst = src;
845 if (unlikely(req->src != req->dst)) {
846 scatterwalk_start(&dst_sg_walk, req->dst);
8fd75e12 847 dst = scatterwalk_map(&dst_sg_walk);
0bd82f5f
TS
848 }
849
850 } else {
851 /* Allocate memory for src, dst, assoc */
852 src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
853 if (!src)
854 return -ENOMEM;
855 assoc = (src + req->cryptlen + auth_tag_len);
856 scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
857 scatterwalk_map_and_copy(assoc, req->assoc, 0,
858 req->assoclen, 0);
859 dst = src;
860 }
861
862 aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
863 ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
864 authTag, auth_tag_len);
865
866 /* Compare generated tag with passed in tag. */
867 retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
868 -EBADMSG : 0;
869
870 if (one_entry_in_sg) {
871 if (unlikely(req->src != req->dst)) {
8fd75e12 872 scatterwalk_unmap(dst);
0bd82f5f
TS
873 scatterwalk_done(&dst_sg_walk, 0, 0);
874 }
8fd75e12
CW
875 scatterwalk_unmap(src);
876 scatterwalk_unmap(assoc);
0bd82f5f
TS
877 scatterwalk_done(&src_sg_walk, 0, 0);
878 scatterwalk_done(&assoc_sg_walk, 0, 0);
879 } else {
880 scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
881 kfree(src);
882 }
883 return retval;
884}
fa46ccb8 885#endif
0bd82f5f 886
fa46ccb8
JK
887static struct crypto_alg aesni_algs[] = { {
888 .cra_name = "aes",
889 .cra_driver_name = "aes-aesni",
890 .cra_priority = 300,
891 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
892 .cra_blocksize = AES_BLOCK_SIZE,
893 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
894 AESNI_ALIGN - 1,
895 .cra_alignmask = 0,
896 .cra_module = THIS_MODULE,
897 .cra_u = {
898 .cipher = {
899 .cia_min_keysize = AES_MIN_KEY_SIZE,
900 .cia_max_keysize = AES_MAX_KEY_SIZE,
901 .cia_setkey = aes_set_key,
902 .cia_encrypt = aes_encrypt,
903 .cia_decrypt = aes_decrypt
904 }
905 }
906}, {
907 .cra_name = "__aes-aesni",
908 .cra_driver_name = "__driver-aes-aesni",
909 .cra_priority = 0,
910 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
911 .cra_blocksize = AES_BLOCK_SIZE,
912 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
913 AESNI_ALIGN - 1,
914 .cra_alignmask = 0,
915 .cra_module = THIS_MODULE,
916 .cra_u = {
917 .cipher = {
918 .cia_min_keysize = AES_MIN_KEY_SIZE,
919 .cia_max_keysize = AES_MAX_KEY_SIZE,
920 .cia_setkey = aes_set_key,
921 .cia_encrypt = __aes_encrypt,
922 .cia_decrypt = __aes_decrypt
923 }
924 }
925}, {
926 .cra_name = "__ecb-aes-aesni",
927 .cra_driver_name = "__driver-ecb-aes-aesni",
928 .cra_priority = 0,
929 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
930 .cra_blocksize = AES_BLOCK_SIZE,
931 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
932 AESNI_ALIGN - 1,
933 .cra_alignmask = 0,
934 .cra_type = &crypto_blkcipher_type,
935 .cra_module = THIS_MODULE,
936 .cra_u = {
937 .blkcipher = {
938 .min_keysize = AES_MIN_KEY_SIZE,
939 .max_keysize = AES_MAX_KEY_SIZE,
940 .setkey = aes_set_key,
941 .encrypt = ecb_encrypt,
942 .decrypt = ecb_decrypt,
943 },
944 },
945}, {
946 .cra_name = "__cbc-aes-aesni",
947 .cra_driver_name = "__driver-cbc-aes-aesni",
948 .cra_priority = 0,
949 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
950 .cra_blocksize = AES_BLOCK_SIZE,
951 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
952 AESNI_ALIGN - 1,
953 .cra_alignmask = 0,
954 .cra_type = &crypto_blkcipher_type,
955 .cra_module = THIS_MODULE,
956 .cra_u = {
957 .blkcipher = {
958 .min_keysize = AES_MIN_KEY_SIZE,
959 .max_keysize = AES_MAX_KEY_SIZE,
960 .setkey = aes_set_key,
961 .encrypt = cbc_encrypt,
962 .decrypt = cbc_decrypt,
963 },
964 },
965}, {
966 .cra_name = "ecb(aes)",
967 .cra_driver_name = "ecb-aes-aesni",
968 .cra_priority = 400,
969 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
970 .cra_blocksize = AES_BLOCK_SIZE,
971 .cra_ctxsize = sizeof(struct async_aes_ctx),
972 .cra_alignmask = 0,
973 .cra_type = &crypto_ablkcipher_type,
974 .cra_module = THIS_MODULE,
975 .cra_init = ablk_ecb_init,
976 .cra_exit = ablk_exit,
977 .cra_u = {
978 .ablkcipher = {
979 .min_keysize = AES_MIN_KEY_SIZE,
980 .max_keysize = AES_MAX_KEY_SIZE,
981 .setkey = ablk_set_key,
982 .encrypt = ablk_encrypt,
983 .decrypt = ablk_decrypt,
984 },
985 },
986}, {
987 .cra_name = "cbc(aes)",
988 .cra_driver_name = "cbc-aes-aesni",
989 .cra_priority = 400,
990 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
991 .cra_blocksize = AES_BLOCK_SIZE,
992 .cra_ctxsize = sizeof(struct async_aes_ctx),
993 .cra_alignmask = 0,
994 .cra_type = &crypto_ablkcipher_type,
995 .cra_module = THIS_MODULE,
996 .cra_init = ablk_cbc_init,
997 .cra_exit = ablk_exit,
998 .cra_u = {
999 .ablkcipher = {
1000 .min_keysize = AES_MIN_KEY_SIZE,
1001 .max_keysize = AES_MAX_KEY_SIZE,
1002 .ivsize = AES_BLOCK_SIZE,
1003 .setkey = ablk_set_key,
1004 .encrypt = ablk_encrypt,
1005 .decrypt = ablk_decrypt,
1006 },
1007 },
1008#ifdef CONFIG_X86_64
1009}, {
1010 .cra_name = "__ctr-aes-aesni",
1011 .cra_driver_name = "__driver-ctr-aes-aesni",
1012 .cra_priority = 0,
1013 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1014 .cra_blocksize = 1,
1015 .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
1016 AESNI_ALIGN - 1,
1017 .cra_alignmask = 0,
1018 .cra_type = &crypto_blkcipher_type,
1019 .cra_module = THIS_MODULE,
1020 .cra_u = {
1021 .blkcipher = {
1022 .min_keysize = AES_MIN_KEY_SIZE,
1023 .max_keysize = AES_MAX_KEY_SIZE,
1024 .ivsize = AES_BLOCK_SIZE,
1025 .setkey = aes_set_key,
1026 .encrypt = ctr_crypt,
1027 .decrypt = ctr_crypt,
1028 },
1029 },
1030}, {
1031 .cra_name = "ctr(aes)",
1032 .cra_driver_name = "ctr-aes-aesni",
1033 .cra_priority = 400,
1034 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1035 .cra_blocksize = 1,
1036 .cra_ctxsize = sizeof(struct async_aes_ctx),
1037 .cra_alignmask = 0,
1038 .cra_type = &crypto_ablkcipher_type,
1039 .cra_module = THIS_MODULE,
1040 .cra_init = ablk_ctr_init,
1041 .cra_exit = ablk_exit,
1042 .cra_u = {
1043 .ablkcipher = {
1044 .min_keysize = AES_MIN_KEY_SIZE,
1045 .max_keysize = AES_MAX_KEY_SIZE,
1046 .ivsize = AES_BLOCK_SIZE,
1047 .setkey = ablk_set_key,
1048 .encrypt = ablk_encrypt,
1049 .decrypt = ablk_encrypt,
1050 .geniv = "chainiv",
1051 },
1052 },
1053}, {
1054 .cra_name = "__gcm-aes-aesni",
1055 .cra_driver_name = "__driver-gcm-aes-aesni",
0bd82f5f
TS
1056 .cra_priority = 0,
1057 .cra_flags = CRYPTO_ALG_TYPE_AEAD,
1058 .cra_blocksize = 1,
fa46ccb8
JK
1059 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1060 AESNI_ALIGN,
0bd82f5f
TS
1061 .cra_alignmask = 0,
1062 .cra_type = &crypto_aead_type,
1063 .cra_module = THIS_MODULE,
0bd82f5f
TS
1064 .cra_u = {
1065 .aead = {
1066 .encrypt = __driver_rfc4106_encrypt,
1067 .decrypt = __driver_rfc4106_decrypt,
1068 },
1069 },
fa46ccb8
JK
1070}, {
1071 .cra_name = "rfc4106(gcm(aes))",
1072 .cra_driver_name = "rfc4106-gcm-aesni",
1073 .cra_priority = 400,
1074 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1075 .cra_blocksize = 1,
1076 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
1077 AESNI_ALIGN,
1078 .cra_alignmask = 0,
1079 .cra_type = &crypto_nivaead_type,
1080 .cra_module = THIS_MODULE,
1081 .cra_init = rfc4106_init,
1082 .cra_exit = rfc4106_exit,
1083 .cra_u = {
1084 .aead = {
1085 .setkey = rfc4106_set_key,
1086 .setauthsize = rfc4106_set_authsize,
1087 .encrypt = rfc4106_encrypt,
1088 .decrypt = rfc4106_decrypt,
1089 .geniv = "seqiv",
1090 .ivsize = 8,
1091 .maxauthsize = 16,
1092 },
1093 },
1094#ifdef HAS_CTR
1095}, {
1096 .cra_name = "rfc3686(ctr(aes))",
1097 .cra_driver_name = "rfc3686-ctr-aes-aesni",
1098 .cra_priority = 400,
1099 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1100 .cra_blocksize = 1,
1101 .cra_ctxsize = sizeof(struct async_aes_ctx),
1102 .cra_alignmask = 0,
1103 .cra_type = &crypto_ablkcipher_type,
1104 .cra_module = THIS_MODULE,
1105 .cra_init = ablk_rfc3686_ctr_init,
1106 .cra_exit = ablk_exit,
1107 .cra_u = {
1108 .ablkcipher = {
1109 .min_keysize = AES_MIN_KEY_SIZE +
1110 CTR_RFC3686_NONCE_SIZE,
1111 .max_keysize = AES_MAX_KEY_SIZE +
1112 CTR_RFC3686_NONCE_SIZE,
1113 .ivsize = CTR_RFC3686_IV_SIZE,
1114 .setkey = ablk_set_key,
1115 .encrypt = ablk_encrypt,
1116 .decrypt = ablk_decrypt,
1117 .geniv = "seqiv",
1118 },
1119 },
1120#endif
1121#endif
1122#ifdef HAS_LRW
1123}, {
1124 .cra_name = "lrw(aes)",
1125 .cra_driver_name = "lrw-aes-aesni",
1126 .cra_priority = 400,
1127 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1128 .cra_blocksize = AES_BLOCK_SIZE,
1129 .cra_ctxsize = sizeof(struct async_aes_ctx),
1130 .cra_alignmask = 0,
1131 .cra_type = &crypto_ablkcipher_type,
1132 .cra_module = THIS_MODULE,
1133 .cra_init = ablk_lrw_init,
1134 .cra_exit = ablk_exit,
1135 .cra_u = {
1136 .ablkcipher = {
1137 .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
1138 .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
1139 .ivsize = AES_BLOCK_SIZE,
1140 .setkey = ablk_set_key,
1141 .encrypt = ablk_encrypt,
1142 .decrypt = ablk_decrypt,
1143 },
1144 },
1145#endif
1146#ifdef HAS_PCBC
1147}, {
1148 .cra_name = "pcbc(aes)",
1149 .cra_driver_name = "pcbc-aes-aesni",
1150 .cra_priority = 400,
1151 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1152 .cra_blocksize = AES_BLOCK_SIZE,
1153 .cra_ctxsize = sizeof(struct async_aes_ctx),
1154 .cra_alignmask = 0,
1155 .cra_type = &crypto_ablkcipher_type,
1156 .cra_module = THIS_MODULE,
1157 .cra_init = ablk_pcbc_init,
1158 .cra_exit = ablk_exit,
1159 .cra_u = {
1160 .ablkcipher = {
1161 .min_keysize = AES_MIN_KEY_SIZE,
1162 .max_keysize = AES_MAX_KEY_SIZE,
1163 .ivsize = AES_BLOCK_SIZE,
1164 .setkey = ablk_set_key,
1165 .encrypt = ablk_encrypt,
1166 .decrypt = ablk_decrypt,
1167 },
1168 },
1169#endif
1170#ifdef HAS_XTS
1171}, {
1172 .cra_name = "xts(aes)",
1173 .cra_driver_name = "xts-aes-aesni",
1174 .cra_priority = 400,
1175 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1176 .cra_blocksize = AES_BLOCK_SIZE,
1177 .cra_ctxsize = sizeof(struct async_aes_ctx),
1178 .cra_alignmask = 0,
1179 .cra_type = &crypto_ablkcipher_type,
1180 .cra_module = THIS_MODULE,
1181 .cra_init = ablk_xts_init,
1182 .cra_exit = ablk_exit,
1183 .cra_u = {
1184 .ablkcipher = {
1185 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1186 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1187 .ivsize = AES_BLOCK_SIZE,
1188 .setkey = ablk_set_key,
1189 .encrypt = ablk_encrypt,
1190 .decrypt = ablk_decrypt,
1191 },
1192 },
559ad0ff 1193#endif
fa46ccb8 1194} };
0bd82f5f 1195
3bd391f0
AK
1196
1197static const struct x86_cpu_id aesni_cpu_id[] = {
1198 X86_FEATURE_MATCH(X86_FEATURE_AES),
1199 {}
1200};
1201MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1202
54b6a1bd
HY
1203static int __init aesni_init(void)
1204{
fa46ccb8 1205 int err, i;
54b6a1bd 1206
3bd391f0 1207 if (!x86_match_cpu(aesni_cpu_id))
54b6a1bd 1208 return -ENODEV;
0bd82f5f 1209
fa46ccb8
JK
1210 err = crypto_fpu_init();
1211 if (err)
1212 return err;
54b6a1bd 1213
fa46ccb8
JK
1214 for (i = 0; i < ARRAY_SIZE(aesni_algs); i++)
1215 INIT_LIST_HEAD(&aesni_algs[i].cra_list);
1216
1217 return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
54b6a1bd
HY
1218}
1219
1220static void __exit aesni_exit(void)
1221{
fa46ccb8 1222 crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
b23b6451
AL
1223
1224 crypto_fpu_exit();
54b6a1bd
HY
1225}
1226
1227module_init(aesni_init);
1228module_exit(aesni_exit);
1229
1230MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
1231MODULE_LICENSE("GPL");
1232MODULE_ALIAS("aes");
This page took 0.319185 seconds and 5 git commands to generate.