/*
 * Support for Intel AES-NI instructions. This file contains glue
 * code; the real AES implementation is in aesni-intel_asm.S.
 *
 * Copyright (C) 2008, Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 *
 * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
 * interface for 64-bit kernels.
 * Authors: Adrian Hoban <adrian.hoban@intel.com>
 *          Gabriele Paoloni <gabriele.paoloni@intel.com>
 *          Tadeusz Struk (tadeusz.struk@intel.com)
 *          Aidan O'Mahony (aidan.o.mahony@intel.com)
 * Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>
#include <asm/crypto/aes.h>
#include <asm/crypto/ablk_helper.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
#define HAS_CTR
#endif

#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
#define HAS_LRW
#endif

#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
#define HAS_PCBC
#endif

#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif

/* This data is stored at the end of the crypto_tfm struct.
 * It's a type of per "session" data storage location.
 * This needs to be 16 byte aligned.
 */
struct aesni_rfc4106_gcm_ctx {
        u8 hash_subkey[16];
        struct crypto_aes_ctx aes_key_expanded;
        u8 nonce[4];
        struct cryptd_aead *cryptd_tfm;
};

struct aesni_gcm_set_hash_subkey_result {
        int err;
        struct completion completion;
};

struct aesni_hash_subkey_req_data {
        u8 iv[16];
        struct aesni_gcm_set_hash_subkey_result result;
        struct scatterlist sg;
};

#define AESNI_ALIGN (16)
#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
#define RFC4106_HASH_SUBKEY_SIZE 16

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
                          const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

int crypto_fpu_init(void);
void crypto_fpu_exit(void);

#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);

/* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
 * const u8 *in, Plaintext input
 * unsigned long plaintext_len, Length of data in bytes for encryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
 *         is going to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
                        const u8 *in, unsigned long plaintext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
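
/*
 * Illustrative sketch only (not part of the original driver): assuming a key
 * schedule expanded with aesni_set_key() and a hash subkey derived via
 * rfc4106_set_hash_subkey(), encrypting a 64 byte plaintext with 8 bytes of
 * AAD and a full 16 byte tag would look roughly like
 *
 *      aesni_gcm_enc(aes_ctx, dst, src, 64, iv,
 *                    hash_subkey, aad, 8, dst + 64, 16);
 *
 * i.e. the tag is written directly behind the ciphertext, which is how
 * __driver_rfc4106_encrypt() below uses this routine.
 */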

/* asmlinkage void aesni_gcm_dec()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * u8 *out, Plaintext output. Decrypt in-place is allowed.
 * const u8 *in, Ciphertext input
 * unsigned long ciphertext_len, Length of data in bytes for decryption.
 * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
 *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
 *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
 * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
 * const u8 *aad, Additional Authentication Data (AAD)
 * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
 *         to be 8 or 12 bytes
 * u8 *auth_tag, Authenticated Tag output.
 * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
 *         Valid values are 16 (most likely), 12 or 8.
 */
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
                        const u8 *in, unsigned long ciphertext_len, u8 *iv,
                        u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
                        u8 *auth_tag, unsigned long auth_tag_len);
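
/*
 * Illustrative sketch only: decryption mirrors the call above, except that
 * the tag is computed into a separate buffer and compared by the caller, e.g.
 *
 *      aesni_gcm_dec(aes_ctx, dst, src, ciphertext_len, iv,
 *                    hash_subkey, aad, 8, auth_tag, 16);
 *      if (memcmp(src + ciphertext_len, auth_tag, 16))
 *              return -EBADMSG;
 *
 * which is what __driver_rfc4106_decrypt() below does.
 */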

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
        return (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(crypto_aead_tfm(tfm)),
                          AESNI_ALIGN);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
        unsigned long addr = (unsigned long)raw_ctx;
        unsigned long align = AESNI_ALIGN;

        if (align <= crypto_tfm_ctx_alignment())
                align = 1;
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
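
/*
 * Note: the raw context blobs are allocated with AESNI_ALIGN - 1 bytes of
 * slack (see the cra_ctxsize fields in aesni_algs below), so aes_ctx() above
 * can always return a 16 byte aligned struct crypto_aes_ctx even though the
 * crypto API itself guarantees less alignment (cra_alignmask is 0).
 */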

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
                              const u8 *in_key, unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        u32 *flags = &tfm->crt_flags;
        int err;

        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if (!irq_fpu_usable())
                err = crypto_aes_expand_key(ctx, in_key, key_len);
        else {
                kernel_fpu_begin();
                err = aesni_set_key(ctx, in_key, key_len);
                kernel_fpu_end();
        }

        return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_encrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_enc(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        if (!irq_fpu_usable())
                crypto_aes_decrypt_x86(ctx, dst, src);
        else {
                kernel_fpu_begin();
                aesni_dec(ctx, dst, src);
                kernel_fpu_end();
        }
}

static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_enc(ctx, dst, src);
}

static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

        aesni_dec(ctx, dst, src);
}

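/*
 * The blkcipher helpers below share one pattern: CRYPTO_TFM_REQ_MAY_SLEEP is
 * cleared before walking the scatterlists because the whole walk runs inside
 * a kernel_fpu_begin()/kernel_fpu_end() section, where sleeping is not
 * allowed; the AES-NI routines then process as many whole blocks as each walk
 * step provides.
 */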
static int ecb_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int ecb_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_encrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        kernel_fpu_end();

        return err;
}

#ifdef CONFIG_X86_64
static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
                            struct blkcipher_walk *walk)
{
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
        u8 *src = walk->src.virt.addr;
        u8 *dst = walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;

        aesni_enc(ctx, keystream, ctrblk);
        crypto_xor(keystream, src, nbytes);
        memcpy(dst, keystream, nbytes);
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
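
/*
 * Illustrative example: in ctr_crypt() below, a 36 byte request is processed
 * as two full 16 byte blocks by aesni_ctr_enc(), and the remaining 4 bytes
 * are handled by ctr_crypt_final() above, which XORs them with one block of
 * keystream; CTR mode therefore needs no padding.
 */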

static int ctr_crypt(struct blkcipher_desc *desc,
                     struct scatterlist *dst, struct scatterlist *src,
                     unsigned int nbytes)
{
        struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
        desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
                err = blkcipher_walk_done(desc, &walk, 0);
        }
        kernel_fpu_end();

        return err;
}
#endif

static int ablk_ecb_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
}

static int ablk_cbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
}

#ifdef CONFIG_X86_64
static int ablk_ctr_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
}

#ifdef HAS_CTR
static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
}
#endif
#endif

#ifdef HAS_LRW
static int ablk_lrw_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "fpu(lrw(__driver-aes-aesni))");
}
#endif

#ifdef HAS_PCBC
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
}
#endif

#ifdef HAS_XTS
static int ablk_xts_init(struct crypto_tfm *tfm)
{
        return ablk_init_common(tfm, "fpu(xts(__driver-aes-aesni))");
}
#endif

#ifdef CONFIG_X86_64
static int rfc4106_init(struct crypto_tfm *tfm)
{
        struct cryptd_aead *cryptd_tfm;
        struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        struct crypto_aead *cryptd_child;
        struct aesni_rfc4106_gcm_ctx *child_ctx;

        cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        cryptd_child = cryptd_aead_child(cryptd_tfm);
        child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
        memcpy(child_ctx, ctx, sizeof(*ctx));
        ctx->cryptd_tfm = cryptd_tfm;
        tfm->crt_aead.reqsize = sizeof(struct aead_request)
                + crypto_aead_reqsize(&cryptd_tfm->base);
        return 0;
}

static void rfc4106_exit(struct crypto_tfm *tfm)
{
        struct aesni_rfc4106_gcm_ctx *ctx =
                (struct aesni_rfc4106_gcm_ctx *)
                PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
        if (!IS_ERR(ctx->cryptd_tfm))
                cryptd_free_aead(ctx->cryptd_tfm);
        return;
}

static void
rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
{
        struct aesni_gcm_set_hash_subkey_result *result = req->data;

        if (err == -EINPROGRESS)
                return;
        result->err = err;
        complete(&result->completion);
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
        struct crypto_ablkcipher *ctr_tfm;
        struct ablkcipher_request *req;
        int ret = -EINVAL;
        struct aesni_hash_subkey_req_data *req_data;

        ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
        if (IS_ERR(ctr_tfm))
                return PTR_ERR(ctr_tfm);

        crypto_ablkcipher_clear_flags(ctr_tfm, ~0);

        ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
        if (ret)
                goto out_free_ablkcipher;

        ret = -ENOMEM;
        req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
        if (!req)
                goto out_free_ablkcipher;

        req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data)
                goto out_free_request;

        memset(req_data->iv, 0, sizeof(req_data->iv));

        /* Clear the data in the hash sub key container to zero.*/
        /* We want to cipher all zeros to create the hash sub key. */
        memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

        init_completion(&req_data->result.completion);
        sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
        ablkcipher_request_set_tfm(req, ctr_tfm);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                        CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        rfc4106_set_hash_subkey_done,
                                        &req_data->result);

        ablkcipher_request_set_crypt(req, &req_data->sg,
                &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                ret = wait_for_completion_interruptible
                        (&req_data->result.completion);
                if (!ret)
                        ret = req_data->result.err;
        }
        kfree(req_data);
out_free_request:
        ablkcipher_request_free(req);
out_free_ablkcipher:
        crypto_free_ablkcipher(ctr_tfm);
        return ret;
}

static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
                           unsigned int key_len)
{
        int ret = 0;
        struct crypto_tfm *tfm = crypto_aead_tfm(parent);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
        struct aesni_rfc4106_gcm_ctx *child_ctx =
                aesni_rfc4106_gcm_ctx_get(cryptd_child);
        u8 *new_key_align, *new_key_mem = NULL;

        if (key_len < 4) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
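        /*
         * RFC4106 key layout, as consumed below: the last 4 bytes of the key
         * material are the nonce/salt, so e.g. a 20 byte blob is a 16 byte
         * AES-128 key followed by a 4 byte salt.
         */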
        /*Account for 4 byte nonce at the end.*/
        key_len -= 4;
        if (key_len != AES_KEYSIZE_128) {
                crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
        /*This must be on a 16 byte boundary!*/
        if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
                return -EINVAL;

        if ((unsigned long)key % AESNI_ALIGN) {
                /*key is not aligned: use an auxiliary aligned pointer*/
                new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
                if (!new_key_mem)
                        return -ENOMEM;

                new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
                memcpy(new_key_align, key, key_len);
                key = new_key_align;
        }

        if (!irq_fpu_usable())
                ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
                                            key, key_len);
        else {
                kernel_fpu_begin();
                ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
                kernel_fpu_end();
        }
        /*This must be on a 16 byte boundary!*/
        if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
                ret = -EINVAL;
                goto exit;
        }
        ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
        memcpy(child_ctx, ctx, sizeof(*ctx));
exit:
        kfree(new_key_mem);
        return ret;
}

/* This is the Integrity Check Value (aka the authentication tag) length
 * and can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
                                unsigned int authsize)
{
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
        struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);

        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }
        crypto_aead_crt(parent)->authsize = authsize;
        crypto_aead_crt(cryptd_child)->authsize = authsize;
        return 0;
}

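/*
 * Request dispatch for the RFC4106 AEAD: when the FPU cannot be used in the
 * current context (!irq_fpu_usable()), the request is handed off to the
 * cryptd instance set up in rfc4106_init(); otherwise it is handled
 * synchronously by the inner "__driver-gcm-aes-aesni" transform inside a
 * kernel_fpu_begin()/kernel_fpu_end() section.
 */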
static int rfc4106_encrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_encrypt(cryptd_req);
        } else {
                struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.encrypt(req);
                kernel_fpu_end();
                return ret;
        }
}

static int rfc4106_decrypt(struct aead_request *req)
{
        int ret;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);

        if (!irq_fpu_usable()) {
                struct aead_request *cryptd_req =
                        (struct aead_request *) aead_request_ctx(req);
                memcpy(cryptd_req, req, sizeof(*req));
                aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
                return crypto_aead_decrypt(cryptd_req);
        } else {
                struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
                kernel_fpu_begin();
                ret = cryptd_child->base.crt_aead.decrypt(req);
                kernel_fpu_end();
                return ret;
        }
}

static int __driver_rfc4106_encrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        __be32 counter = cpu_to_be32(1);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_tab[16+AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, we need to have the AAD length equal
         * to 8 or 12 bytes.
         */
        if (unlikely(req->assoclen != 8 && req->assoclen != 12))
                return -EINVAL;
        /* IV below built */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk);
                assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk);
                }

        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
                        GFP_ATOMIC);
                if (unlikely(!src))
                        return -ENOMEM;
                assoc = (src + req->cryptlen + auth_tag_len);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                                        req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
                + ((unsigned long)req->cryptlen), auth_tag_len);

        /* The authTag (aka the Integrity Check Value) needs to be written
         * back to the packet. */
        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src);
                scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0,
                        req->cryptlen + auth_tag_len, 1);
                kfree(src);
        }
        return 0;
}

static int __driver_rfc4106_decrypt(struct aead_request *req)
{
        u8 one_entry_in_sg = 0;
        u8 *src, *dst, *assoc;
        unsigned long tempCipherLen = 0;
        __be32 counter = cpu_to_be32(1);
        int retval = 0;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
        void *aes_ctx = &(ctx->aes_key_expanded);
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv_and_authTag[32+AESNI_ALIGN];
        u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
        u8 *authTag = iv + 16;
        struct scatter_walk src_sg_walk;
        struct scatter_walk assoc_sg_walk;
        struct scatter_walk dst_sg_walk;
        unsigned int i;

        /* Assuming we are supporting rfc4106 64-bit extended
         * sequence numbers, we need to have the AAD length
         * equal to 8 or 12 bytes.
         */
        if (unlikely((req->cryptlen < auth_tag_len) ||
                (req->assoclen != 8 && req->assoclen != 12)))
                return -EINVAL;

        tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
        /* IV below built */
        for (i = 0; i < 4; i++)
                *(iv+i) = ctx->nonce[i];
        for (i = 0; i < 8; i++)
                *(iv+4+i) = req->iv[i];
        *((__be32 *)(iv+12)) = counter;

        if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
                one_entry_in_sg = 1;
                scatterwalk_start(&src_sg_walk, req->src);
                scatterwalk_start(&assoc_sg_walk, req->assoc);
                src = scatterwalk_map(&src_sg_walk);
                assoc = scatterwalk_map(&assoc_sg_walk);
                dst = src;
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_start(&dst_sg_walk, req->dst);
                        dst = scatterwalk_map(&dst_sg_walk);
                }

        } else {
                /* Allocate memory for src, dst, assoc */
                src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
                if (!src)
                        return -ENOMEM;
                /* assoc data follows the ciphertext+tag copied below, so it
                 * stays within the req->cryptlen + req->assoclen allocation */
                assoc = (src + req->cryptlen);
                scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
                scatterwalk_map_and_copy(assoc, req->assoc, 0,
                                        req->assoclen, 0);
                dst = src;
        }

        aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
                ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
                authTag, auth_tag_len);

        /* Compare generated tag with passed in tag. */
        retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
                -EBADMSG : 0;

        if (one_entry_in_sg) {
                if (unlikely(req->src != req->dst)) {
                        scatterwalk_unmap(dst);
                        scatterwalk_done(&dst_sg_walk, 0, 0);
                }
                scatterwalk_unmap(src);
                scatterwalk_unmap(assoc);
                scatterwalk_done(&src_sg_walk, 0, 0);
                scatterwalk_done(&assoc_sg_walk, 0, 0);
        } else {
                scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
                kfree(src);
        }
        return retval;
}
#endif

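/*
 * Algorithm registrations. The "__driver-*" entries (priority 0) are the
 * bare, synchronous AES-NI implementations and are not meant to be selected
 * directly; the priority 300/400 entries are the user-visible algorithms,
 * most of them asynchronous wrappers (via cryptd and the ablk_* helpers)
 * around those internal driver variants.
 */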
static struct crypto_alg aesni_algs[] = { {
        .cra_name = "aes",
        .cra_driver_name = "aes-aesni",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
                       AESNI_ALIGN - 1,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .cipher = {
                        .cia_min_keysize = AES_MIN_KEY_SIZE,
                        .cia_max_keysize = AES_MAX_KEY_SIZE,
                        .cia_setkey = aes_set_key,
                        .cia_encrypt = aes_encrypt,
                        .cia_decrypt = aes_decrypt
                }
        }
}, {
        .cra_name = "__aes-aesni",
        .cra_driver_name = "__driver-aes-aesni",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
                       AESNI_ALIGN - 1,
        .cra_alignmask = 0,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .cipher = {
                        .cia_min_keysize = AES_MIN_KEY_SIZE,
                        .cia_max_keysize = AES_MAX_KEY_SIZE,
                        .cia_setkey = aes_set_key,
                        .cia_encrypt = __aes_encrypt,
                        .cia_decrypt = __aes_decrypt
                }
        }
}, {
        .cra_name = "__ecb-aes-aesni",
        .cra_driver_name = "__driver-ecb-aes-aesni",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
                       AESNI_ALIGN - 1,
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = aes_set_key,
                        .encrypt = ecb_encrypt,
                        .decrypt = ecb_decrypt,
                },
        },
}, {
        .cra_name = "__cbc-aes-aesni",
        .cra_driver_name = "__driver-cbc-aes-aesni",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
                       AESNI_ALIGN - 1,
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = aes_set_key,
                        .encrypt = cbc_encrypt,
                        .decrypt = cbc_decrypt,
                },
        },
}, {
        .cra_name = "ecb(aes)",
        .cra_driver_name = "ecb-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_ecb_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
}, {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "cbc-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_cbc_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
#ifdef CONFIG_X86_64
}, {
        .cra_name = "__ctr-aes-aesni",
        .cra_driver_name = "__driver-ctr-aes-aesni",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct crypto_aes_ctx) +
                       AESNI_ALIGN - 1,
        .cra_alignmask = 0,
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = aes_set_key,
                        .encrypt = ctr_crypt,
                        .decrypt = ctr_crypt,
                },
        },
}, {
        .cra_name = "ctr(aes)",
        .cra_driver_name = "ctr-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_ctr_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_encrypt,
                        .geniv = "chainiv",
                },
        },
}, {
        .cra_name = "__gcm-aes-aesni",
        .cra_driver_name = "__driver-gcm-aes-aesni",
        .cra_priority = 0,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
                       AESNI_ALIGN,
        .cra_alignmask = 0,
        .cra_type = &crypto_aead_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .aead = {
                        .encrypt = __driver_rfc4106_encrypt,
                        .decrypt = __driver_rfc4106_decrypt,
                },
        },
}, {
        .cra_name = "rfc4106(gcm(aes))",
        .cra_driver_name = "rfc4106-gcm-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
                       AESNI_ALIGN,
        .cra_alignmask = 0,
        .cra_type = &crypto_nivaead_type,
        .cra_module = THIS_MODULE,
        .cra_init = rfc4106_init,
        .cra_exit = rfc4106_exit,
        .cra_u = {
                .aead = {
                        .setkey = rfc4106_set_key,
                        .setauthsize = rfc4106_set_authsize,
                        .encrypt = rfc4106_encrypt,
                        .decrypt = rfc4106_decrypt,
                        .geniv = "seqiv",
                        .ivsize = 8,
                        .maxauthsize = 16,
                },
        },
#ifdef HAS_CTR
}, {
        .cra_name = "rfc3686(ctr(aes))",
        .cra_driver_name = "rfc3686-ctr-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_rfc3686_ctr_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE +
                                       CTR_RFC3686_NONCE_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE +
                                       CTR_RFC3686_NONCE_SIZE,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                        .geniv = "seqiv",
                },
        },
#endif
#endif
#ifdef HAS_LRW
}, {
        .cra_name = "lrw(aes)",
        .cra_driver_name = "lrw-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_lrw_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
#endif
#ifdef HAS_PCBC
}, {
        .cra_name = "pcbc(aes)",
        .cra_driver_name = "pcbc-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_pcbc_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
#endif
#ifdef HAS_XTS
}, {
        .cra_name = "xts(aes)",
        .cra_driver_name = "xts-aes-aesni",
        .cra_priority = 400,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct async_helper_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = ablk_xts_init,
        .cra_exit = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ablk_set_key,
                        .encrypt = ablk_encrypt,
                        .decrypt = ablk_decrypt,
                },
        },
#endif
} };

static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
        int err;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;

        err = crypto_fpu_init();
        if (err)
                return err;

        return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

static void __exit aesni_exit(void)
{
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

        crypto_fpu_exit();
}

module_init(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");