/*
 * linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
 *
 * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <crypto/xts.h>

#include "aes_glue.h"

#define BIT_SLICED_KEY_MAXSIZE	(128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)

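/*
 * A BS_KEY holds both the regular AES round keys and a bit sliced copy
 * of them. The setkey routines clear the 'converted' flag; the bsaes
 * NEON code (cf. bsaes_enc_key_convert/bsaes_dec_key_convert below)
 * converts the round keys into bit sliced form the first time they are
 * used, so conversion happens lazily and only if the NEON path is
 * actually taken.
 */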
struct BS_KEY {
	struct AES_KEY	rk;
	int		converted;
	u8 __aligned(8)	bs[BIT_SLICED_KEY_MAXSIZE];
} __aligned(8);

asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);

asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
				  struct BS_KEY *key, u8 iv[]);

asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
					   struct BS_KEY *key, u8 const iv[]);

asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
				  struct BS_KEY *key, u8 tweak[]);

asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
				  struct BS_KEY *key, u8 tweak[]);

struct aesbs_cbc_ctx {
	struct AES_KEY	enc;
	struct BS_KEY	dec;
};

struct aesbs_ctr_ctx {
	struct BS_KEY	enc;
};

struct aesbs_xts_ctx {
	struct BS_KEY	enc;
	struct BS_KEY	dec;
	struct AES_KEY	twkey;
};

static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			     unsigned int key_len)
{
	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
	int bits = key_len * 8;

	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	ctx->dec.rk = ctx->enc;
	private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
	ctx->dec.converted = 0;
	return 0;
}

static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			     unsigned int key_len)
{
	struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
	int bits = key_len * 8;

	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	ctx->enc.converted = 0;
	return 0;
}

static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			     unsigned int key_len)
{
	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	int bits = key_len * 4;	/* key_len covers two AES keys of equal size */
	int err;

	err = xts_check_key(tfm, in_key, key_len);
	if (err)
		return err;

	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	ctx->dec.rk = ctx->enc.rk;
	private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
	private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
	ctx->enc.converted = ctx->dec.converted = 0;
	return 0;
}

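/*
 * CBC encryption is inherently sequential (each block's input depends
 * on the previous ciphertext block), so there is nothing for the 8-way
 * bit sliced NEON code to parallelize here and the scalar AES_encrypt()
 * routine is used instead. Decryption, by contrast, is parallelizable
 * and gets a NEON path below.
 */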
static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst,
			     struct scatterlist *src, unsigned int nbytes)
{
	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while (walk.nbytes) {
		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
		u8 *src = walk.src.virt.addr;

		if (walk.dst.virt.addr == walk.src.virt.addr) {
			u8 *iv = walk.iv;

			do {
				crypto_xor(src, iv, AES_BLOCK_SIZE);
				AES_encrypt(src, src, &ctx->enc);
				iv = src;
				src += AES_BLOCK_SIZE;
			} while (--blocks);
			memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		} else {
			u8 *dst = walk.dst.virt.addr;

			do {
				crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
				AES_encrypt(walk.iv, dst, &ctx->enc);
				memcpy(walk.iv, dst, AES_BLOCK_SIZE);
				src += AES_BLOCK_SIZE;
				dst += AES_BLOCK_SIZE;
			} while (--blocks);
		}
		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

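/*
 * The bit sliced NEON code operates on 8 AES blocks at a time, so the
 * walk is chunked in multiples of 8 blocks and anything shorter,
 * including the final tail, drops through to the scalar loop below.
 * Note that bsaes_cbc_encrypt() (the name is inherited from the OpenSSL
 * code this was derived from) is what performs the CBC decryption on
 * the NEON path.
 */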
static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst,
			     struct scatterlist *src, unsigned int nbytes)
{
	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);

	while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
		kernel_neon_begin();
		bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
				  walk.nbytes, &ctx->dec, walk.iv);
		kernel_neon_end();
		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
	}
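	/*
	 * Scalar fallback for the sub-8-block tail. When decrypting in
	 * place, each ciphertext block is needed again as the IV for the
	 * next block but is overwritten by its plaintext, so it is saved
	 * first in one of two bounce buffers (bk[0]/bk[1]) that alternate
	 * from block to block.
	 */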
	while (walk.nbytes) {
		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
		u8 *dst = walk.dst.virt.addr;
		u8 *src = walk.src.virt.addr;
		u8 bk[2][AES_BLOCK_SIZE];
		u8 *iv = walk.iv;

		do {
			if (walk.dst.virt.addr == walk.src.virt.addr)
				memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);

			AES_decrypt(src, dst, &ctx->dec.rk);
			crypto_xor(dst, iv, AES_BLOCK_SIZE);

			if (walk.dst.virt.addr == walk.src.virt.addr)
				iv = bk[blocks & 1];
			else
				iv = src;

			dst += AES_BLOCK_SIZE;
			src += AES_BLOCK_SIZE;
		} while (--blocks);
		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

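/*
 * Add 'addend' to the 128-bit big-endian counter, propagating any carry
 * into the higher order words: e.g. if ctr[3] holds 0xffffffff, adding 1
 * wraps it to 0 and carries a 1 into ctr[2].
 */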
static void inc_be128_ctr(__be32 ctr[], u32 addend)
{
	int i;

	for (i = 3; i >= 0; i--, addend = 1) {
		u32 n = be32_to_cpu(ctr[i]) + addend;

		ctr[i] = cpu_to_be32(n);
		if (n >= addend)
			break;
	}
}

static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst, struct scatterlist *src,
			     unsigned int nbytes)
{
	struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	u32 blocks;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);

	while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
		__be32 *ctr = (__be32 *)walk.iv;
		u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);

		/* avoid 32 bit counter overflow in the NEON code */
		if (unlikely(headroom < blocks)) {
			blocks = headroom + 1;
			tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
		}
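		/*
		 * The ctr32 NEON routine only increments the low 32 bits
		 * of the counter, so 'blocks' is clamped above to run
		 * exactly up to the 32-bit wraparound; inc_be128_ctr()
		 * below then carries into the upper words before the
		 * next pass.
		 */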
		kernel_neon_begin();
		bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
					   walk.dst.virt.addr, blocks,
					   &ctx->enc, walk.iv);
		kernel_neon_end();
		inc_be128_ctr(ctr, blocks);

		nbytes -= blocks * AES_BLOCK_SIZE;
		if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
			break;

		err = blkcipher_walk_done(desc, &walk, tail);
	}
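	/*
	 * Any final partial block is handled here: encrypt the counter
	 * block with the scalar cipher and XOR as much keystream into
	 * the data as there are bytes left.
	 */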
	if (walk.nbytes) {
		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
		u8 ks[AES_BLOCK_SIZE];

		AES_encrypt(walk.iv, ks, &ctx->enc.rk);
		if (tdst != tsrc)
			memcpy(tdst, tsrc, nbytes);
		crypto_xor(tdst, ks, nbytes);
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	return err;
}

static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst,
			     struct scatterlist *src, unsigned int nbytes)
{
	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);

	/* generate the initial tweak */
	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);

	while (walk.nbytes) {
		kernel_neon_begin();
		bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
				  walk.nbytes, &ctx->enc, walk.iv);
		kernel_neon_end();
		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst,
			     struct scatterlist *src, unsigned int nbytes)
{
	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);

	/* generate the initial tweak */
	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);

	while (walk.nbytes) {
		kernel_neon_begin();
		bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
				  walk.nbytes, &ctx->dec, walk.iv);
		kernel_neon_end();
		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

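/*
 * The "__" prefixed variants are marked CRYPTO_ALG_INTERNAL: they can
 * only run where the NEON unit is usable (not from hard interrupt
 * context, for instance), so they are hidden from general users. The
 * cbc(aes), ctr(aes) and xts(aes) entries below wrap them using the
 * ablk helper, which invokes the NEON code directly when possible and
 * defers to an asynchronous cryptd worker otherwise.
 */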
static struct crypto_alg aesbs_algs[] = { {
	.cra_name		= "__cbc-aes-neonbs",
	.cra_driver_name	= "__driver-cbc-aes-neonbs",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesbs_cbc_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesbs_cbc_set_key,
		.encrypt	= aesbs_cbc_encrypt,
		.decrypt	= aesbs_cbc_decrypt,
	},
}, {
	.cra_name		= "__ctr-aes-neonbs",
	.cra_driver_name	= "__driver-ctr-aes-neonbs",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct aesbs_ctr_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesbs_ctr_set_key,
		.encrypt	= aesbs_ctr_encrypt,
		.decrypt	= aesbs_ctr_encrypt,
	},
}, {
	.cra_name		= "__xts-aes-neonbs",
	.cra_driver_name	= "__driver-xts-aes-neonbs",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aesbs_xts_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_blkcipher = {
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= aesbs_xts_set_key,
		.encrypt	= aesbs_xts_encrypt,
		.decrypt	= aesbs_xts_decrypt,
	},
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-neonbs",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= __ablk_encrypt,
		.decrypt	= ablk_decrypt,
	}
}, {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-neonbs",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	}
}, {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-neonbs",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_ablkcipher = {
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_set_key,
		.encrypt	= ablk_encrypt,
		.decrypt	= ablk_decrypt,
	}
} };

static int __init aesbs_mod_init(void)
{
	if (!cpu_has_neon())
		return -ENODEV;

	return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
}

static void __exit aesbs_mod_exit(void)
{
	crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
}

module_init(aesbs_mod_init);
module_exit(aesbs_mod_exit);

MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL");