crypto/seqiv.c
/*
 * seqiv: Sequence Number IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt. This algorithm is mainly useful for CTR and similar modes.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

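/*
 * Worked example (illustrative values): with an 8-byte IV, sequence
 * number 5 and salt 0x0011223344556677, seqiv_geniv() below produces
 * be64(5) ^ salt = 0x0011223344556672.  IVs therefore remain unique
 * for as long as the sequence number never repeats under a given salt.
 */
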
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>

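/*
 * Per-transform state.  The salt is a flexible array member sized to
 * the IV length when the instance is created (seqiv_alloc() grows
 * cra_ctxsize by sizeof(struct seqiv_ctx) and the per-type allocators
 * account for the IV size), aligned so crypto_xor() can use u32 words.
 */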
struct seqiv_ctx {
	spinlock_t lock;
	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};

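/*
 * Completion handlers used when the caller's IV buffer was misaligned
 * and a bounce buffer had to be allocated: copy the generated IV back
 * into the original request on success and free the bounce buffer.
 */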
static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
{
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	struct crypto_ablkcipher *geniv;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = skcipher_givcrypt_reqtfm(req);
	memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));

out:
	kfree(subreq->info);
}

static void seqiv_complete(struct crypto_async_request *base, int err)
{
	struct skcipher_givcrypt_request *req = base->data;

	seqiv_complete2(req, err);
	skcipher_givcrypt_complete(req, err);
}

static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
{
	struct aead_request *subreq = aead_givcrypt_reqctx(req);
	struct crypto_aead *geniv;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = aead_givcrypt_reqtfm(req);
	memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));

out:
	kfree(subreq->iv);
}

static void seqiv_aead_complete(struct crypto_async_request *base, int err)
{
	struct aead_givcrypt_request *req = base->data;

	seqiv_aead_complete2(req, err);
	aead_givcrypt_complete(req, err);
}

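/*
 * Generate the IV: write the sequence number big-endian into the low
 * bytes of info (zero-padding any bytes above 64 bits), then xor the
 * whole IV with the salt.
 */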
static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
			unsigned int ivsize)
{
	unsigned int len = ivsize;

	if (ivsize > sizeof(u64)) {
		memset(info, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(seq);
	memcpy(info + ivsize - len, &seq, len);
	crypto_xor(info, ctx->salt, ivsize);
}

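/*
 * Encrypt with an IV generated from req->seq.  If the caller's IV
 * buffer does not satisfy the cipher's alignment mask, generate the IV
 * in a kmalloc'd bounce buffer and let seqiv_complete() copy it back.
 */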
static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	crypto_completion_t complete;
	void *data;
	u8 *info;
	unsigned int ivsize;
	int err;

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));

	complete = req->creq.base.complete;
	data = req->creq.base.data;
	info = req->creq.info;

	ivsize = crypto_ablkcipher_ivsize(geniv);

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_ablkcipher_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->creq.base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		complete = seqiv_complete;
		data = req;
	}

	ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
					data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, info);

	seqiv_geniv(ctx, info, req->seq, ivsize);
	memcpy(req->giv, info, ivsize);

	err = crypto_ablkcipher_encrypt(subreq);
	if (unlikely(info != req->creq.info))
		seqiv_complete2(req, err);
	return err;
}

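/*
 * AEAD counterpart of seqiv_givencrypt(), using the same bounce-buffer
 * fallback for misaligned IVs.
 */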
static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *areq = &req->areq;
	struct aead_request *subreq = aead_givcrypt_reqctx(req);
	crypto_completion_t complete;
	void *data;
	u8 *info;
	unsigned int ivsize;
	int err;

	aead_request_set_tfm(subreq, aead_geniv_base(geniv));

	complete = areq->base.complete;
	data = areq->base.data;
	info = areq->iv;

	ivsize = crypto_aead_ivsize(geniv);

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, areq->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		complete = seqiv_aead_complete;
		data = req;
	}

	aead_request_set_callback(subreq, areq->base.flags, complete, data);
	aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
			       info);
	aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);

	seqiv_geniv(ctx, info, req->seq, ivsize);
	memcpy(req->giv, info, ivsize);

	err = crypto_aead_encrypt(subreq);
	if (unlikely(info != areq->iv))
		seqiv_aead_complete2(req, err);
	return err;
}

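/*
 * First-use entry points: generate the salt lazily from the default
 * RNG under the context lock, switch the givencrypt hook over to the
 * fast path, then fall through to it.
 */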
static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_ablkcipher_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return seqiv_givencrypt(req);
}

static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req)
{
	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first)
		goto unlock;

	crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return seqiv_aead_givencrypt(req);
}

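/*
 * Instance init: set up the lock and reserve room in the request
 * context for the inner (ablkcipher or AEAD) subrequest.
 */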
static int seqiv_init(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);

	spin_lock_init(&ctx->lock);

	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

	return skcipher_geniv_init(tfm);
}

static int seqiv_aead_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);

	spin_lock_init(&ctx->lock);

	tfm->crt_aead.reqsize = sizeof(struct aead_request);

	return aead_geniv_init(tfm);
}

static struct crypto_template seqiv_tmpl;

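/*
 * Per-type instance constructors: both wire up the first-use
 * givencrypt hook and account for the IV-sized salt in cra_ctxsize.
 */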
static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;

	inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		goto out;

	inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;

	inst->alg.cra_init = seqiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
	return inst;
}

static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;

	inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		goto out;

	inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;

	inst->alg.cra_init = seqiv_aead_init;
	inst->alg.cra_exit = aead_geniv_exit;

	inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;

out:
	return inst;
}

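/*
 * Top-level constructor: grab the default RNG for salt generation and
 * dispatch on the requested algorithm type (AEAD vs. ablkcipher).
 */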
static struct crypto_instance *seqiv_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	err = crypto_get_default_rng();
	if (err)
		return ERR_PTR(err);

	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
		inst = seqiv_ablkcipher_alloc(tb);
	else
		inst = seqiv_aead_alloc(tb);

	if (IS_ERR(inst))
		goto put_rng;

	inst->alg.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);

out:
	return inst;

put_rng:
	crypto_put_default_rng();
	goto out;
}

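/*
 * Destructor: mirror the type dispatch of seqiv_alloc() and drop the
 * default-RNG reference taken there.
 */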
static void seqiv_free(struct crypto_instance *inst)
{
	if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
		skcipher_geniv_free(inst);
	else
		aead_geniv_free(inst);
	crypto_put_default_rng();
}

static struct crypto_template seqiv_tmpl = {
	.name = "seqiv",
	.alloc = seqiv_alloc,
	.free = seqiv_free,
	.module = THIS_MODULE,
};

static int __init seqiv_module_init(void)
{
	return crypto_register_template(&seqiv_tmpl);
}

static void __exit seqiv_module_exit(void)
{
	crypto_unregister_template(&seqiv_tmpl);
}

module_init(seqiv_module_init);
module_exit(seqiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sequence Number IV Generator");