#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"

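/*
 * Duplicate @src into @dst: the metadata is copied wholesale, then the
 * key material is duplicated into a freshly allocated buffer so the two
 * keys can be destroyed independently.
 */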
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
			  const struct ceph_crypto_key *src)
{
	memcpy(dst, src, sizeof(struct ceph_crypto_key));
	dst->key = kmemdup(src->key, src->len, GFP_NOFS);
	if (!dst->key)
		return -ENOMEM;
	return 0;
}

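/*
 * Wire format, as consumed by ceph_crypto_key_decode() below: u16 type,
 * the raw 'created' timestamp, u16 key length, then key-length bytes of
 * key material.
 */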
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
	if (*p + sizeof(u16) + sizeof(key->created) +
	    sizeof(u16) + key->len > end)
		return -ERANGE;
	ceph_encode_16(p, key->type);
	ceph_encode_copy(p, &key->created, sizeof(key->created));
	ceph_encode_16(p, key->len);
	ceph_encode_copy(p, key->key, key->len);
	return 0;
}

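/*
 * Inverse of ceph_crypto_key_encode().  On success key->key points to a
 * freshly allocated copy of the key material, which the caller owns.
 */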
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	ceph_decode_need(p, end, key->len, bad);
	key->key = kmalloc(key->len, GFP_NOFS);
	if (!key->key)
		return -ENOMEM;
	ceph_decode_copy(p, key->key, key->len);
	return 0;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}

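/*
 * Decode a base64-armored key string (the format found in ceph keyring
 * files).  ceph_unarmor() does the base64 step; the inlen * 3 / 4 below
 * is the worst-case decoded size.
 */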
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
	int inlen = strlen(inkey);
	int blen = inlen * 3 / 4;
	void *buf, *p;
	int ret;

	dout("crypto_key_unarmor %s\n", inkey);
	buf = kmalloc(blen, GFP_NOFS);
	if (!buf)
		return -ENOMEM;
	blen = ceph_unarmor(buf, inkey, inkey+inlen);
	if (blen < 0) {
		kfree(buf);
		return blen;
	}

	p = buf;
	ret = ceph_crypto_key_decode(key, &p, p + blen);
	kfree(buf);
	if (ret)
		return ret;
	dout("crypto_key_unarmor key %p type %d len %d\n", key,
	     key->type, key->len);
	return 0;
}

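/*
 * All encryption here is AES in CBC mode; passing CRYPTO_ALG_ASYNC in
 * the mask asks the crypto API for a synchronous implementation.
 */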
static struct crypto_skcipher *ceph_crypto_alloc_cipher(void)
{
	return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}

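/*
 * A single constant IV (CEPH_AES_IV) is used for every message rather
 * than a per-message IV.
 */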
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;

/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
			 const void *buf, unsigned int buf_len)
{
	struct scatterlist *sg;
	const bool is_vmalloc = is_vmalloc_addr(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int chunk_cnt = 1;
	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
	int i;
	int ret;

	if (buf_len == 0) {
		memset(sgt, 0, sizeof(*sgt));
		return -EINVAL;
	}

	if (is_vmalloc) {
		chunk_cnt = chunk_len >> PAGE_SHIFT;
		chunk_len = PAGE_SIZE;
	}

	if (chunk_cnt > 1) {
		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
		if (ret)
			return ret;
	} else {
		WARN_ON(chunk_cnt != 1);
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = sgt->orig_nents = 1;
	}

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		struct page *page;
		unsigned int len = min(chunk_len - off, buf_len);

		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		sg_set_page(sg, page, len, off);

		off = 0;
		buf += len;
		buf_len -= len;
	}
	WARN_ON(buf_len != 0);

	return 0;
}

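/*
 * Free the table only if setup_sgtable() took the sg_alloc_table()
 * path; a single-entry table lives in the caller's prealloc_sg.
 */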
static void teardown_sgtable(struct sg_table *sgt)
{
	if (sgt->orig_nents > 1)
		sg_free_table(sgt);
}

static int ceph_aes_encrypt(const void *key, int key_len,
			    void *dst, size_t *dst_len,
			    const void *src, size_t src_len)
{
	struct scatterlist sg_in[2], prealloc_sg;
	struct sg_table sg_out;
	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int ret;
	char iv[AES_BLOCK_SIZE];
	size_t zero_padding = (0x10 - (src_len & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

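	/*
	 * PKCS#7-style padding: append 1..16 bytes, each holding the pad
	 * length, so e.g. 13 bytes of input get three 0x03 bytes appended
	 * and an exact multiple of 16 gets a full extra block.
	 */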
	memset(pad, zero_padding, zero_padding);

	*dst_len = src_len + zero_padding;

	sg_init_table(sg_in, 2);
	sg_set_buf(&sg_in[0], src, src_len);
	sg_set_buf(&sg_in[1], pad, zero_padding);
	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
	if (ret)
		goto out_tfm;

	ret = crypto_skcipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_sg;
	memcpy(iv, aes_iv, AES_BLOCK_SIZE);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
				   src_len + zero_padding, iv);

	/*
	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
		       src, src_len, 1);
	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
		       pad, zero_padding, 1);
	*/
	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	if (ret < 0) {
		pr_err("ceph_aes_encrypt failed %d\n", ret);
		goto out_sg;
	}
	/*
	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
		       dst, *dst_len, 1);
	*/

out_sg:
	teardown_sgtable(&sg_out);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}

static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
			     size_t *dst_len,
			     const void *src1, size_t src1_len,
			     const void *src2, size_t src2_len)
{
	struct scatterlist sg_in[3], prealloc_sg;
	struct sg_table sg_out;
	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int ret;
	char iv[AES_BLOCK_SIZE];
	size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	memset(pad, zero_padding, zero_padding);

	*dst_len = src1_len + src2_len + zero_padding;

	sg_init_table(sg_in, 3);
	sg_set_buf(&sg_in[0], src1, src1_len);
	sg_set_buf(&sg_in[1], src2, src2_len);
	sg_set_buf(&sg_in[2], pad, zero_padding);
	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
	if (ret)
		goto out_tfm;

	ret = crypto_skcipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_sg;
	memcpy(iv, aes_iv, AES_BLOCK_SIZE);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
				   src1_len + src2_len + zero_padding, iv);

	/*
	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
		       src1, src1_len, 1);
	print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
		       src2, src2_len, 1);
	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
		       pad, zero_padding, 1);
	*/
	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	if (ret < 0) {
		pr_err("ceph_aes_encrypt2 failed %d\n", ret);
		goto out_sg;
	}
	/*
	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
		       dst, *dst_len, 1);
	*/

out_sg:
	teardown_sgtable(&sg_out);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}

static int ceph_aes_decrypt(const void *key, int key_len,
			    void *dst, size_t *dst_len,
			    const void *src, size_t src_len)
{
	struct sg_table sg_in;
	struct scatterlist sg_out[2], prealloc_sg;
	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	char pad[16];
	char iv[AES_BLOCK_SIZE];
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_table(sg_out, 2);
	sg_set_buf(&sg_out[0], dst, *dst_len);
	sg_set_buf(&sg_out[1], pad, sizeof(pad));
	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
	if (ret)
		goto out_tfm;

	ret = crypto_skcipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_sg;
	memcpy(iv, aes_iv, AES_BLOCK_SIZE);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
				   src_len, iv);

	/*
	print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
		       src, src_len, 1);
	*/
	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt failed %d\n", ret);
		goto out_sg;
	}

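	/*
	 * The last plaintext byte is the pad length.  It lands in @dst if
	 * the payload filled it, otherwise in the pad overflow buffer;
	 * strip that many bytes to get the payload length.
	 */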
	if (src_len <= *dst_len)
		last_byte = ((char *)dst)[src_len - 1];
	else
		last_byte = pad[src_len - *dst_len - 1];
	if (last_byte <= 16 && src_len >= last_byte) {
		*dst_len = src_len - last_byte;
	} else {
		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		ret = -EPERM;  /* bad padding */
		goto out_sg;
	}
	/*
	print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
		       dst, *dst_len, 1);
	*/

out_sg:
	teardown_sgtable(&sg_in);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}

static int ceph_aes_decrypt2(const void *key, int key_len,
			     void *dst1, size_t *dst1_len,
			     void *dst2, size_t *dst2_len,
			     const void *src, size_t src_len)
{
	struct sg_table sg_in;
	struct scatterlist sg_out[3], prealloc_sg;
	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	char pad[16];
	char iv[AES_BLOCK_SIZE];
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_table(sg_out, 3);
	sg_set_buf(&sg_out[0], dst1, *dst1_len);
	sg_set_buf(&sg_out[1], dst2, *dst2_len);
	sg_set_buf(&sg_out[2], pad, sizeof(pad));
	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
	if (ret)
		goto out_tfm;

	ret = crypto_skcipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_sg;
	memcpy(iv, aes_iv, AES_BLOCK_SIZE);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
				   src_len, iv);

	/*
	print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
		       src, src_len, 1);
	*/
	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt2 failed %d\n", ret);
		goto out_sg;
	}

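	/*
	 * As in ceph_aes_decrypt(), the final pad-length byte may have
	 * landed in dst1, dst2 or the pad overflow buffer, depending on
	 * how much plaintext there was.
	 */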
	if (src_len <= *dst1_len)
		last_byte = ((char *)dst1)[src_len - 1];
	else if (src_len <= *dst1_len + *dst2_len)
		last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
	else
		last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
	if (last_byte <= 16 && src_len >= last_byte) {
		src_len -= last_byte;
	} else {
		pr_err("ceph_aes_decrypt2 got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		ret = -EPERM;  /* bad padding */
		goto out_sg;
	}

	if (src_len < *dst1_len) {
		*dst1_len = src_len;
		*dst2_len = 0;
	} else {
		*dst2_len = src_len - *dst1_len;
	}
	/*
	print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1,
		       dst1, *dst1_len, 1);
	print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1,
		       dst2, *dst2_len, 1);
	*/

out_sg:
	teardown_sgtable(&sg_in);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}

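/*
 * The exported wrappers below dispatch on the key type: CEPH_CRYPTO_NONE
 * is a plain copy, CEPH_CRYPTO_AES goes through the AES helpers above.
 */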
int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt(secret->key, secret->len, dst,
					dst_len, src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_decrypt2(struct ceph_crypto_key *secret,
		  void *dst1, size_t *dst1_len,
		  void *dst2, size_t *dst2_len,
		  const void *src, size_t src_len)
{
	size_t t;

	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst1_len + *dst2_len < src_len)
			return -ERANGE;
		t = min(*dst1_len, src_len);
		memcpy(dst1, src, t);
		*dst1_len = t;
		src += t;
		src_len -= t;
		if (src_len) {
			t = min(*dst2_len, src_len);
			memcpy(dst2, src, t);
			*dst2_len = t;
		}
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt2(secret->key, secret->len,
					 dst1, dst1_len, dst2, dst2_len,
					 src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt(secret->key, secret->len, dst,
					dst_len, src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		  const void *src1, size_t src1_len,
		  const void *src2, size_t src2_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src1_len + src2_len)
			return -ERANGE;
		memcpy(dst, src1, src1_len);
		memcpy(dst + src1_len, src2, src2_len);
		*dst_len = src1_len + src2_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
					 src1, src1_len, src2, src2_len);

	default:
		return -EINVAL;
	}
}

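/*
 * Keyring glue: the "ceph" key type's payload is a key in the binary
 * format understood by ceph_crypto_key_decode().
 */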
static int ceph_key_preparse(struct key_preparsed_payload *prep)
{
	struct ceph_crypto_key *ckey;
	size_t datalen = prep->datalen;
	int ret;
	void *p;

	ret = -EINVAL;
	if (datalen == 0 || datalen > 32767 || !prep->data)
		goto err;

	ret = -ENOMEM;
	ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
	if (!ckey)
		goto err;

	/* TODO ceph_crypto_key_decode should really take const input */
	p = (void *)prep->data;
	ret = ceph_crypto_key_decode(ckey, &p, (char *)prep->data + datalen);
	if (ret < 0)
		goto err_ckey;

	prep->payload.data[0] = ckey;
	prep->quotalen = datalen;
	return 0;

err_ckey:
	kfree(ckey);
err:
	return ret;
}

static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
	struct ceph_crypto_key *ckey = prep->payload.data[0];

	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}

static void ceph_key_destroy(struct key *key)
{
	struct ceph_crypto_key *ckey = key->payload.data[0];

	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}

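/*
 * key_type_ceph is registered by ceph_crypto_init() and unregistered
 * by ceph_crypto_shutdown() below.
 */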
struct key_type key_type_ceph = {
	.name = "ceph",
	.preparse = ceph_key_preparse,
	.free_preparse = ceph_key_free_preparse,
	.instantiate = generic_key_instantiate,
	.destroy = ceph_key_destroy,
};

int ceph_crypto_init(void)
{
	return register_key_type(&key_type_ceph);
}

void ceph_crypto_shutdown(void)
{
	unregister_key_type(&key_type_ceph);
}