#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"
16 int ceph_crypto_key_clone(struct ceph_crypto_key
*dst
,
17 const struct ceph_crypto_key
*src
)
19 memcpy(dst
, src
, sizeof(struct ceph_crypto_key
));
20 dst
->key
= kmemdup(src
->key
, src
->len
, GFP_NOFS
);
26 int ceph_crypto_key_encode(struct ceph_crypto_key
*key
, void **p
, void *end
)
28 if (*p
+ sizeof(u16
) + sizeof(key
->created
) +
29 sizeof(u16
) + key
->len
> end
)
31 ceph_encode_16(p
, key
->type
);
32 ceph_encode_copy(p
, &key
->created
, sizeof(key
->created
));
33 ceph_encode_16(p
, key
->len
);
34 ceph_encode_copy(p
, key
->key
, key
->len
);
38 int ceph_crypto_key_decode(struct ceph_crypto_key
*key
, void **p
, void *end
)
40 ceph_decode_need(p
, end
, 2*sizeof(u16
) + sizeof(key
->created
), bad
);
41 key
->type
= ceph_decode_16(p
);
42 ceph_decode_copy(p
, &key
->created
, sizeof(key
->created
));
43 key
->len
= ceph_decode_16(p
);
44 ceph_decode_need(p
, end
, key
->len
, bad
);
45 key
->key
= kmalloc(key
->len
, GFP_NOFS
);
48 ceph_decode_copy(p
, key
->key
, key
->len
);
52 dout("failed to decode crypto key\n");
56 int ceph_crypto_key_unarmor(struct ceph_crypto_key
*key
, const char *inkey
)
58 int inlen
= strlen(inkey
);
59 int blen
= inlen
* 3 / 4;
63 dout("crypto_key_unarmor %s\n", inkey
);
64 buf
= kmalloc(blen
, GFP_NOFS
);
67 blen
= ceph_unarmor(buf
, inkey
, inkey
+inlen
);
74 ret
= ceph_crypto_key_decode(key
, &p
, p
+ blen
);
78 dout("crypto_key_unarmor key %p type %d len %d\n", key
,
83 static struct crypto_skcipher
*ceph_crypto_alloc_cipher(void)
85 return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC
);
/*
 * Fixed IV shared by every AES-CBC operation; the value comes from
 * the CEPH_AES_IV constant (part of the ceph auth protocol, which
 * uses a well-known IV rather than a per-message one).
 */
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
91 * Should be used for buffers allocated with ceph_kvmalloc().
92 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
93 * in-buffer (msg front).
95 * Dispose of @sgt with teardown_sgtable().
97 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
98 * in cases where a single sg is sufficient. No attempt to reduce the
99 * number of sgs by squeezing physically contiguous pages together is
100 * made though, for simplicity.
102 static int setup_sgtable(struct sg_table
*sgt
, struct scatterlist
*prealloc_sg
,
103 const void *buf
, unsigned int buf_len
)
105 struct scatterlist
*sg
;
106 const bool is_vmalloc
= is_vmalloc_addr(buf
);
107 unsigned int off
= offset_in_page(buf
);
108 unsigned int chunk_cnt
= 1;
109 unsigned int chunk_len
= PAGE_ALIGN(off
+ buf_len
);
114 memset(sgt
, 0, sizeof(*sgt
));
119 chunk_cnt
= chunk_len
>> PAGE_SHIFT
;
120 chunk_len
= PAGE_SIZE
;
124 ret
= sg_alloc_table(sgt
, chunk_cnt
, GFP_NOFS
);
128 WARN_ON(chunk_cnt
!= 1);
129 sg_init_table(prealloc_sg
, 1);
130 sgt
->sgl
= prealloc_sg
;
131 sgt
->nents
= sgt
->orig_nents
= 1;
134 for_each_sg(sgt
->sgl
, sg
, sgt
->orig_nents
, i
) {
136 unsigned int len
= min(chunk_len
- off
, buf_len
);
139 page
= vmalloc_to_page(buf
);
141 page
= virt_to_page(buf
);
143 sg_set_page(sg
, page
, len
, off
);
149 WARN_ON(buf_len
!= 0);
154 static void teardown_sgtable(struct sg_table
*sgt
)
156 if (sgt
->orig_nents
> 1)
160 static int ceph_aes_encrypt(const void *key
, int key_len
,
161 void *dst
, size_t *dst_len
,
162 const void *src
, size_t src_len
)
164 struct scatterlist sg_in
[2], prealloc_sg
;
165 struct sg_table sg_out
;
166 struct crypto_skcipher
*tfm
= ceph_crypto_alloc_cipher();
167 SKCIPHER_REQUEST_ON_STACK(req
, tfm
);
169 int ivsize
= AES_BLOCK_SIZE
;
171 size_t zero_padding
= (0x10 - (src_len
& 0x0f));
177 memset(pad
, zero_padding
, zero_padding
);
179 *dst_len
= src_len
+ zero_padding
;
181 sg_init_table(sg_in
, 2);
182 sg_set_buf(&sg_in
[0], src
, src_len
);
183 sg_set_buf(&sg_in
[1], pad
, zero_padding
);
184 ret
= setup_sgtable(&sg_out
, &prealloc_sg
, dst
, *dst_len
);
188 crypto_skcipher_setkey((void *)tfm
, key
, key_len
);
189 memcpy(iv
, aes_iv
, ivsize
);
191 skcipher_request_set_tfm(req
, tfm
);
192 skcipher_request_set_callback(req
, 0, NULL
, NULL
);
193 skcipher_request_set_crypt(req
, sg_in
, sg_out
.sgl
,
194 src_len
+ zero_padding
, iv
);
197 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
199 print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
201 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
202 pad, zero_padding, 1);
204 ret
= crypto_skcipher_encrypt(req
);
205 skcipher_request_zero(req
);
207 pr_err("ceph_aes_crypt failed %d\n", ret
);
211 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
216 teardown_sgtable(&sg_out
);
218 crypto_free_skcipher(tfm
);
222 static int ceph_aes_encrypt2(const void *key
, int key_len
, void *dst
,
224 const void *src1
, size_t src1_len
,
225 const void *src2
, size_t src2_len
)
227 struct scatterlist sg_in
[3], prealloc_sg
;
228 struct sg_table sg_out
;
229 struct crypto_skcipher
*tfm
= ceph_crypto_alloc_cipher();
230 SKCIPHER_REQUEST_ON_STACK(req
, tfm
);
232 int ivsize
= AES_BLOCK_SIZE
;
234 size_t zero_padding
= (0x10 - ((src1_len
+ src2_len
) & 0x0f));
240 memset(pad
, zero_padding
, zero_padding
);
242 *dst_len
= src1_len
+ src2_len
+ zero_padding
;
244 sg_init_table(sg_in
, 3);
245 sg_set_buf(&sg_in
[0], src1
, src1_len
);
246 sg_set_buf(&sg_in
[1], src2
, src2_len
);
247 sg_set_buf(&sg_in
[2], pad
, zero_padding
);
248 ret
= setup_sgtable(&sg_out
, &prealloc_sg
, dst
, *dst_len
);
252 crypto_skcipher_setkey((void *)tfm
, key
, key_len
);
253 memcpy(iv
, aes_iv
, ivsize
);
255 skcipher_request_set_tfm(req
, tfm
);
256 skcipher_request_set_callback(req
, 0, NULL
, NULL
);
257 skcipher_request_set_crypt(req
, sg_in
, sg_out
.sgl
,
258 src1_len
+ src2_len
+ zero_padding
, iv
);
261 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
263 print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
265 print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
267 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
268 pad, zero_padding, 1);
270 ret
= crypto_skcipher_encrypt(req
);
271 skcipher_request_zero(req
);
273 pr_err("ceph_aes_crypt2 failed %d\n", ret
);
277 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
282 teardown_sgtable(&sg_out
);
284 crypto_free_skcipher(tfm
);
288 static int ceph_aes_decrypt(const void *key
, int key_len
,
289 void *dst
, size_t *dst_len
,
290 const void *src
, size_t src_len
)
292 struct sg_table sg_in
;
293 struct scatterlist sg_out
[2], prealloc_sg
;
294 struct crypto_skcipher
*tfm
= ceph_crypto_alloc_cipher();
295 SKCIPHER_REQUEST_ON_STACK(req
, tfm
);
297 int ivsize
= AES_BLOCK_SIZE
;
305 sg_init_table(sg_out
, 2);
306 sg_set_buf(&sg_out
[0], dst
, *dst_len
);
307 sg_set_buf(&sg_out
[1], pad
, sizeof(pad
));
308 ret
= setup_sgtable(&sg_in
, &prealloc_sg
, src
, src_len
);
312 crypto_skcipher_setkey((void *)tfm
, key
, key_len
);
313 memcpy(iv
, aes_iv
, ivsize
);
315 skcipher_request_set_tfm(req
, tfm
);
316 skcipher_request_set_callback(req
, 0, NULL
, NULL
);
317 skcipher_request_set_crypt(req
, sg_in
.sgl
, sg_out
,
321 print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
323 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
326 ret
= crypto_skcipher_decrypt(req
);
327 skcipher_request_zero(req
);
329 pr_err("ceph_aes_decrypt failed %d\n", ret
);
333 if (src_len
<= *dst_len
)
334 last_byte
= ((char *)dst
)[src_len
- 1];
336 last_byte
= pad
[src_len
- *dst_len
- 1];
337 if (last_byte
<= 16 && src_len
>= last_byte
) {
338 *dst_len
= src_len
- last_byte
;
340 pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
341 last_byte
, (int)src_len
);
342 return -EPERM
; /* bad padding */
345 print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
350 teardown_sgtable(&sg_in
);
352 crypto_free_skcipher(tfm
);
356 static int ceph_aes_decrypt2(const void *key
, int key_len
,
357 void *dst1
, size_t *dst1_len
,
358 void *dst2
, size_t *dst2_len
,
359 const void *src
, size_t src_len
)
361 struct sg_table sg_in
;
362 struct scatterlist sg_out
[3], prealloc_sg
;
363 struct crypto_skcipher
*tfm
= ceph_crypto_alloc_cipher();
364 SKCIPHER_REQUEST_ON_STACK(req
, tfm
);
366 int ivsize
= AES_BLOCK_SIZE
;
374 sg_init_table(sg_out
, 3);
375 sg_set_buf(&sg_out
[0], dst1
, *dst1_len
);
376 sg_set_buf(&sg_out
[1], dst2
, *dst2_len
);
377 sg_set_buf(&sg_out
[2], pad
, sizeof(pad
));
378 ret
= setup_sgtable(&sg_in
, &prealloc_sg
, src
, src_len
);
382 crypto_skcipher_setkey((void *)tfm
, key
, key_len
);
383 memcpy(iv
, aes_iv
, ivsize
);
385 skcipher_request_set_tfm(req
, tfm
);
386 skcipher_request_set_callback(req
, 0, NULL
, NULL
);
387 skcipher_request_set_crypt(req
, sg_in
.sgl
, sg_out
,
391 print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
393 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
396 ret
= crypto_skcipher_decrypt(req
);
397 skcipher_request_zero(req
);
399 pr_err("ceph_aes_decrypt failed %d\n", ret
);
403 if (src_len
<= *dst1_len
)
404 last_byte
= ((char *)dst1
)[src_len
- 1];
405 else if (src_len
<= *dst1_len
+ *dst2_len
)
406 last_byte
= ((char *)dst2
)[src_len
- *dst1_len
- 1];
408 last_byte
= pad
[src_len
- *dst1_len
- *dst2_len
- 1];
409 if (last_byte
<= 16 && src_len
>= last_byte
) {
410 src_len
-= last_byte
;
412 pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
413 last_byte
, (int)src_len
);
414 return -EPERM
; /* bad padding */
417 if (src_len
< *dst1_len
) {
421 *dst2_len
= src_len
- *dst1_len
;
424 print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1,
426 print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1,
431 teardown_sgtable(&sg_in
);
433 crypto_free_skcipher(tfm
);
438 int ceph_decrypt(struct ceph_crypto_key
*secret
, void *dst
, size_t *dst_len
,
439 const void *src
, size_t src_len
)
441 switch (secret
->type
) {
442 case CEPH_CRYPTO_NONE
:
443 if (*dst_len
< src_len
)
445 memcpy(dst
, src
, src_len
);
449 case CEPH_CRYPTO_AES
:
450 return ceph_aes_decrypt(secret
->key
, secret
->len
, dst
,
451 dst_len
, src
, src_len
);
458 int ceph_decrypt2(struct ceph_crypto_key
*secret
,
459 void *dst1
, size_t *dst1_len
,
460 void *dst2
, size_t *dst2_len
,
461 const void *src
, size_t src_len
)
465 switch (secret
->type
) {
466 case CEPH_CRYPTO_NONE
:
467 if (*dst1_len
+ *dst2_len
< src_len
)
469 t
= min(*dst1_len
, src_len
);
470 memcpy(dst1
, src
, t
);
475 t
= min(*dst2_len
, src_len
);
476 memcpy(dst2
, src
, t
);
481 case CEPH_CRYPTO_AES
:
482 return ceph_aes_decrypt2(secret
->key
, secret
->len
,
483 dst1
, dst1_len
, dst2
, dst2_len
,
491 int ceph_encrypt(struct ceph_crypto_key
*secret
, void *dst
, size_t *dst_len
,
492 const void *src
, size_t src_len
)
494 switch (secret
->type
) {
495 case CEPH_CRYPTO_NONE
:
496 if (*dst_len
< src_len
)
498 memcpy(dst
, src
, src_len
);
502 case CEPH_CRYPTO_AES
:
503 return ceph_aes_encrypt(secret
->key
, secret
->len
, dst
,
504 dst_len
, src
, src_len
);
511 int ceph_encrypt2(struct ceph_crypto_key
*secret
, void *dst
, size_t *dst_len
,
512 const void *src1
, size_t src1_len
,
513 const void *src2
, size_t src2_len
)
515 switch (secret
->type
) {
516 case CEPH_CRYPTO_NONE
:
517 if (*dst_len
< src1_len
+ src2_len
)
519 memcpy(dst
, src1
, src1_len
);
520 memcpy(dst
+ src1_len
, src2
, src2_len
);
521 *dst_len
= src1_len
+ src2_len
;
524 case CEPH_CRYPTO_AES
:
525 return ceph_aes_encrypt2(secret
->key
, secret
->len
, dst
, dst_len
,
526 src1
, src1_len
, src2
, src2_len
);
533 static int ceph_key_preparse(struct key_preparsed_payload
*prep
)
535 struct ceph_crypto_key
*ckey
;
536 size_t datalen
= prep
->datalen
;
541 if (datalen
<= 0 || datalen
> 32767 || !prep
->data
)
545 ckey
= kmalloc(sizeof(*ckey
), GFP_KERNEL
);
549 /* TODO ceph_crypto_key_decode should really take const input */
550 p
= (void *)prep
->data
;
551 ret
= ceph_crypto_key_decode(ckey
, &p
, (char*)prep
->data
+datalen
);
555 prep
->payload
.data
[0] = ckey
;
556 prep
->quotalen
= datalen
;
565 static void ceph_key_free_preparse(struct key_preparsed_payload
*prep
)
567 struct ceph_crypto_key
*ckey
= prep
->payload
.data
[0];
568 ceph_crypto_key_destroy(ckey
);
572 static void ceph_key_destroy(struct key
*key
)
574 struct ceph_crypto_key
*ckey
= key
->payload
.data
[0];
576 ceph_crypto_key_destroy(ckey
);
580 struct key_type key_type_ceph
= {
582 .preparse
= ceph_key_preparse
,
583 .free_preparse
= ceph_key_free_preparse
,
584 .instantiate
= generic_key_instantiate
,
585 .destroy
= ceph_key_destroy
,
588 int ceph_crypto_init(void) {
589 return register_key_type(&key_type_ceph
);
592 void ceph_crypto_shutdown(void) {
593 unregister_key_type(&key_type_ceph
);