[CRYPTO] tcrypt: Use HMAC template and hash interface
[deliverable/linux.git] / net / xfrm / xfrm_algo.c
CommitLineData
1da177e4
LT
1/*
2 * xfrm algorithm interface
3 *
4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 */
11
1da177e4
LT
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/pfkeyv2.h>
15#include <linux/crypto.h>
16#include <net/xfrm.h>
17#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
18#include <net/ah.h>
19#endif
20#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
21#include <net/esp.h>
22#endif
23#include <asm/scatterlist.h>
24
25/*
26 * Algorithms supported by IPsec. These entries contain properties which
27 * are used in key negotiation and xfrm processing, and are used to verify
28 * that instantiated crypto transforms have correct parameters for IPsec
29 * purposes.
30 */
/*
 * Authentication (ICV) algorithms usable by AH/ESP.
 *
 * .uinfo.auth.icv_truncbits is the number of digest bits actually carried
 * on the wire (IPsec truncates the full digest); .icv_fullbits is the
 * untruncated digest size.  .desc carries the PF_KEY (RFC 2367) algorithm
 * id and the key-size limits advertised during SA negotiation.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	/* NULL authentication: no ICV, zero-length key. */
	.name = "digest_null",

	.uinfo = {
		.auth = {
			.icv_truncbits = 0,
			.icv_fullbits = 0,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	/* HMAC-MD5-96 (RFC 2403): 128-bit digest truncated to 96 bits. */
	.name = "md5",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	/* HMAC-SHA1-96 (RFC 2404): 160-bit digest truncated to 96 bits. */
	.name = "sha1",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	/* HMAC-SHA-256 truncated to 96 bits. */
	.name = "sha256",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 256,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	/* HMAC-RIPEMD-160 truncated to 96 bits. */
	.name = "ripemd160",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
};
118
/*
 * Encryption algorithms usable by ESP.
 *
 * .name is the crypto-layer template spelling ("cbc(aes)"); .compat is
 * the older bare cipher name that userspace may still pass, matched as a
 * fallback by xfrm_get_byname().  .desc carries the PF_KEY algorithm id,
 * the IV length in bytes and the negotiable key-size range in bits.
 */
static struct xfrm_algo_desc ealg_list[] = {
{
	/* NULL encryption (RFC 2410): no IV, no key. */
	.name = "ecb(cipher_null)",
	.compat = "cipher_null",

	.uinfo = {
		.encr = {
			.blockbits = 8,
			.defkeybits = 0,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	/* DES-CBC (RFC 2405): single DES, long obsolete. */
	.name = "cbc(des)",
	.compat = "des",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 64,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
},
{
	/* Triple-DES EDE in CBC mode (RFC 2451). */
	.name = "cbc(des3_ede)",
	.compat = "des3_ede",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 192,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
},
{
	/* CAST-128 in CBC mode (RFC 2451). */
	.name = "cbc(cast128)",
	.compat = "cast128",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
},
{
	/* Blowfish in CBC mode (RFC 2451): variable key 40..448 bits. */
	.name = "cbc(blowfish)",
	.compat = "blowfish",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
},
{
	/* AES in CBC mode (RFC 3602): 128/192/256-bit keys. */
	.name = "cbc(aes)",
	.compat = "aes",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	/* Serpent in CBC mode. */
	.name = "cbc(serpent)",
	.compat = "serpent",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256,
	}
},
{
	/* Twofish in CBC mode. */
	.name = "cbc(twofish)",
	.compat = "twofish",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};
265
/*
 * Compression algorithms usable by IPComp.  Only the PF_KEY algorithm id
 * and a compression threshold are needed; the threshold presumably gates
 * whether a packet is worth compressing (see the IPComp code) — TODO
 * confirm the exact unit/semantics against the ipcomp implementation.
 */
static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.uinfo = {
		.comp = {
			.threshold = 50,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
295
/* Number of entries in the authentication algorithm table. */
static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}
300
/* Number of entries in the encryption algorithm table. */
static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}
305
/* Number of entries in the compression algorithm table. */
static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}
310
311/* Todo: generic iterators */
312struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
313{
314 int i;
315
316 for (i = 0; i < aalg_entries(); i++) {
317 if (aalg_list[i].desc.sadb_alg_id == alg_id) {
318 if (aalg_list[i].available)
319 return &aalg_list[i];
320 else
321 break;
322 }
323 }
324 return NULL;
325}
326EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);
327
328struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
329{
330 int i;
331
332 for (i = 0; i < ealg_entries(); i++) {
333 if (ealg_list[i].desc.sadb_alg_id == alg_id) {
334 if (ealg_list[i].available)
335 return &ealg_list[i];
336 else
337 break;
338 }
339 }
340 return NULL;
341}
342EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);
343
344struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
345{
346 int i;
347
348 for (i = 0; i < calg_entries(); i++) {
349 if (calg_list[i].desc.sadb_alg_id == alg_id) {
350 if (calg_list[i].available)
351 return &calg_list[i];
352 else
353 break;
354 }
355 }
356 return NULL;
357}
358EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
359
360static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
361 int entries, char *name,
362 int probe)
363{
364 int i, status;
365
366 if (!name)
367 return NULL;
368
369 for (i = 0; i < entries; i++) {
04ff1260
HX
370 if (strcmp(name, list[i].name) &&
371 (!list[i].compat || strcmp(name, list[i].compat)))
1da177e4
LT
372 continue;
373
374 if (list[i].available)
375 return &list[i];
376
377 if (!probe)
378 break;
379
380 status = crypto_alg_available(name, 0);
381 if (!status)
382 break;
383
384 list[i].available = status;
385 return &list[i];
386 }
387 return NULL;
388}
389
/* By-name lookup (optionally probing the crypto layer) in the auth table. */
struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
395
/* By-name lookup (optionally probing the crypto layer) in the encryption table. */
struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
401
/* By-name lookup (optionally probing the crypto layer) in the compression table. */
struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
407
408struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
409{
410 if (idx >= aalg_entries())
411 return NULL;
412
413 return &aalg_list[idx];
414}
415EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);
416
417struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
418{
419 if (idx >= ealg_entries())
420 return NULL;
421
422 return &ealg_list[idx];
423}
424EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
425
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called
 * by pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, status;

	/* Must run in process context — crypto_alg_available() presumably
	 * may sleep/load modules; TODO confirm. */
	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_alg_available(aalg_list[i].name, 0);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_alg_available(ealg_list[i].name, 0);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_alg_available(calg_list[i].name, 0);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
458
459int xfrm_count_auth_supported(void)
460{
461 int i, n;
462
463 for (i = 0, n = 0; i < aalg_entries(); i++)
464 if (aalg_list[i].available)
465 n++;
466 return n;
467}
468EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);
469
470int xfrm_count_enc_supported(void)
471{
472 int i, n;
473
474 for (i = 0, n = 0; i < ealg_entries(); i++)
475 if (ealg_list[i].available)
476 n++;
477 return n;
478}
479EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
480
/* Move to common area: it is shared with AH. */

/*
 * Feed the byte range [offset, offset + len) of @skb into @icv_update on
 * @tfm, one single-entry scatterlist per contiguous chunk, without copying
 * or linearizing the skb.  Walks the linear head, then the page fragments,
 * then recurses into the frag_list.  BUG()s if the skb holds fewer than
 * offset + len bytes.
 */
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
		  int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct scatterlist sg;

	/* Linear header part first. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		/* Pre-sg_set_* scatterlist API: fields filled by hand. */
		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		icv_update(tfm, &sg, 1);

		if ((len -= copy) == 0)
			return;
		offset += copy;
	}

	/* Then each page fragment that overlaps the remaining range. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset-start;
			sg.length = copy;

			icv_update(tfm, &sg, 1);

			if (!(len -= copy))
				return;
			offset += copy;
		}
		start = end;
	}

	/* Finally recurse into any chained skbs on the frag_list. */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				skb_icv_walk(list, tfm, offset-start, copy, icv_update);
				if ((len -= copy) == 0)
					return;
				offset += copy;
			}
			start = end;
		}
	}
	/* Reaching here means the skb was shorter than offset + len. */
	BUG_ON(len);
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
554
555#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
556
/* Generic-looking, but currently has no users outside this area. */

/*
 * Map the byte range [offset, offset + len) of @skb onto the scatterlist
 * array @sg (linear head, then page fragments, then frag_list skbs, via
 * recursion).  The caller must provide enough scatterlist entries.
 * Returns the number of entries filled; BUG()s if the skb holds fewer
 * than offset + len bytes.
 */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	/* Linear header part. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		/* Pre-sg_set_* scatterlist API: fields filled by hand. */
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	/* One scatterlist entry per overlapping page fragment. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	/* Recurse into chained skbs, appending after the entries so far. */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	/* Reaching here means the skb was shorter than offset + len. */
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
624
/* Check that skb data bits are writable. If they are not, copy data
 * to newly created private area. If "tailbits" is given, make sure that
 * tailbits bytes beyond current end of skb are writable.
 *
 * Returns amount of elements of scatterlist to load for subsequent
 * transformations and pointer to writable trailer skb.
 */

int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little of trouble, not enough of space for trailer.
		 * This should not happen, when stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even more
		 * space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Hard case: walk the frag_list, copying any fragment that is not
	 * privately writable. */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* This fragment must be replaced by a private copy. */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			/* Preserve socket write accounting on the copy. */
			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
725
726void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
727{
728 if (tail != skb) {
729 skb->data_len += len;
730 skb->len += len;
731 }
732 return skb_put(tail, len);
733}
734EXPORT_SYMBOL_GPL(pskb_put);
735#endif
This page took 0.258463 seconds and 5 git commands to generate.