[XFRM]: Add sorting interface for state and template.
[deliverable/linux.git] include/net/xfrm.h
1 #ifndef _NET_XFRM_H
2 #define _NET_XFRM_H
3
4 #include <linux/compiler.h>
5 #include <linux/in.h>
6 #include <linux/xfrm.h>
7 #include <linux/spinlock.h>
8 #include <linux/list.h>
9 #include <linux/skbuff.h>
10 #include <linux/socket.h>
11 #include <linux/pfkeyv2.h>
12 #include <linux/ipsec.h>
13 #include <linux/in6.h>
14 #include <linux/mutex.h>
15
16 #include <net/sock.h>
17 #include <net/dst.h>
18 #include <net/route.h>
19 #include <net/ipv6.h>
20 #include <net/ip6_fib.h>
21
22 #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
23 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
24 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
25
26 extern struct sock *xfrm_nl;
27 extern u32 sysctl_xfrm_aevent_etime;
28 extern u32 sysctl_xfrm_aevent_rseqth;
29
30 extern struct mutex xfrm_cfg_mutex;
31
32 /* Organization of SPD aka "XFRM rules"
33 ------------------------------------
34
35 Basic objects:
36 - policy rule, struct xfrm_policy (=SPD entry)
37 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
38 - instance of a transformer, struct xfrm_state (=SA)
39 - template to clone xfrm_state, struct xfrm_tmpl
40
41 SPD is plain linear list of xfrm_policy rules, ordered by priority.
42 (To be compatible with existing pfkeyv2 implementations,
43 many rules with priority of 0x7fffffff are allowed to exist and
44 such rules are ordered in an unpredictable way, thanks to bsd folks.)
45
46 Lookup is plain linear search until the first match with selector.
47
48 If "action" is "block", then we prohibit the flow, otherwise:
49 if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
50 policy entry has list of up to XFRM_MAX_DEPTH transformations,
51 described by templates xfrm_tmpl. Each template is resolved
52 to a complete xfrm_state (see below) and we pack bundle of transformations
53 to a dst_entry returned to requestor.
54
55 dst -. xfrm .-> xfrm_state #1
56  |---. child .-> dst -. xfrm .-> xfrm_state #2
57                   |---. child .-> dst -. xfrm .-> xfrm_state #3
58                                    |---. child .-> NULL
59
60 Bundles are cached in the xfrm_policy struct (field ->bundles).
61
62
63 Resolution of xfrm_tmpl
64 -----------------------
65 Template contains:
66 1. ->mode Mode: transport or tunnel
67 2. ->id.proto Protocol: AH/ESP/IPCOMP
68 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
69 Q: allow to resolve security gateway?
70 4. ->id.spi If not zero, static SPI.
71 5. ->saddr Local tunnel endpoint, ignored for transport mode.
72 6. ->algos List of allowed algos. Plain bitmask now.
73 Q: ealgos, aalgos, calgos. What a mess...
74 7. ->share Sharing mode.
75 Q: how to implement private sharing mode? To add struct sock* to
76 flow id?
77
78 Having this template, we search the SAD for entries
79 with the appropriate mode/proto/algo, permitted by the selector.
80 If no appropriate entry is found, one is requested from the key manager.
81
82 PROBLEMS:
83 Q: How to find all the bundles referring to a physical path for
84 PMTU discovery? It seems dst should contain a list of all parents...
85 and we would enter an infinite locking hierarchy disaster.
86 No! It is easier: we will not search for them, let them find us.
87 We add a genid to each dst plus a pointer to the genid of the raw IP route;
88 PMTU discovery will update the pmtu on the raw IP route and increase its genid.
89 dst_check() will see this at the top level and trigger resyncing of the
90 metrics. Plus, it will be done via sk->sk_dst_cache. Solved.
91 */
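/* Editor's sketch (illustrative only, not part of this header's API): walking
 * a resolved bundle as drawn in the diagram above.  Each dst_entry carries one
 * xfrm_state via ->xfrm and links to the next transformation via ->child, the
 * innermost child being the raw route.  Field names are assumed from this
 * header and net/dst.h of the same tree.
 *
 *	static void walk_bundle(struct dst_entry *dst)
 *	{
 *		for (; dst != NULL; dst = dst->child)
 *			if (dst->xfrm)
 *				;	// one SA applied at this hop: dst->xfrm->id.proto
 *	}
 */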
92
93 /* Full description of state of transformer. */
94 struct xfrm_state
95 {
96 /* Note: bydst is re-used during gc */
97 struct list_head bydst;
98 struct list_head bysrc;
99 struct list_head byspi;
100
101 atomic_t refcnt;
102 spinlock_t lock;
103
104 struct xfrm_id id;
105 struct xfrm_selector sel;
106
107 /* Key manager bits */
108 struct {
109 u8 state;
110 u8 dying;
111 u32 seq;
112 } km;
113
114 /* Parameters of this state. */
115 struct {
116 u32 reqid;
117 u8 mode;
118 u8 replay_window;
119 u8 aalgo, ealgo, calgo;
120 u8 flags;
121 u16 family;
122 xfrm_address_t saddr;
123 int header_len;
124 int trailer_len;
125 } props;
126
127 struct xfrm_lifetime_cfg lft;
128
129 /* Data for transformer */
130 struct xfrm_algo *aalg;
131 struct xfrm_algo *ealg;
132 struct xfrm_algo *calg;
133
134 /* Data for encapsulator */
135 struct xfrm_encap_tmpl *encap;
136
137 /* Data for care-of address */
138 xfrm_address_t *coaddr;
139
140 /* IPComp needs an IPIP tunnel for handling uncompressed packets */
141 struct xfrm_state *tunnel;
142
143 /* If a tunnel, number of users + 1 */
144 atomic_t tunnel_users;
145
146 /* State for replay detection */
147 struct xfrm_replay_state replay;
148
149 /* Replay detection state at the time we sent the last notification */
150 struct xfrm_replay_state preplay;
151
152 /* internal flag that only holds state for delayed aevent at the
153 * moment
154 */
155 u32 xflags;
156
157 /* Replay detection notification settings */
158 u32 replay_maxage;
159 u32 replay_maxdiff;
160
161 /* Replay detection notification timer */
162 struct timer_list rtimer;
163
164 /* Statistics */
165 struct xfrm_stats stats;
166
167 struct xfrm_lifetime_cur curlft;
168 struct timer_list timer;
169
170 /* Last used time */
171 u64 lastused;
172
173 /* Reference to data common to all the instances of this
174 * transformer. */
175 struct xfrm_type *type;
176 struct xfrm_mode *mode;
177
178 /* Security context */
179 struct xfrm_sec_ctx *security;
180
181 /* Private data of this transformer, format is opaque,
182 * interpreted by xfrm_type methods. */
183 void *data;
184 };
185
186 /* xflags - make enum if more show up */
187 #define XFRM_TIME_DEFER 1
188
189 enum {
190 XFRM_STATE_VOID,
191 XFRM_STATE_ACQ,
192 XFRM_STATE_VALID,
193 XFRM_STATE_ERROR,
194 XFRM_STATE_EXPIRED,
195 XFRM_STATE_DEAD
196 };
197
198 /* callback structure passed from either netlink or pfkey */
199 struct km_event
200 {
201 union {
202 u32 hard;
203 u32 proto;
204 u32 byid;
205 u32 aevent;
206 } data;
207
208 u32 seq;
209 u32 pid;
210 u32 event;
211 };
212
213 struct xfrm_type;
214 struct xfrm_dst;
215 struct xfrm_policy_afinfo {
216 unsigned short family;
217 struct xfrm_type *type_map[IPPROTO_MAX];
218 struct xfrm_mode *mode_map[XFRM_MODE_MAX];
219 struct dst_ops *dst_ops;
220 void (*garbage_collect)(void);
221 int (*dst_lookup)(struct xfrm_dst **dst, struct flowi *fl);
222 struct dst_entry *(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy);
223 int (*bundle_create)(struct xfrm_policy *policy,
224 struct xfrm_state **xfrm,
225 int nx,
226 struct flowi *fl,
227 struct dst_entry **dst_p);
228 void (*decode_session)(struct sk_buff *skb,
229 struct flowi *fl);
230 };
231
232 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
233 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
234 extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
235 extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
236 #define XFRM_ACQ_EXPIRES 30
237
238 struct xfrm_tmpl;
239 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
240 extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
241 extern int __xfrm_state_delete(struct xfrm_state *x);
242
243 struct xfrm_state_afinfo {
244 unsigned short family;
245 struct list_head *state_bydst;
246 struct list_head *state_bysrc;
247 struct list_head *state_byspi;
248 int (*init_flags)(struct xfrm_state *x);
249 void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
250 struct xfrm_tmpl *tmpl,
251 xfrm_address_t *daddr, xfrm_address_t *saddr);
252 struct xfrm_state *(*state_lookup)(xfrm_address_t *daddr, u32 spi, u8 proto);
253 struct xfrm_state *(*state_lookup_byaddr)(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto);
254 struct xfrm_state *(*find_acq)(u8 mode, u32 reqid, u8 proto,
255 xfrm_address_t *daddr, xfrm_address_t *saddr,
256 int create);
257 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
258 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
259 };
260
261 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
262 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
263
264 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
265
266 struct xfrm_type
267 {
268 char *description;
269 struct module *owner;
270 __u8 proto;
271 __u8 flags;
272 #define XFRM_TYPE_NON_FRAGMENT 1
273
274 int (*init_state)(struct xfrm_state *x);
275 void (*destructor)(struct xfrm_state *);
276 int (*input)(struct xfrm_state *, struct sk_buff *skb);
277 int (*output)(struct xfrm_state *, struct sk_buff *pskb);
278 int (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
279 int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
280 xfrm_address_t *(*local_addr)(struct xfrm_state *, xfrm_address_t *);
281 xfrm_address_t *(*remote_addr)(struct xfrm_state *, xfrm_address_t *);
282 /* Estimate maximal size of result of transformation of a dgram */
283 u32 (*get_max_size)(struct xfrm_state *, int size);
284 };
285
286 extern int xfrm_register_type(struct xfrm_type *type, unsigned short family);
287 extern int xfrm_unregister_type(struct xfrm_type *type, unsigned short family);
288 extern struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family);
289 extern void xfrm_put_type(struct xfrm_type *type);
290
291 struct xfrm_mode {
292 int (*input)(struct xfrm_state *x, struct sk_buff *skb);
293 int (*output)(struct sk_buff *skb);
294
295 struct module *owner;
296 unsigned int encap;
297 };
298
299 extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
300 extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
301 extern struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family);
302 extern void xfrm_put_mode(struct xfrm_mode *mode);
303
304 struct xfrm_tmpl
305 {
306 /* id in template is interpreted as:
307 * daddr - destination of tunnel, may be zero for transport mode.
308 * spi - zero to acquire spi. Not zero if spi is static, then
309 * daddr must be fixed too.
310 * proto - AH/ESP/IPCOMP
311 */
312 struct xfrm_id id;
313
314 /* Source address of tunnel. Ignored if it is not a tunnel. */
315 xfrm_address_t saddr;
316
317 __u32 reqid;
318
319 /* Mode: transport, tunnel etc. */
320 __u8 mode;
321
322 /* Sharing mode: unique, this session only, this user only etc. */
323 __u8 share;
324
325 /* May skip this transformation if no SA is found */
326 __u8 optional;
327
328 /* Bit mask of algos allowed for acquisition */
329 __u32 aalgos;
330 __u32 ealgos;
331 __u32 calgos;
332 };
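/* Editor's sketch (illustrative values, not taken from this tree): filling a
 * template for ESP in tunnel mode using only the fields defined above.  The
 * addresses, the reqid and the XFRM_MODE_TUNNEL constant (from linux/xfrm.h)
 * are assumptions for the example.
 *
 *	struct xfrm_tmpl t;
 *
 *	memset(&t, 0, sizeof(t));
 *	t.id.proto    = IPPROTO_ESP;		// AH/ESP/IPCOMP
 *	t.id.daddr.a4 = htonl(0xc0a80001);	// remote tunnel endpoint 192.168.0.1
 *	t.saddr.a4    = htonl(0xc0a80002);	// local tunnel endpoint 192.168.0.2
 *	t.id.spi      = 0;			// zero: acquire an SPI dynamically
 *	t.reqid       = 1;
 *	t.mode        = XFRM_MODE_TUNNEL;
 *	t.optional    = 0;			// an SA must be found
 *	t.aalgos = t.ealgos = t.calgos = ~(__u32)0;	// allow any algorithm
 */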
333
334 #define XFRM_MAX_DEPTH 6
335
336 struct xfrm_policy
337 {
338 struct xfrm_policy *next;
339 struct list_head list;
340
341 /* This lock protects all members except for the list linkage above. */
342 rwlock_t lock;
343 atomic_t refcnt;
344 struct timer_list timer;
345
346 u8 type;
347 u32 priority;
348 u32 index;
349 struct xfrm_selector selector;
350 struct xfrm_lifetime_cfg lft;
351 struct xfrm_lifetime_cur curlft;
352 struct dst_entry *bundles;
353 __u16 family;
354 __u8 action;
355 __u8 flags;
356 __u8 dead;
357 __u8 xfrm_nr;
358 struct xfrm_sec_ctx *security;
359 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
360 };
361
362 #define XFRM_KM_TIMEOUT 30
363 /* which seqno */
364 #define XFRM_REPLAY_SEQ 1
365 #define XFRM_REPLAY_OSEQ 2
366 #define XFRM_REPLAY_SEQ_MASK 3
367 /* what happened */
368 #define XFRM_REPLAY_UPDATE XFRM_AE_CR
369 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
370
371 /* default aevent timeout in units of 100ms */
372 #define XFRM_AE_ETIME 10
373 /* Async Event timer multiplier */
374 #define XFRM_AE_ETH_M 10
375 /* default seq threshold size */
376 #define XFRM_AE_SEQT_SIZE 2
377
378 struct xfrm_mgr
379 {
380 struct list_head list;
381 char *id;
382 int (*notify)(struct xfrm_state *x, struct km_event *c);
383 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
384 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
385 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
386 int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
387 int (*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
388 };
389
390 extern int xfrm_register_km(struct xfrm_mgr *km);
391 extern int xfrm_unregister_km(struct xfrm_mgr *km);
392
393
394 extern struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
395 #ifdef CONFIG_XFRM_SUB_POLICY
396 extern struct xfrm_policy *xfrm_policy_list_sub[XFRM_POLICY_MAX*2];
397
398 static inline int xfrm_policy_lists_empty(int dir)
399 {
400 return (!xfrm_policy_list[dir] && !xfrm_policy_list_sub[dir]);
401 }
402 #else
403 static inline int xfrm_policy_lists_empty(int dir)
404 {
405 return (!xfrm_policy_list[dir]);
406 }
407 #endif
408
409 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
410 {
411 if (likely(policy != NULL))
412 atomic_inc(&policy->refcnt);
413 }
414
415 extern void __xfrm_policy_destroy(struct xfrm_policy *policy);
416
417 static inline void xfrm_pol_put(struct xfrm_policy *policy)
418 {
419 if (atomic_dec_and_test(&policy->refcnt))
420 __xfrm_policy_destroy(policy);
421 }
422
423 #ifdef CONFIG_XFRM_SUB_POLICY
424 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
425 {
426 int i;
427 for (i = npols - 1; i >= 0; --i)
428 xfrm_pol_put(pols[i]);
429 }
430 #else
431 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
432 {
433 xfrm_pol_put(pols[0]);
434 }
435 #endif
436
437 #define XFRM_DST_HSIZE 1024
438
439 static __inline__
440 unsigned __xfrm4_dst_hash(xfrm_address_t *addr)
441 {
442 unsigned h;
443 h = ntohl(addr->a4);
444 h = (h ^ (h>>16)) % XFRM_DST_HSIZE;
445 return h;
446 }
447
448 static __inline__
449 unsigned __xfrm6_dst_hash(xfrm_address_t *addr)
450 {
451 unsigned h;
452 h = ntohl(addr->a6[2]^addr->a6[3]);
453 h = (h ^ (h>>16)) % XFRM_DST_HSIZE;
454 return h;
455 }
456
457 static __inline__
458 unsigned xfrm_dst_hash(xfrm_address_t *addr, unsigned short family)
459 {
460 switch (family) {
461 case AF_INET:
462 return __xfrm4_dst_hash(addr);
463 case AF_INET6:
464 return __xfrm6_dst_hash(addr);
465 }
466 return 0;
467 }
468
469 static __inline__
470 unsigned __xfrm4_src_hash(xfrm_address_t *addr)
471 {
472 return __xfrm4_dst_hash(addr);
473 }
474
475 static __inline__
476 unsigned __xfrm6_src_hash(xfrm_address_t *addr)
477 {
478 return __xfrm6_dst_hash(addr);
479 }
480
481 static __inline__
482 unsigned xfrm_src_hash(xfrm_address_t *addr, unsigned short family)
483 {
484 switch (family) {
485 case AF_INET:
486 return __xfrm4_src_hash(addr);
487 case AF_INET6:
488 return __xfrm6_src_hash(addr);
489 }
490 return 0;
491 }
492
493 static __inline__
494 unsigned __xfrm4_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto)
495 {
496 unsigned h;
497 h = ntohl(addr->a4^spi^proto);
498 h = (h ^ (h>>10) ^ (h>>20)) % XFRM_DST_HSIZE;
499 return h;
500 }
501
502 static __inline__
503 unsigned __xfrm6_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto)
504 {
505 unsigned h;
506 h = ntohl(addr->a6[2]^addr->a6[3]^spi^proto);
507 h = (h ^ (h>>10) ^ (h>>20)) % XFRM_DST_HSIZE;
508 return h;
509 }
510
511 static __inline__
512 unsigned xfrm_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto, unsigned short family)
513 {
514 switch (family) {
515 case AF_INET:
516 return __xfrm4_spi_hash(addr, spi, proto);
517 case AF_INET6:
518 return __xfrm6_spi_hash(addr, spi, proto);
519 }
520 return 0; /*XXX*/
521 }
522
523 extern void __xfrm_state_destroy(struct xfrm_state *);
524
525 static inline void __xfrm_state_put(struct xfrm_state *x)
526 {
527 atomic_dec(&x->refcnt);
528 }
529
530 static inline void xfrm_state_put(struct xfrm_state *x)
531 {
532 if (atomic_dec_and_test(&x->refcnt))
533 __xfrm_state_destroy(x);
534 }
535
536 static inline void xfrm_state_hold(struct xfrm_state *x)
537 {
538 atomic_inc(&x->refcnt);
539 }
540
541 static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
542 {
543 __u32 *a1 = token1;
544 __u32 *a2 = token2;
545 int pdw;
546 int pbi;
547
548 pdw = prefixlen >> 5; /* num of whole __u32 in prefix */
549 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
550
551 if (pdw)
552 if (memcmp(a1, a2, pdw << 2))
553 return 0;
554
555 if (pbi) {
556 __u32 mask;
557
558 mask = htonl((0xffffffff) << (32 - pbi));
559
560 if ((a1[pdw] ^ a2[pdw]) & mask)
561 return 0;
562 }
563
564 return 1;
565 }
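/* Editor's example (illustrative): with prefixlen 24, addr_match() compares
 * only the first three octets, so 10.0.0.1 and 10.0.0.200 match while a full
 * 32-bit comparison does not.
 *
 *	__u32 a = htonl(0x0a000001);	// 10.0.0.1
 *	__u32 b = htonl(0x0a0000c8);	// 10.0.0.200
 *
 *	addr_match(&a, &b, 24);		// returns 1 (first 24 bits equal)
 *	addr_match(&a, &b, 32);		// returns 0 (last octet differs)
 */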
566
567 static __inline__
568 u16 xfrm_flowi_sport(struct flowi *fl)
569 {
570 u16 port;
571 switch(fl->proto) {
572 case IPPROTO_TCP:
573 case IPPROTO_UDP:
574 case IPPROTO_SCTP:
575 port = fl->fl_ip_sport;
576 break;
577 case IPPROTO_ICMP:
578 case IPPROTO_ICMPV6:
579 port = htons(fl->fl_icmp_type);
580 break;
581 #ifdef CONFIG_IPV6_MIP6
582 case IPPROTO_MH:
583 port = htons(fl->fl_mh_type);
584 break;
585 #endif
586 default:
587 port = 0; /*XXX*/
588 }
589 return port;
590 }
591
592 static __inline__
593 u16 xfrm_flowi_dport(struct flowi *fl)
594 {
595 u16 port;
596 switch(fl->proto) {
597 case IPPROTO_TCP:
598 case IPPROTO_UDP:
599 case IPPROTO_SCTP:
600 port = fl->fl_ip_dport;
601 break;
602 case IPPROTO_ICMP:
603 case IPPROTO_ICMPV6:
604 port = htons(fl->fl_icmp_code);
605 break;
606 default:
607 port = 0; /*XXX*/
608 }
609 return port;
610 }
611
612 static inline int
613 __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
614 {
615 return addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
616 addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
617 !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
618 !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
619 (fl->proto == sel->proto || !sel->proto) &&
620 (fl->oif == sel->ifindex || !sel->ifindex);
621 }
622
623 static inline int
624 __xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
625 {
626 return addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
627 addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
628 !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
629 !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
630 (fl->proto == sel->proto || !sel->proto) &&
631 (fl->oif == sel->ifindex || !sel->ifindex);
632 }
633
634 static inline int
635 xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
636 unsigned short family)
637 {
638 switch (family) {
639 case AF_INET:
640 return __xfrm4_selector_match(sel, fl);
641 case AF_INET6:
642 return __xfrm6_selector_match(sel, fl);
643 }
644 return 0;
645 }
646
647 #ifdef CONFIG_SECURITY_NETWORK_XFRM
648 /* If neither has a context --> match
649 * Otherwise, both must have a context and the sids, doi, alg must match
650 */
651 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
652 {
653 return ((!s1 && !s2) ||
654 (s1 && s2 &&
655 (s1->ctx_sid == s2->ctx_sid) &&
656 (s1->ctx_doi == s2->ctx_doi) &&
657 (s1->ctx_alg == s2->ctx_alg)));
658 }
659 #else
660 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
661 {
662 return 1;
663 }
664 #endif
665
666 /* A struct encoding a bundle of transformations to apply to some set of flows.
667 *
668 * dst->child points to the next element of the bundle.
669 * dst->xfrm points to an instance of a transformer.
670 *
671 * Due to unfortunate limitations of the current routing cache, which we
672 * have no time to fix, it mirrors struct rtable and is bound to the same
673 * routing key, including saddr and daddr. However, we can have many
674 * bundles differing by session id. All the bundles grow from a parent
675 * policy rule.
676 */
677 struct xfrm_dst
678 {
679 union {
680 struct xfrm_dst *next;
681 struct dst_entry dst;
682 struct rtable rt;
683 struct rt6_info rt6;
684 } u;
685 struct dst_entry *route;
686 u32 route_mtu_cached;
687 u32 child_mtu_cached;
688 u32 route_cookie;
689 u32 path_cookie;
690 };
691
692 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
693 {
694 dst_release(xdst->route);
695 if (likely(xdst->u.dst.xfrm))
696 xfrm_state_put(xdst->u.dst.xfrm);
697 }
698
699 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
700
701 struct sec_path
702 {
703 atomic_t refcnt;
704 int len;
705 struct xfrm_state *xvec[XFRM_MAX_DEPTH];
706 };
707
708 static inline struct sec_path *
709 secpath_get(struct sec_path *sp)
710 {
711 if (sp)
712 atomic_inc(&sp->refcnt);
713 return sp;
714 }
715
716 extern void __secpath_destroy(struct sec_path *sp);
717
718 static inline void
719 secpath_put(struct sec_path *sp)
720 {
721 if (sp && atomic_dec_and_test(&sp->refcnt))
722 __secpath_destroy(sp);
723 }
724
725 extern struct sec_path *secpath_dup(struct sec_path *src);
726
727 static inline void
728 secpath_reset(struct sk_buff *skb)
729 {
730 #ifdef CONFIG_XFRM
731 secpath_put(skb->sp);
732 skb->sp = NULL;
733 #endif
734 }
735
736 static inline int
737 __xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
738 {
739 return (tmpl->saddr.a4 &&
740 tmpl->saddr.a4 != x->props.saddr.a4);
741 }
742
743 static inline int
744 __xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
745 {
746 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
747 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
748 }
749
750 static inline int
751 xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
752 {
753 switch (family) {
754 case AF_INET:
755 return __xfrm4_state_addr_cmp(tmpl, x);
756 case AF_INET6:
757 return __xfrm6_state_addr_cmp(tmpl, x);
758 }
759 return !0;
760 }
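/* Editor's note (illustrative): like memcmp(), the *_addr_cmp() helpers above
 * return nonzero on a mismatch and 0 when the template's source address is
 * unset or equal to the state's, so a lookup loop typically rejects with:
 *
 *	if (xfrm_state_addr_cmp(tmpl, x, family))
 *		continue;	// template saddr set and different, skip this SA
 */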
761
762 #ifdef CONFIG_XFRM
763
764 extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
765
766 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
767 {
768 if (sk && sk->sk_policy[XFRM_POLICY_IN])
769 return __xfrm_policy_check(sk, dir, skb, family);
770
771 return (xfrm_policy_lists_empty(dir) && !skb->sp) ||
772 (skb->dst->flags & DST_NOPOLICY) ||
773 __xfrm_policy_check(sk, dir, skb, family);
774 }
775
776 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
777 {
778 return xfrm_policy_check(sk, dir, skb, AF_INET);
779 }
780
781 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
782 {
783 return xfrm_policy_check(sk, dir, skb, AF_INET6);
784 }
785
786 extern int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family);
787 extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
788
789 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
790 {
791 return xfrm_policy_lists_empty(XFRM_POLICY_OUT) ||
792 (skb->dst->flags & DST_NOXFRM) ||
793 __xfrm_route_forward(skb, family);
794 }
795
796 static inline int xfrm4_route_forward(struct sk_buff *skb)
797 {
798 return xfrm_route_forward(skb, AF_INET);
799 }
800
801 static inline int xfrm6_route_forward(struct sk_buff *skb)
802 {
803 return xfrm_route_forward(skb, AF_INET6);
804 }
805
806 extern int __xfrm_sk_clone_policy(struct sock *sk);
807
808 static inline int xfrm_sk_clone_policy(struct sock *sk)
809 {
810 if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
811 return __xfrm_sk_clone_policy(sk);
812 return 0;
813 }
814
815 extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
816
817 static inline void xfrm_sk_free_policy(struct sock *sk)
818 {
819 if (unlikely(sk->sk_policy[0] != NULL)) {
820 xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
821 sk->sk_policy[0] = NULL;
822 }
823 if (unlikely(sk->sk_policy[1] != NULL)) {
824 xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
825 sk->sk_policy[1] = NULL;
826 }
827 }
828
829 #else
830
831 static inline void xfrm_sk_free_policy(struct sock *sk) {}
832 static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
833 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
834 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
835 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
836 {
837 return 1;
838 }
839 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
840 {
841 return 1;
842 }
843 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
844 {
845 return 1;
846 }
847 #endif
848
849 static __inline__
850 xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
851 {
852 switch (family){
853 case AF_INET:
854 return (xfrm_address_t *)&fl->fl4_dst;
855 case AF_INET6:
856 return (xfrm_address_t *)&fl->fl6_dst;
857 }
858 return NULL;
859 }
860
861 static __inline__
862 xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
863 {
864 switch (family){
865 case AF_INET:
866 return (xfrm_address_t *)&fl->fl4_src;
867 case AF_INET6:
868 return (xfrm_address_t *)&fl->fl6_src;
869 }
870 return NULL;
871 }
872
873 static __inline__ int
874 __xfrm4_state_addr_check(struct xfrm_state *x,
875 xfrm_address_t *daddr, xfrm_address_t *saddr)
876 {
877 if (daddr->a4 == x->id.daddr.a4 &&
878 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
879 return 1;
880 return 0;
881 }
882
883 static __inline__ int
884 __xfrm6_state_addr_check(struct xfrm_state *x,
885 xfrm_address_t *daddr, xfrm_address_t *saddr)
886 {
887 if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
888 (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
889 ipv6_addr_any((struct in6_addr *)saddr) ||
890 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
891 return 1;
892 return 0;
893 }
894
895 static __inline__ int
896 xfrm_state_addr_check(struct xfrm_state *x,
897 xfrm_address_t *daddr, xfrm_address_t *saddr,
898 unsigned short family)
899 {
900 switch (family) {
901 case AF_INET:
902 return __xfrm4_state_addr_check(x, daddr, saddr);
903 case AF_INET6:
904 return __xfrm6_state_addr_check(x, daddr, saddr);
905 }
906 return 0;
907 }
908
909 static __inline__ int
910 xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
911 unsigned short family)
912 {
913 switch (family) {
914 case AF_INET:
915 return __xfrm4_state_addr_check(x,
916 (xfrm_address_t *)&fl->fl4_dst,
917 (xfrm_address_t *)&fl->fl4_src);
918 case AF_INET6:
919 return __xfrm6_state_addr_check(x,
920 (xfrm_address_t *)&fl->fl6_dst,
921 (xfrm_address_t *)&fl->fl6_src);
922 }
923 return 0;
924 }
925
926 static inline int xfrm_state_kern(struct xfrm_state *x)
927 {
928 return atomic_read(&x->tunnel_users);
929 }
930
931 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
932 {
933 return (!userproto || proto == userproto ||
934 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
935 proto == IPPROTO_ESP ||
936 proto == IPPROTO_COMP)));
937 }
938
939 /*
940 * xfrm algorithm information
941 */
942 struct xfrm_algo_auth_info {
943 u16 icv_truncbits;
944 u16 icv_fullbits;
945 };
946
947 struct xfrm_algo_encr_info {
948 u16 blockbits;
949 u16 defkeybits;
950 };
951
952 struct xfrm_algo_comp_info {
953 u16 threshold;
954 };
955
956 struct xfrm_algo_desc {
957 char *name;
958 char *compat;
959 u8 available:1;
960 union {
961 struct xfrm_algo_auth_info auth;
962 struct xfrm_algo_encr_info encr;
963 struct xfrm_algo_comp_info comp;
964 } uinfo;
965 struct sadb_alg desc;
966 };
967
968 /* XFRM tunnel handlers. */
969 struct xfrm_tunnel {
970 int (*handler)(struct sk_buff *skb);
971 int (*err_handler)(struct sk_buff *skb, __u32 info);
972
973 struct xfrm_tunnel *next;
974 int priority;
975 };
976
977 struct xfrm6_tunnel {
978 int (*handler)(struct sk_buff *skb);
979 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
980 int type, int code, int offset, __u32 info);
981
982 struct xfrm6_tunnel *next;
983 int priority;
984 };
985
986 extern void xfrm_init(void);
987 extern void xfrm4_init(void);
988 extern void xfrm6_init(void);
989 extern void xfrm6_fini(void);
990 extern void xfrm_state_init(void);
991 extern void xfrm4_state_init(void);
992 extern void xfrm6_state_init(void);
993 extern void xfrm6_state_fini(void);
994
995 extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *);
996 extern struct xfrm_state *xfrm_state_alloc(void);
997 extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
998 struct flowi *fl, struct xfrm_tmpl *tmpl,
999 struct xfrm_policy *pol, int *err,
1000 unsigned short family);
1001 extern int xfrm_state_check_expire(struct xfrm_state *x);
1002 extern void xfrm_state_insert(struct xfrm_state *x);
1003 extern int xfrm_state_add(struct xfrm_state *x);
1004 extern int xfrm_state_update(struct xfrm_state *x);
1005 extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family);
1006 extern struct xfrm_state *xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family);
1007 #ifdef CONFIG_XFRM_SUB_POLICY
1008 extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1009 int n, unsigned short family);
1010 extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1011 int n, unsigned short family);
1012 #else
1013 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1014 int n, unsigned short family)
1015 {
1016 return -ENOSYS;
1017 }
1018
1019 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1020 int n, unsigned short family)
1021 {
1022 return -ENOSYS;
1023 }
1024 #endif
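/* Editor's sketch of the sorting interface declared above (the one added by
 * this change).  The exact ordering rules are per-family and live in the
 * afinfo ->tmpl_sort/->state_sort hooks; the array names and the fallback
 * below are assumptions for the example.  On success src[] is written into
 * dst[] in the order the transformations must be applied; without
 * CONFIG_XFRM_SUB_POLICY the stubs return -ENOSYS and the caller keeps the
 * original order.
 *
 *	struct xfrm_tmpl *sorted[XFRM_MAX_DEPTH];
 *
 *	if (xfrm_tmpl_sort(sorted, tmpl_vec, n, AF_INET6) < 0)
 *		memcpy(sorted, tmpl_vec, n * sizeof(*sorted));
 */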
1025 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
1026 extern int xfrm_state_delete(struct xfrm_state *x);
1027 extern void xfrm_state_flush(u8 proto);
1028 extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
1029 extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
1030 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
1031 extern int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb);
1032 extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1033 extern int xfrm_init_state(struct xfrm_state *x);
1034 extern int xfrm4_rcv(struct sk_buff *skb);
1035 extern int xfrm4_output(struct sk_buff *skb);
1036 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler);
1037 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler);
1038 extern int xfrm6_rcv_spi(struct sk_buff *skb, u32 spi);
1039 extern int xfrm6_rcv(struct sk_buff **pskb);
1040 extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1041 xfrm_address_t *saddr, u8 proto);
1042 extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler);
1043 extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler);
1044 extern u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
1045 extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
1046 extern u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
1047 extern int xfrm6_output(struct sk_buff *skb);
1048 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1049 u8 **prevhdr);
1050
1051 #ifdef CONFIG_XFRM
1052 extern int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type);
1053 extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
1054 extern int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family);
1055 #else
1056 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1057 {
1058 return -ENOPROTOOPT;
1059 }
1060
1061 static inline int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
1062 {
1063 /* should not happen */
1064 kfree_skb(skb);
1065 return 0;
1066 }
1067 static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family)
1068 {
1069 return -EINVAL;
1070 }
1071 #endif
1072
1073 struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp);
1074 extern int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), void *);
1075 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1076 struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
1077 struct xfrm_selector *sel,
1078 struct xfrm_sec_ctx *ctx, int delete);
1079 struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete);
1080 void xfrm_policy_flush(u8 type);
1081 u32 xfrm_get_acqseq(void);
1082 void xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1083 struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1084 xfrm_address_t *daddr, xfrm_address_t *saddr,
1085 int create, unsigned short family);
1086 extern void xfrm_policy_flush(u8 type);
1087 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1088 extern int xfrm_flush_bundles(void);
1089 extern void xfrm_flush_all_bundles(void);
1090 extern int xfrm_bundle_ok(struct xfrm_dst *xdst, struct flowi *fl, int family, int strict);
1091 extern void xfrm_init_pmtu(struct dst_entry *dst);
1092
1093 extern wait_queue_head_t km_waitq;
1094 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
1095 extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
1096 extern int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
1097
1098 extern void xfrm_input_init(void);
1099 extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq);
1100
1101 extern void xfrm_probe_algs(void);
1102 extern int xfrm_count_auth_supported(void);
1103 extern int xfrm_count_enc_supported(void);
1104 extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1105 extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1106 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1107 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1108 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1109 extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
1110 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
1111 extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
1112
1113 struct hash_desc;
1114 struct scatterlist;
1115 typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
1116 unsigned int);
1117
1118 extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm,
1119 int offset, int len, icv_update_fn_t icv_update);
1120
1121 static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
1122 int family)
1123 {
1124 switch (family) {
1125 default:
1126 case AF_INET:
1127 return a->a4 - b->a4;
1128 case AF_INET6:
1129 return ipv6_addr_cmp((struct in6_addr *)a,
1130 (struct in6_addr *)b);
1131 }
1132 }
1133
1134 static inline int xfrm_policy_id2dir(u32 index)
1135 {
1136 return index & 7;
1137 }
1138
1139 static inline int xfrm_aevent_is_on(void)
1140 {
1141 struct sock *nlsk;
1142 int ret = 0;
1143
1144 rcu_read_lock();
1145 nlsk = rcu_dereference(xfrm_nl);
1146 if (nlsk)
1147 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1148 rcu_read_unlock();
1149 return ret;
1150 }
1151
1152 static inline void xfrm_aevent_doreplay(struct xfrm_state *x)
1153 {
1154 if (xfrm_aevent_is_on())
1155 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1156 }
1157
1158
1159 #endif /* _NET_XFRM_H */