xfrm: rename struct xfrm_filter
include/net/xfrm.h
1 #ifndef _NET_XFRM_H
2 #define _NET_XFRM_H
3
4 #include <linux/compiler.h>
5 #include <linux/xfrm.h>
6 #include <linux/spinlock.h>
7 #include <linux/list.h>
8 #include <linux/skbuff.h>
9 #include <linux/socket.h>
10 #include <linux/pfkeyv2.h>
11 #include <linux/ipsec.h>
12 #include <linux/in6.h>
13 #include <linux/mutex.h>
14 #include <linux/audit.h>
15 #include <linux/slab.h>
16
17 #include <net/sock.h>
18 #include <net/dst.h>
19 #include <net/ip.h>
20 #include <net/route.h>
21 #include <net/ipv6.h>
22 #include <net/ip6_fib.h>
23 #include <net/flow.h>
24
25 #include <linux/interrupt.h>
26
27 #ifdef CONFIG_XFRM_STATISTICS
28 #include <net/snmp.h>
29 #endif
30
31 #define XFRM_PROTO_ESP 50
32 #define XFRM_PROTO_AH 51
33 #define XFRM_PROTO_COMP 108
34 #define XFRM_PROTO_IPIP 4
35 #define XFRM_PROTO_IPV6 41
36 #define XFRM_PROTO_ROUTING IPPROTO_ROUTING
37 #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
38
39 #define XFRM_ALIGN4(len) (((len) + 3) & ~3)
40 #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
41 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
42 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
43 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
44 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
45
46 #ifdef CONFIG_XFRM_STATISTICS
47 #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
48 #define XFRM_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
49 #define XFRM_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.xfrm_statistics, field)
50 #else
51 #define XFRM_INC_STATS(net, field) ((void)(net))
52 #define XFRM_INC_STATS_BH(net, field) ((void)(net))
53 #define XFRM_INC_STATS_USER(net, field) ((void)(net))
54 #endif
55
56
57 /* Organization of SPD aka "XFRM rules"
58 ------------------------------------
59
60 Basic objects:
61 - policy rule, struct xfrm_policy (=SPD entry)
62 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
63 - instance of a transformer, struct xfrm_state (=SA)
64 - template to clone xfrm_state, struct xfrm_tmpl
65
66 SPD is plain linear list of xfrm_policy rules, ordered by priority.
67 (To be compatible with existing pfkeyv2 implementations,
68 many rules with priority of 0x7fffffff are allowed to exist and
69 such rules are ordered in an unpredictable way, thanks to bsd folks.)
70
71 Lookup is plain linear search until the first match with selector.
72
73 If "action" is "block", then we prohibit the flow, otherwise:
74 if "xfrm_nr" is zero, the flow passes untransformed. Otherwise,
75 the policy entry has a list of up to XFRM_MAX_DEPTH transformations,
76 described by templates xfrm_tmpl. Each template is resolved
77 to a complete xfrm_state (see below) and we pack the bundle of
78 transformations into a dst_entry returned to the requestor.
79
80 dst -. xfrm .-> xfrm_state #1
81  |---. child .-> dst -. xfrm .-> xfrm_state #2
82                   |---. child .-> dst -. xfrm .-> xfrm_state #3
83                                    |---. child .-> NULL
84
85 Bundles are cached at xfrm_policy struct (field ->bundles).
86
87
88 Resolution of xfrm_tmpl
89 -----------------------
90 Template contains:
91 1. ->mode Mode: transport or tunnel
92 2. ->id.proto Protocol: AH/ESP/IPCOMP
93 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
94 Q: allow to resolve security gateway?
95 4. ->id.spi If not zero, static SPI.
96 5. ->saddr Local tunnel endpoint, ignored for transport mode.
97 6. ->algos List of allowed algos. Plain bitmask now.
98 Q: ealgos, aalgos, calgos. What a mess...
99 7. ->share Sharing mode.
100 Q: how to implement private sharing mode? To add struct sock* to
101 flow id?
102
103 Given this template, we search through the SAD for entries
104 with an appropriate mode/proto/algo, permitted by the selector.
105 If no appropriate entry is found, one is requested from the key manager.
106
107 PROBLEMS:
108 Q: How to find all the bundles referring to a physical path for
109 PMTU discovery? It seems dst should contain a list of all parents...
110 and we enter an infinite locking hierarchy disaster.
111 No! It is easier: we will not search for them, let them find us.
112 We add a genid to each dst plus a pointer to the genid of the raw IP route;
113 PMTU discovery will update the pmtu on the raw IP route and increase its genid.
114 dst_check() will see this for the top level and trigger resyncing of
115 metrics. Plus, it will be done via sk->sk_dst_cache. Solved.
116 */
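
/*
 * Editor's illustrative sketch, not part of the original header: walking a
 * bundle as drawn above.  Each dst in the chain carries one transformation
 * in ->xfrm and the chain ends at the raw route, whose ->xfrm is NULL.
 * Assumes the dst_entry layout of this era (->child and ->xfrm as plain
 * members, CONFIG_XFRM enabled); the helper name is made up.
 */
static inline int xfrm_bundle_depth_sketch(const struct dst_entry *dst)
{
	int depth = 0;

	while (dst && dst->xfrm) {	/* one xfrm_state per bundle element */
		depth++;
		dst = dst->child;	/* next bundle element, or the raw route */
	}

	return depth;
}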
117
118 struct xfrm_state_walk {
119 struct list_head all;
120 u8 state;
121 u8 dying;
122 u8 proto;
123 u32 seq;
124 struct xfrm_address_filter *filter;
125 };
126
127 /* Full description of state of transformer. */
128 struct xfrm_state {
129 #ifdef CONFIG_NET_NS
130 struct net *xs_net;
131 #endif
132 union {
133 struct hlist_node gclist;
134 struct hlist_node bydst;
135 };
136 struct hlist_node bysrc;
137 struct hlist_node byspi;
138
139 atomic_t refcnt;
140 spinlock_t lock;
141
142 struct xfrm_id id;
143 struct xfrm_selector sel;
144 struct xfrm_mark mark;
145 u32 tfcpad;
146
147 u32 genid;
148
149 /* Key manager bits */
150 struct xfrm_state_walk km;
151
152 /* Parameters of this state. */
153 struct {
154 u32 reqid;
155 u8 mode;
156 u8 replay_window;
157 u8 aalgo, ealgo, calgo;
158 u8 flags;
159 u16 family;
160 xfrm_address_t saddr;
161 int header_len;
162 int trailer_len;
163 u32 extra_flags;
164 } props;
165
166 struct xfrm_lifetime_cfg lft;
167
168 /* Data for transformer */
169 struct xfrm_algo_auth *aalg;
170 struct xfrm_algo *ealg;
171 struct xfrm_algo *calg;
172 struct xfrm_algo_aead *aead;
173
174 /* Data for encapsulator */
175 struct xfrm_encap_tmpl *encap;
176
177 /* Data for care-of address */
178 xfrm_address_t *coaddr;
179
180 /* IPComp needs an IPIP tunnel for handling uncompressed packets */
181 struct xfrm_state *tunnel;
182
183 /* If a tunnel, number of users + 1 */
184 atomic_t tunnel_users;
185
186 /* State for replay detection */
187 struct xfrm_replay_state replay;
188 struct xfrm_replay_state_esn *replay_esn;
189
190 /* Replay detection state at the time we sent the last notification */
191 struct xfrm_replay_state preplay;
192 struct xfrm_replay_state_esn *preplay_esn;
193
194 /* The functions for replay detection. */
195 struct xfrm_replay *repl;
196
197 /* internal flag that only holds state for delayed aevent at the
198 * moment
199 */
200 u32 xflags;
201
202 /* Replay detection notification settings */
203 u32 replay_maxage;
204 u32 replay_maxdiff;
205
206 /* Replay detection notification timer */
207 struct timer_list rtimer;
208
209 /* Statistics */
210 struct xfrm_stats stats;
211
212 struct xfrm_lifetime_cur curlft;
213 struct tasklet_hrtimer mtimer;
214
215 /* used to fix curlft->add_time when changing date */
216 long saved_tmo;
217
218 /* Last used time */
219 unsigned long lastused;
220
221 /* Reference to data common to all the instances of this
222 * transformer. */
223 const struct xfrm_type *type;
224 struct xfrm_mode *inner_mode;
225 struct xfrm_mode *inner_mode_iaf;
226 struct xfrm_mode *outer_mode;
227
228 /* Security context */
229 struct xfrm_sec_ctx *security;
230
231 /* Private data of this transformer, format is opaque,
232 * interpreted by xfrm_type methods. */
233 void *data;
234 };
235
236 static inline struct net *xs_net(struct xfrm_state *x)
237 {
238 return read_pnet(&x->xs_net);
239 }
240
241 /* xflags - make enum if more show up */
242 #define XFRM_TIME_DEFER 1
243 #define XFRM_SOFT_EXPIRE 2
244
245 enum {
246 XFRM_STATE_VOID,
247 XFRM_STATE_ACQ,
248 XFRM_STATE_VALID,
249 XFRM_STATE_ERROR,
250 XFRM_STATE_EXPIRED,
251 XFRM_STATE_DEAD
252 };
253
254 /* callback structure passed from either netlink or pfkey */
255 struct km_event {
256 union {
257 u32 hard;
258 u32 proto;
259 u32 byid;
260 u32 aevent;
261 u32 type;
262 } data;
263
264 u32 seq;
265 u32 portid;
266 u32 event;
267 struct net *net;
268 };
269
270 struct xfrm_replay {
271 void (*advance)(struct xfrm_state *x, __be32 net_seq);
272 int (*check)(struct xfrm_state *x,
273 struct sk_buff *skb,
274 __be32 net_seq);
275 int (*recheck)(struct xfrm_state *x,
276 struct sk_buff *skb,
277 __be32 net_seq);
278 void (*notify)(struct xfrm_state *x, int event);
279 int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
280 };
281
282 struct net_device;
283 struct xfrm_type;
284 struct xfrm_dst;
285 struct xfrm_policy_afinfo {
286 unsigned short family;
287 struct dst_ops *dst_ops;
288 void (*garbage_collect)(struct net *net);
289 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
290 const xfrm_address_t *saddr,
291 const xfrm_address_t *daddr);
292 int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
293 void (*decode_session)(struct sk_buff *skb,
294 struct flowi *fl,
295 int reverse);
296 int (*get_tos)(const struct flowi *fl);
297 void (*init_dst)(struct net *net,
298 struct xfrm_dst *dst);
299 int (*init_path)(struct xfrm_dst *path,
300 struct dst_entry *dst,
301 int nfheader_len);
302 int (*fill_dst)(struct xfrm_dst *xdst,
303 struct net_device *dev,
304 const struct flowi *fl);
305 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
306 };
307
308 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
309 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
310 void km_policy_notify(struct xfrm_policy *xp, int dir,
311 const struct km_event *c);
312 void km_state_notify(struct xfrm_state *x, const struct km_event *c);
313
314 struct xfrm_tmpl;
315 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
316 struct xfrm_policy *pol);
317 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
318 int __xfrm_state_delete(struct xfrm_state *x);
319
320 struct xfrm_state_afinfo {
321 unsigned int family;
322 unsigned int proto;
323 __be16 eth_proto;
324 struct module *owner;
325 const struct xfrm_type *type_map[IPPROTO_MAX];
326 struct xfrm_mode *mode_map[XFRM_MODE_MAX];
327 int (*init_flags)(struct xfrm_state *x);
328 void (*init_tempsel)(struct xfrm_selector *sel,
329 const struct flowi *fl);
330 void (*init_temprop)(struct xfrm_state *x,
331 const struct xfrm_tmpl *tmpl,
332 const xfrm_address_t *daddr,
333 const xfrm_address_t *saddr);
334 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
335 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
336 int (*output)(struct sk_buff *skb);
337 int (*output_finish)(struct sk_buff *skb);
338 int (*extract_input)(struct xfrm_state *x,
339 struct sk_buff *skb);
340 int (*extract_output)(struct xfrm_state *x,
341 struct sk_buff *skb);
342 int (*transport_finish)(struct sk_buff *skb,
343 int async);
344 void (*local_error)(struct sk_buff *skb, u32 mtu);
345 };
346
347 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
348 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
349 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
350 void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
351
352 void xfrm_state_delete_tunnel(struct xfrm_state *x);
353
354 struct xfrm_type {
355 char *description;
356 struct module *owner;
357 u8 proto;
358 u8 flags;
359 #define XFRM_TYPE_NON_FRAGMENT 1
360 #define XFRM_TYPE_REPLAY_PROT 2
361 #define XFRM_TYPE_LOCAL_COADDR 4
362 #define XFRM_TYPE_REMOTE_COADDR 8
363
364 int (*init_state)(struct xfrm_state *x);
365 void (*destructor)(struct xfrm_state *);
366 int (*input)(struct xfrm_state *, struct sk_buff *skb);
367 int (*output)(struct xfrm_state *, struct sk_buff *pskb);
368 int (*reject)(struct xfrm_state *, struct sk_buff *,
369 const struct flowi *);
370 int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
371 /* Estimate maximal size of result of transformation of a dgram */
372 u32 (*get_mtu)(struct xfrm_state *, int size);
373 };
374
375 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
376 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
377
378 struct xfrm_mode {
379 /*
380 * Remove encapsulation header.
381 *
382 * The IP header will be moved over the top of the encapsulation
383 * header.
384 *
385 * On entry, the transport header shall point to where the IP header
386 * should be and the network header shall be set to where the IP
387 * header currently is. skb->data shall point to the start of the
388 * payload.
389 */
390 int (*input2)(struct xfrm_state *x, struct sk_buff *skb);
391
392 /*
393 * This is the actual input entry point.
394 *
395 * For transport mode and equivalent this would be identical to
396 * input2 (which does not need to be set). While tunnel mode
397 * and equivalent would set this to the tunnel encapsulation function
398 * xfrm4_prepare_input that would in turn call input2.
399 */
400 int (*input)(struct xfrm_state *x, struct sk_buff *skb);
401
402 /*
403 * Add encapsulation header.
404 *
405 * On exit, the transport header will be set to the start of the
406 * encapsulation header to be filled in by x->type->output and
407 * the mac header will be set to the nextheader (protocol for
408 * IPv4) field of the extension header directly preceding the
409 * encapsulation header, or in its absence, that of the top IP
410 * header. The value of the network header will always point
411 * to the top IP header while skb->data will point to the payload.
412 */
413 int (*output2)(struct xfrm_state *x,struct sk_buff *skb);
414
415 /*
416 * This is the actual output entry point.
417 *
418 * For transport mode and equivalent this would be identical to
419 * output2 (which does not need to be set). While tunnel mode
420 * and equivalent would set this to a tunnel encapsulation function
421 * (xfrm4_prepare_output or xfrm6_prepare_output) that would in turn
422 * call output2.
423 */
424 int (*output)(struct xfrm_state *x, struct sk_buff *skb);
425
426 struct xfrm_state_afinfo *afinfo;
427 struct module *owner;
428 unsigned int encap;
429 int flags;
430 };
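
/*
 * Editor's sketch only: the shape of a transport-mode-like xfrm_mode as
 * described by the comments above.  The ->input/->output entry points do
 * all the work and ->input2/->output2 stay unset, whereas tunnel modes
 * point ->input/->output at prepare helpers that call back into
 * input2/output2.  The helper name is made up; real modes live in
 * xfrm4_mode_*.c / xfrm6_mode_*.c and register with xfrm_register_mode().
 */
static inline void xfrm_mode_wire_transport_sketch(struct xfrm_mode *m,
			int (*in)(struct xfrm_state *, struct sk_buff *),
			int (*out)(struct xfrm_state *, struct sk_buff *))
{
	/* Transport-style mode: no outer header to build or strip, so the
	 * entry points are the handlers themselves. */
	m->input  = in;
	m->output = out;
	m->encap  = XFRM_MODE_TRANSPORT;
}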
431
432 /* Flags for xfrm_mode. */
433 enum {
434 XFRM_MODE_FLAG_TUNNEL = 1,
435 };
436
437 int xfrm_register_mode(struct xfrm_mode *mode, int family);
438 int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
439
440 static inline int xfrm_af2proto(unsigned int family)
441 {
442 switch(family) {
443 case AF_INET:
444 return IPPROTO_IPIP;
445 case AF_INET6:
446 return IPPROTO_IPV6;
447 default:
448 return 0;
449 }
450 }
451
452 static inline struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
453 {
454 if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
455 (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
456 return x->inner_mode;
457 else
458 return x->inner_mode_iaf;
459 }
460
461 struct xfrm_tmpl {
462 /* id in template is interpreted as:
463 * daddr - destination of tunnel, may be zero for transport mode.
464 * spi - zero to acquire spi. Not zero if spi is static, then
465 * daddr must be fixed too.
466 * proto - AH/ESP/IPCOMP
467 */
468 struct xfrm_id id;
469
470 /* Source address of tunnel. Ignored, if it is not a tunnel. */
471 xfrm_address_t saddr;
472
473 unsigned short encap_family;
474
475 u32 reqid;
476
477 /* Mode: transport, tunnel etc. */
478 u8 mode;
479
480 /* Sharing mode: unique, this session only, this user only etc. */
481 u8 share;
482
483 /* May skip this transformation if no SA is found */
484 u8 optional;
485
486 /* Skip aalgos/ealgos/calgos checks. */
487 u8 allalgs;
488
489 /* Bit mask of algos allowed for acquisition */
490 u32 aalgos;
491 u32 ealgos;
492 u32 calgos;
493 };
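
/*
 * Editor's sketch only: filling a template that asks for ESP in tunnel mode
 * to a fixed IPv4 gateway, matching the field descriptions above.  The
 * address, reqid and "allow every algorithm" masks are made-up example
 * values, not defaults used anywhere in the tree.
 */
static inline void xfrm_tmpl_fill_sketch(struct xfrm_tmpl *t)
{
	memset(t, 0, sizeof(*t));
	t->id.proto	= IPPROTO_ESP;
	t->id.daddr.a4	= htonl(0xc0a80001);	/* 192.168.0.1, example gateway */
	t->mode		= XFRM_MODE_TUNNEL;
	t->encap_family	= AF_INET;
	t->reqid	= 1;			/* ties the SA back to the policy */
	t->aalgos = t->ealgos = t->calgos = ~(u32)0;	/* any algorithm */
}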
494
495 #define XFRM_MAX_DEPTH 6
496
497 struct xfrm_policy_walk_entry {
498 struct list_head all;
499 u8 dead;
500 };
501
502 struct xfrm_policy_walk {
503 struct xfrm_policy_walk_entry walk;
504 u8 type;
505 u32 seq;
506 };
507
508 struct xfrm_policy_queue {
509 struct sk_buff_head hold_queue;
510 struct timer_list hold_timer;
511 unsigned long timeout;
512 };
513
514 struct xfrm_policy {
515 #ifdef CONFIG_NET_NS
516 struct net *xp_net;
517 #endif
518 struct hlist_node bydst;
519 struct hlist_node byidx;
520
521 /* This lock only affects elements except for entry. */
522 rwlock_t lock;
523 atomic_t refcnt;
524 struct timer_list timer;
525
526 struct flow_cache_object flo;
527 atomic_t genid;
528 u32 priority;
529 u32 index;
530 struct xfrm_mark mark;
531 struct xfrm_selector selector;
532 struct xfrm_lifetime_cfg lft;
533 struct xfrm_lifetime_cur curlft;
534 struct xfrm_policy_walk_entry walk;
535 struct xfrm_policy_queue polq;
536 u8 type;
537 u8 action;
538 u8 flags;
539 u8 xfrm_nr;
540 u16 family;
541 struct xfrm_sec_ctx *security;
542 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
543 };
544
545 static inline struct net *xp_net(const struct xfrm_policy *xp)
546 {
547 return read_pnet(&xp->xp_net);
548 }
549
550 struct xfrm_kmaddress {
551 xfrm_address_t local;
552 xfrm_address_t remote;
553 u32 reserved;
554 u16 family;
555 };
556
557 struct xfrm_migrate {
558 xfrm_address_t old_daddr;
559 xfrm_address_t old_saddr;
560 xfrm_address_t new_daddr;
561 xfrm_address_t new_saddr;
562 u8 proto;
563 u8 mode;
564 u16 reserved;
565 u32 reqid;
566 u16 old_family;
567 u16 new_family;
568 };
569
570 #define XFRM_KM_TIMEOUT 30
571 /* what happened */
572 #define XFRM_REPLAY_UPDATE XFRM_AE_CR
573 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
574
575 /* default aevent timeout in units of 100ms */
576 #define XFRM_AE_ETIME 10
577 /* Async Event timer multiplier */
578 #define XFRM_AE_ETH_M 10
579 /* default seq threshold size */
580 #define XFRM_AE_SEQT_SIZE 2
581
582 struct xfrm_mgr {
583 struct list_head list;
584 char *id;
585 int (*notify)(struct xfrm_state *x, const struct km_event *c);
586 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
587 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
588 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
589 int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
590 int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
591 int (*migrate)(const struct xfrm_selector *sel,
592 u8 dir, u8 type,
593 const struct xfrm_migrate *m,
594 int num_bundles,
595 const struct xfrm_kmaddress *k);
596 bool (*is_alive)(const struct km_event *c);
597 };
598
599 int xfrm_register_km(struct xfrm_mgr *km);
600 int xfrm_unregister_km(struct xfrm_mgr *km);
601
602 struct xfrm_tunnel_skb_cb {
603 union {
604 struct inet_skb_parm h4;
605 struct inet6_skb_parm h6;
606 } header;
607
608 union {
609 struct ip_tunnel *ip4;
610 struct ip6_tnl *ip6;
611 } tunnel;
612 };
613
614 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
615
616 /*
617 * This structure is used while packets are being
618 * transformed by IPsec. As soon as the packet leaves IPsec, the
619 * area beyond the generic IP part may be overwritten.
620 */
621 struct xfrm_skb_cb {
622 struct xfrm_tunnel_skb_cb header;
623
624 /* Sequence number for replay protection. */
625 union {
626 struct {
627 __u32 low;
628 __u32 hi;
629 } output;
630 struct {
631 __be32 low;
632 __be32 hi;
633 } input;
634 } seq;
635 };
636
637 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
638
639 /*
640 * This structure is used by the afinfo prepare_input/prepare_output functions
641 * to transmit header information to the mode input/output functions.
642 */
643 struct xfrm_mode_skb_cb {
644 struct xfrm_tunnel_skb_cb header;
645
646 /* Copied from header for IPv4, always set to zero and DF for IPv6. */
647 __be16 id;
648 __be16 frag_off;
649
650 /* IP header length (excluding options or extension headers). */
651 u8 ihl;
652
653 /* TOS for IPv4, class for IPv6. */
654 u8 tos;
655
656 /* TTL for IPv4, hop limit for IPv6. */
657 u8 ttl;
658
659 /* Protocol for IPv4, NH for IPv6. */
660 u8 protocol;
661
662 /* Option length for IPv4, zero for IPv6. */
663 u8 optlen;
664
665 /* Used by IPv6 only, zero for IPv4. */
666 u8 flow_lbl[3];
667 };
668
669 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
670
671 /*
672 * This structure is used by the input processing to locate the SPI and
673 * related information.
674 */
675 struct xfrm_spi_skb_cb {
676 struct xfrm_tunnel_skb_cb header;
677
678 unsigned int daddroff;
679 unsigned int family;
680 };
681
682 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
683
684 /* Audit Information */
685 struct xfrm_audit {
686 u32 secid;
687 kuid_t loginuid;
688 unsigned int sessionid;
689 };
690
691 #ifdef CONFIG_AUDITSYSCALL
692 static inline struct audit_buffer *xfrm_audit_start(const char *op)
693 {
694 struct audit_buffer *audit_buf = NULL;
695
696 if (audit_enabled == 0)
697 return NULL;
698 audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
699 AUDIT_MAC_IPSEC_EVENT);
700 if (audit_buf == NULL)
701 return NULL;
702 audit_log_format(audit_buf, "op=%s", op);
703 return audit_buf;
704 }
705
706 static inline void xfrm_audit_helper_usrinfo(kuid_t auid, unsigned int ses, u32 secid,
707 struct audit_buffer *audit_buf)
708 {
709 char *secctx;
710 u32 secctx_len;
711
712 audit_log_format(audit_buf, " auid=%u ses=%u",
713 from_kuid(&init_user_ns, auid), ses);
714 if (secid != 0 &&
715 security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
716 audit_log_format(audit_buf, " subj=%s", secctx);
717 security_release_secctx(secctx, secctx_len);
718 } else
719 audit_log_task_context(audit_buf);
720 }
721
722 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, kuid_t auid,
723 unsigned int ses, u32 secid);
724 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, kuid_t auid,
725 unsigned int ses, u32 secid);
726 void xfrm_audit_state_add(struct xfrm_state *x, int result, kuid_t auid,
727 unsigned int ses, u32 secid);
728 void xfrm_audit_state_delete(struct xfrm_state *x, int result, kuid_t auid,
729 unsigned int ses, u32 secid);
730 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
731 struct sk_buff *skb);
732 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
733 __be32 net_seq);
734 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
735 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
736 __be32 net_seq);
737 void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
738 u8 proto);
739 #else
740
741 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
742 kuid_t auid, unsigned int ses, u32 secid)
743 {
744 }
745
746 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
747 kuid_t auid, unsigned int ses, u32 secid)
748 {
749 }
750
751 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
752 kuid_t auid, unsigned int ses, u32 secid)
753 {
754 }
755
756 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
757 kuid_t auid, unsigned int ses, u32 secid)
758 {
759 }
760
761 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
762 struct sk_buff *skb)
763 {
764 }
765
766 static inline void xfrm_audit_state_replay(struct xfrm_state *x,
767 struct sk_buff *skb, __be32 net_seq)
768 {
769 }
770
771 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
772 u16 family)
773 {
774 }
775
776 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
777 __be32 net_spi, __be32 net_seq)
778 {
779 }
780
781 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
782 struct sk_buff *skb, u8 proto)
783 {
784 }
785 #endif /* CONFIG_AUDITSYSCALL */
786
787 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
788 {
789 if (likely(policy != NULL))
790 atomic_inc(&policy->refcnt);
791 }
792
793 void xfrm_policy_destroy(struct xfrm_policy *policy);
794
795 static inline void xfrm_pol_put(struct xfrm_policy *policy)
796 {
797 if (atomic_dec_and_test(&policy->refcnt))
798 xfrm_policy_destroy(policy);
799 }
800
801 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
802 {
803 int i;
804 for (i = npols - 1; i >= 0; --i)
805 xfrm_pol_put(pols[i]);
806 }
807
808 void __xfrm_state_destroy(struct xfrm_state *);
809
810 static inline void __xfrm_state_put(struct xfrm_state *x)
811 {
812 atomic_dec(&x->refcnt);
813 }
814
815 static inline void xfrm_state_put(struct xfrm_state *x)
816 {
817 if (atomic_dec_and_test(&x->refcnt))
818 __xfrm_state_destroy(x);
819 }
820
821 static inline void xfrm_state_hold(struct xfrm_state *x)
822 {
823 atomic_inc(&x->refcnt);
824 }
825
826 static inline bool addr_match(const void *token1, const void *token2,
827 int prefixlen)
828 {
829 const __be32 *a1 = token1;
830 const __be32 *a2 = token2;
831 int pdw;
832 int pbi;
833
834 pdw = prefixlen >> 5; /* num of whole u32 in prefix */
835 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
836
837 if (pdw)
838 if (memcmp(a1, a2, pdw << 2))
839 return false;
840
841 if (pbi) {
842 __be32 mask;
843
844 mask = htonl((0xffffffff) << (32 - pbi));
845
846 if ((a1[pdw] ^ a2[pdw]) & mask)
847 return false;
848 }
849
850 return true;
851 }
852
853 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
854 {
855 /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
856 if (prefixlen == 0)
857 return true;
858 return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
859 }
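
/*
 * Editor's example only: with the helpers above, 10.1.2.3 matches a
 * 10.0.0.0/8 selector but not 10.0.0.0/16.  Both addresses are in network
 * byte order, as in real callers; the function name is made up.
 */
static inline bool addr4_match_example(void)
{
	__be32 host = htonl(0x0a010203);	/* 10.1.2.3 */
	__be32 net  = htonl(0x0a000000);	/* 10.0.0.0 */

	return addr4_match(host, net, 8) && !addr4_match(host, net, 16);
}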
860
861 static __inline__
862 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
863 {
864 __be16 port;
865 switch(fl->flowi_proto) {
866 case IPPROTO_TCP:
867 case IPPROTO_UDP:
868 case IPPROTO_UDPLITE:
869 case IPPROTO_SCTP:
870 port = uli->ports.sport;
871 break;
872 case IPPROTO_ICMP:
873 case IPPROTO_ICMPV6:
874 port = htons(uli->icmpt.type);
875 break;
876 case IPPROTO_MH:
877 port = htons(uli->mht.type);
878 break;
879 case IPPROTO_GRE:
880 port = htons(ntohl(uli->gre_key) >> 16);
881 break;
882 default:
883 port = 0; /*XXX*/
884 }
885 return port;
886 }
887
888 static __inline__
889 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
890 {
891 __be16 port;
892 switch(fl->flowi_proto) {
893 case IPPROTO_TCP:
894 case IPPROTO_UDP:
895 case IPPROTO_UDPLITE:
896 case IPPROTO_SCTP:
897 port = uli->ports.dport;
898 break;
899 case IPPROTO_ICMP:
900 case IPPROTO_ICMPV6:
901 port = htons(uli->icmpt.code);
902 break;
903 case IPPROTO_GRE:
904 port = htons(ntohl(uli->gre_key) & 0xffff);
905 break;
906 default:
907 port = 0; /*XXX*/
908 }
909 return port;
910 }
911
912 bool xfrm_selector_match(const struct xfrm_selector *sel,
913 const struct flowi *fl, unsigned short family);
914
915 #ifdef CONFIG_SECURITY_NETWORK_XFRM
916 /* If neither has a context --> match
917 * Otherwise, both must have a context and the sids, doi, alg must match
918 */
919 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
920 {
921 return ((!s1 && !s2) ||
922 (s1 && s2 &&
923 (s1->ctx_sid == s2->ctx_sid) &&
924 (s1->ctx_doi == s2->ctx_doi) &&
925 (s1->ctx_alg == s2->ctx_alg)));
926 }
927 #else
928 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
929 {
930 return true;
931 }
932 #endif
933
934 /* A struct encoding a bundle of transformations to apply to some set of flows.
935 *
936 * dst->child points to the next element of the bundle.
937 * dst->xfrm points to an instance of a transformer.
938 *
939 * Due to unfortunate limitations of the current routing cache, which we
940 * have no time to fix, it mirrors struct rtable and is bound to the same
941 * routing key, including saddr,daddr. However, we can have many
942 * bundles differing by session id. All the bundles grow from a parent
943 * policy rule.
944 */
945 struct xfrm_dst {
946 union {
947 struct dst_entry dst;
948 struct rtable rt;
949 struct rt6_info rt6;
950 } u;
951 struct dst_entry *route;
952 struct flow_cache_object flo;
953 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
954 int num_pols, num_xfrms;
955 #ifdef CONFIG_XFRM_SUB_POLICY
956 struct flowi *origin;
957 struct xfrm_selector *partner;
958 #endif
959 u32 xfrm_genid;
960 u32 policy_genid;
961 u32 route_mtu_cached;
962 u32 child_mtu_cached;
963 u32 route_cookie;
964 u32 path_cookie;
965 };
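
/*
 * Editor's sketch only: the top dst_entry of a bundle is embedded in an
 * xfrm_dst, so code that built the bundle can cast back and inspect the
 * policies and states behind it.  The helper name is made up; xfrm_policy.c
 * performs this kind of cast internally.
 */
static inline struct xfrm_policy *xfrm_dst_first_pol_sketch(struct dst_entry *dst)
{
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

	return xdst->num_pols > 0 ? xdst->pols[0] : NULL;
}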
966
967 #ifdef CONFIG_XFRM
968 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
969 {
970 xfrm_pols_put(xdst->pols, xdst->num_pols);
971 dst_release(xdst->route);
972 if (likely(xdst->u.dst.xfrm))
973 xfrm_state_put(xdst->u.dst.xfrm);
974 #ifdef CONFIG_XFRM_SUB_POLICY
975 kfree(xdst->origin);
976 xdst->origin = NULL;
977 kfree(xdst->partner);
978 xdst->partner = NULL;
979 #endif
980 }
981 #endif
982
983 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
984
985 struct sec_path {
986 atomic_t refcnt;
987 int len;
988 struct xfrm_state *xvec[XFRM_MAX_DEPTH];
989 };
990
991 static inline int secpath_exists(struct sk_buff *skb)
992 {
993 #ifdef CONFIG_XFRM
994 return skb->sp != NULL;
995 #else
996 return 0;
997 #endif
998 }
999
1000 static inline struct sec_path *
1001 secpath_get(struct sec_path *sp)
1002 {
1003 if (sp)
1004 atomic_inc(&sp->refcnt);
1005 return sp;
1006 }
1007
1008 void __secpath_destroy(struct sec_path *sp);
1009
1010 static inline void
1011 secpath_put(struct sec_path *sp)
1012 {
1013 if (sp && atomic_dec_and_test(&sp->refcnt))
1014 __secpath_destroy(sp);
1015 }
1016
1017 struct sec_path *secpath_dup(struct sec_path *src);
1018
1019 static inline void
1020 secpath_reset(struct sk_buff *skb)
1021 {
1022 #ifdef CONFIG_XFRM
1023 secpath_put(skb->sp);
1024 skb->sp = NULL;
1025 #endif
1026 }
1027
1028 static inline int
1029 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1030 {
1031 switch (family) {
1032 case AF_INET:
1033 return addr->a4 == 0;
1034 case AF_INET6:
1035 return ipv6_addr_any((struct in6_addr *)&addr->a6);
1036 }
1037 return 0;
1038 }
1039
1040 static inline int
1041 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1042 {
1043 return (tmpl->saddr.a4 &&
1044 tmpl->saddr.a4 != x->props.saddr.a4);
1045 }
1046
1047 static inline int
1048 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1049 {
1050 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1051 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1052 }
1053
1054 static inline int
1055 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1056 {
1057 switch (family) {
1058 case AF_INET:
1059 return __xfrm4_state_addr_cmp(tmpl, x);
1060 case AF_INET6:
1061 return __xfrm6_state_addr_cmp(tmpl, x);
1062 }
1063 return !0;
1064 }
1065
1066 #ifdef CONFIG_XFRM
1067 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1068 unsigned short family);
1069
1070 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1071 struct sk_buff *skb,
1072 unsigned int family, int reverse)
1073 {
1074 struct net *net = dev_net(skb->dev);
1075 int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1076
1077 if (sk && sk->sk_policy[XFRM_POLICY_IN])
1078 return __xfrm_policy_check(sk, ndir, skb, family);
1079
1080 return (!net->xfrm.policy_count[dir] && !skb->sp) ||
1081 (skb_dst(skb)->flags & DST_NOPOLICY) ||
1082 __xfrm_policy_check(sk, ndir, skb, family);
1083 }
1084
1085 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1086 {
1087 return __xfrm_policy_check2(sk, dir, skb, family, 0);
1088 }
1089
1090 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1091 {
1092 return xfrm_policy_check(sk, dir, skb, AF_INET);
1093 }
1094
1095 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1096 {
1097 return xfrm_policy_check(sk, dir, skb, AF_INET6);
1098 }
1099
1100 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1101 struct sk_buff *skb)
1102 {
1103 return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1104 }
1105
1106 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1107 struct sk_buff *skb)
1108 {
1109 return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1110 }
1111
1112 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1113 unsigned int family, int reverse);
1114
1115 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1116 unsigned int family)
1117 {
1118 return __xfrm_decode_session(skb, fl, family, 0);
1119 }
1120
1121 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1122 struct flowi *fl,
1123 unsigned int family)
1124 {
1125 return __xfrm_decode_session(skb, fl, family, 1);
1126 }
1127
1128 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1129
1130 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1131 {
1132 struct net *net = dev_net(skb->dev);
1133
1134 return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
1135 (skb_dst(skb)->flags & DST_NOXFRM) ||
1136 __xfrm_route_forward(skb, family);
1137 }
1138
1139 static inline int xfrm4_route_forward(struct sk_buff *skb)
1140 {
1141 return xfrm_route_forward(skb, AF_INET);
1142 }
1143
1144 static inline int xfrm6_route_forward(struct sk_buff *skb)
1145 {
1146 return xfrm_route_forward(skb, AF_INET6);
1147 }
1148
1149 int __xfrm_sk_clone_policy(struct sock *sk);
1150
1151 static inline int xfrm_sk_clone_policy(struct sock *sk)
1152 {
1153 if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
1154 return __xfrm_sk_clone_policy(sk);
1155 return 0;
1156 }
1157
1158 int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1159
1160 static inline void xfrm_sk_free_policy(struct sock *sk)
1161 {
1162 if (unlikely(sk->sk_policy[0] != NULL)) {
1163 xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
1164 sk->sk_policy[0] = NULL;
1165 }
1166 if (unlikely(sk->sk_policy[1] != NULL)) {
1167 xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
1168 sk->sk_policy[1] = NULL;
1169 }
1170 }
1171
1172 void xfrm_garbage_collect(struct net *net);
1173
1174 #else
1175
1176 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1177 static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
1178 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1179 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1180 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1181 {
1182 return 1;
1183 }
1184 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1185 {
1186 return 1;
1187 }
1188 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1189 {
1190 return 1;
1191 }
1192 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1193 struct flowi *fl,
1194 unsigned int family)
1195 {
1196 return -ENOSYS;
1197 }
1198 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1199 struct sk_buff *skb)
1200 {
1201 return 1;
1202 }
1203 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1204 struct sk_buff *skb)
1205 {
1206 return 1;
1207 }
1208 static inline void xfrm_garbage_collect(struct net *net)
1209 {
1210 }
1211 #endif
1212
1213 static __inline__
1214 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1215 {
1216 switch (family){
1217 case AF_INET:
1218 return (xfrm_address_t *)&fl->u.ip4.daddr;
1219 case AF_INET6:
1220 return (xfrm_address_t *)&fl->u.ip6.daddr;
1221 }
1222 return NULL;
1223 }
1224
1225 static __inline__
1226 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1227 {
1228 switch (family){
1229 case AF_INET:
1230 return (xfrm_address_t *)&fl->u.ip4.saddr;
1231 case AF_INET6:
1232 return (xfrm_address_t *)&fl->u.ip6.saddr;
1233 }
1234 return NULL;
1235 }
1236
1237 static __inline__
1238 void xfrm_flowi_addr_get(const struct flowi *fl,
1239 xfrm_address_t *saddr, xfrm_address_t *daddr,
1240 unsigned short family)
1241 {
1242 switch(family) {
1243 case AF_INET:
1244 memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1245 memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1246 break;
1247 case AF_INET6:
1248 *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
1249 *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
1250 break;
1251 }
1252 }
1253
1254 static __inline__ int
1255 __xfrm4_state_addr_check(const struct xfrm_state *x,
1256 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1257 {
1258 if (daddr->a4 == x->id.daddr.a4 &&
1259 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1260 return 1;
1261 return 0;
1262 }
1263
1264 static __inline__ int
1265 __xfrm6_state_addr_check(const struct xfrm_state *x,
1266 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1267 {
1268 if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1269 (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1270 ipv6_addr_any((struct in6_addr *)saddr) ||
1271 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1272 return 1;
1273 return 0;
1274 }
1275
1276 static __inline__ int
1277 xfrm_state_addr_check(const struct xfrm_state *x,
1278 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1279 unsigned short family)
1280 {
1281 switch (family) {
1282 case AF_INET:
1283 return __xfrm4_state_addr_check(x, daddr, saddr);
1284 case AF_INET6:
1285 return __xfrm6_state_addr_check(x, daddr, saddr);
1286 }
1287 return 0;
1288 }
1289
1290 static __inline__ int
1291 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1292 unsigned short family)
1293 {
1294 switch (family) {
1295 case AF_INET:
1296 return __xfrm4_state_addr_check(x,
1297 (const xfrm_address_t *)&fl->u.ip4.daddr,
1298 (const xfrm_address_t *)&fl->u.ip4.saddr);
1299 case AF_INET6:
1300 return __xfrm6_state_addr_check(x,
1301 (const xfrm_address_t *)&fl->u.ip6.daddr,
1302 (const xfrm_address_t *)&fl->u.ip6.saddr);
1303 }
1304 return 0;
1305 }
1306
1307 static inline int xfrm_state_kern(const struct xfrm_state *x)
1308 {
1309 return atomic_read(&x->tunnel_users);
1310 }
1311
1312 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1313 {
1314 return (!userproto || proto == userproto ||
1315 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1316 proto == IPPROTO_ESP ||
1317 proto == IPPROTO_COMP)));
1318 }
1319
1320 /*
1321 * xfrm algorithm information
1322 */
1323 struct xfrm_algo_aead_info {
1324 u16 icv_truncbits;
1325 };
1326
1327 struct xfrm_algo_auth_info {
1328 u16 icv_truncbits;
1329 u16 icv_fullbits;
1330 };
1331
1332 struct xfrm_algo_encr_info {
1333 u16 blockbits;
1334 u16 defkeybits;
1335 };
1336
1337 struct xfrm_algo_comp_info {
1338 u16 threshold;
1339 };
1340
1341 struct xfrm_algo_desc {
1342 char *name;
1343 char *compat;
1344 u8 available:1;
1345 u8 pfkey_supported:1;
1346 union {
1347 struct xfrm_algo_aead_info aead;
1348 struct xfrm_algo_auth_info auth;
1349 struct xfrm_algo_encr_info encr;
1350 struct xfrm_algo_comp_info comp;
1351 } uinfo;
1352 struct sadb_alg desc;
1353 };
1354
1355 /* XFRM protocol handlers. */
1356 struct xfrm4_protocol {
1357 int (*handler)(struct sk_buff *skb);
1358 int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1359 int encap_type);
1360 int (*cb_handler)(struct sk_buff *skb, int err);
1361 int (*err_handler)(struct sk_buff *skb, u32 info);
1362
1363 struct xfrm4_protocol __rcu *next;
1364 int priority;
1365 };
1366
1367 /* XFRM tunnel handlers. */
1368 struct xfrm_tunnel {
1369 int (*handler)(struct sk_buff *skb);
1370 int (*err_handler)(struct sk_buff *skb, u32 info);
1371
1372 struct xfrm_tunnel __rcu *next;
1373 int priority;
1374 };
1375
1376 struct xfrm_tunnel_notifier {
1377 int (*handler)(struct sk_buff *skb);
1378 struct xfrm_tunnel_notifier __rcu *next;
1379 int priority;
1380 };
1381
1382 struct xfrm6_tunnel {
1383 int (*handler)(struct sk_buff *skb);
1384 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1385 u8 type, u8 code, int offset, __be32 info);
1386 struct xfrm6_tunnel __rcu *next;
1387 int priority;
1388 };
1389
1390 void xfrm_init(void);
1391 void xfrm4_init(void);
1392 int xfrm_state_init(struct net *net);
1393 void xfrm_state_fini(struct net *net);
1394 void xfrm4_state_init(void);
1395 #ifdef CONFIG_XFRM
1396 int xfrm6_init(void);
1397 void xfrm6_fini(void);
1398 int xfrm6_state_init(void);
1399 void xfrm6_state_fini(void);
1400 #else
1401 static inline int xfrm6_init(void)
1402 {
1403 return 0;
1404 }
1405 static inline void xfrm6_fini(void)
1406 {
1407 ;
1408 }
1409 #endif
1410
1411 #ifdef CONFIG_XFRM_STATISTICS
1412 int xfrm_proc_init(struct net *net);
1413 void xfrm_proc_fini(struct net *net);
1414 #endif
1415
1416 int xfrm_sysctl_init(struct net *net);
1417 #ifdef CONFIG_SYSCTL
1418 void xfrm_sysctl_fini(struct net *net);
1419 #else
1420 static inline void xfrm_sysctl_fini(struct net *net)
1421 {
1422 }
1423 #endif
1424
1425 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1426 struct xfrm_address_filter *filter);
1427 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1428 int (*func)(struct xfrm_state *, int, void*), void *);
1429 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1430 struct xfrm_state *xfrm_state_alloc(struct net *net);
1431 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1432 const xfrm_address_t *saddr,
1433 const struct flowi *fl,
1434 struct xfrm_tmpl *tmpl,
1435 struct xfrm_policy *pol, int *err,
1436 unsigned short family);
1437 struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
1438 xfrm_address_t *daddr,
1439 xfrm_address_t *saddr,
1440 unsigned short family,
1441 u8 mode, u8 proto, u32 reqid);
1442 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1443 unsigned short family);
1444 int xfrm_state_check_expire(struct xfrm_state *x);
1445 void xfrm_state_insert(struct xfrm_state *x);
1446 int xfrm_state_add(struct xfrm_state *x);
1447 int xfrm_state_update(struct xfrm_state *x);
1448 struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1449 const xfrm_address_t *daddr, __be32 spi,
1450 u8 proto, unsigned short family);
1451 struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1452 const xfrm_address_t *daddr,
1453 const xfrm_address_t *saddr,
1454 u8 proto,
1455 unsigned short family);
1456 #ifdef CONFIG_XFRM_SUB_POLICY
1457 int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1458 unsigned short family, struct net *net);
1459 int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1460 unsigned short family);
1461 #else
1462 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1463 int n, unsigned short family, struct net *net)
1464 {
1465 return -ENOSYS;
1466 }
1467
1468 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1469 int n, unsigned short family)
1470 {
1471 return -ENOSYS;
1472 }
1473 #endif
1474
1475 struct xfrmk_sadinfo {
1476 u32 sadhcnt; /* current hash bkts */
1477 u32 sadhmcnt; /* max allowed hash bkts */
1478 u32 sadcnt; /* current running count */
1479 };
1480
1481 struct xfrmk_spdinfo {
1482 u32 incnt;
1483 u32 outcnt;
1484 u32 fwdcnt;
1485 u32 inscnt;
1486 u32 outscnt;
1487 u32 fwdscnt;
1488 u32 spdhcnt;
1489 u32 spdhmcnt;
1490 };
1491
1492 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1493 int xfrm_state_delete(struct xfrm_state *x);
1494 int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
1495 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1496 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1497 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1498 int xfrm_init_replay(struct xfrm_state *x);
1499 int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1500 int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
1501 int xfrm_init_state(struct xfrm_state *x);
1502 int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1503 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1504 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1505 int xfrm_output_resume(struct sk_buff *skb, int err);
1506 int xfrm_output(struct sk_buff *skb);
1507 int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1508 void xfrm_local_error(struct sk_buff *skb, int mtu);
1509 int xfrm4_extract_header(struct sk_buff *skb);
1510 int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1511 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1512 int encap_type);
1513 int xfrm4_transport_finish(struct sk_buff *skb, int async);
1514 int xfrm4_rcv(struct sk_buff *skb);
1515
1516 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1517 {
1518 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
1519 XFRM_SPI_SKB_CB(skb)->family = AF_INET;
1520 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
1521 return xfrm_input(skb, nexthdr, spi, 0);
1522 }
1523
1524 int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1525 int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1526 int xfrm4_output(struct sk_buff *skb);
1527 int xfrm4_output_finish(struct sk_buff *skb);
1528 int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
1529 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1530 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
1531 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1532 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1533 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1534 int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
1535 int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
1536 int xfrm6_extract_header(struct sk_buff *skb);
1537 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1538 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
1539 int xfrm6_transport_finish(struct sk_buff *skb, int async);
1540 int xfrm6_rcv(struct sk_buff *skb);
1541 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1542 xfrm_address_t *saddr, u8 proto);
1543 void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1544 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1545 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1546 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1547 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1548 int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1549 int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1550 int xfrm6_output(struct sk_buff *skb);
1551 int xfrm6_output_finish(struct sk_buff *skb);
1552 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1553 u8 **prevhdr);
1554
1555 #ifdef CONFIG_XFRM
1556 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1557 int xfrm_user_policy(struct sock *sk, int optname,
1558 u8 __user *optval, int optlen);
1559 #else
1560 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1561 {
1562 return -ENOPROTOOPT;
1563 }
1564
1565 static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
1566 {
1567 /* should not happen */
1568 kfree_skb(skb);
1569 return 0;
1570 }
1571 #endif
1572
1573 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1574
1575 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1576 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1577 int (*func)(struct xfrm_policy *, int, int, void*),
1578 void *);
1579 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1580 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1581 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
1582 u8 type, int dir,
1583 struct xfrm_selector *sel,
1584 struct xfrm_sec_ctx *ctx, int delete,
1585 int *err);
1586 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
1587 u32 id, int delete, int *err);
1588 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
1589 u32 xfrm_get_acqseq(void);
1590 int verify_spi_info(u8 proto, u32 min, u32 max);
1591 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1592 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1593 u8 mode, u32 reqid, u8 proto,
1594 const xfrm_address_t *daddr,
1595 const xfrm_address_t *saddr, int create,
1596 unsigned short family);
1597 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1598
1599 #ifdef CONFIG_XFRM_MIGRATE
1600 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1601 const struct xfrm_migrate *m, int num_bundles,
1602 const struct xfrm_kmaddress *k);
1603 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
1604 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1605 struct xfrm_migrate *m);
1606 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1607 struct xfrm_migrate *m, int num_bundles,
1608 struct xfrm_kmaddress *k, struct net *net);
1609 #endif
1610
1611 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1612 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1613 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
1614 xfrm_address_t *addr);
1615
1616 void xfrm_input_init(void);
1617 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1618
1619 void xfrm_probe_algs(void);
1620 int xfrm_count_pfkey_auth_supported(void);
1621 int xfrm_count_pfkey_enc_supported(void);
1622 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1623 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1624 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1625 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1626 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1627 struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1628 struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1629 struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1630 struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1631 int probe);
1632
1633 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
1634 const xfrm_address_t *b)
1635 {
1636 return ipv6_addr_equal((const struct in6_addr *)a,
1637 (const struct in6_addr *)b);
1638 }
1639
1640 static inline bool xfrm_addr_equal(const xfrm_address_t *a,
1641 const xfrm_address_t *b,
1642 sa_family_t family)
1643 {
1644 switch (family) {
1645 default:
1646 case AF_INET:
1647 return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
1648 case AF_INET6:
1649 return xfrm6_addr_equal(a, b);
1650 }
1651 }
1652
1653 static inline int xfrm_policy_id2dir(u32 index)
1654 {
1655 return index & 7;
1656 }
1657
1658 #ifdef CONFIG_XFRM
1659 static inline int xfrm_aevent_is_on(struct net *net)
1660 {
1661 struct sock *nlsk;
1662 int ret = 0;
1663
1664 rcu_read_lock();
1665 nlsk = rcu_dereference(net->xfrm.nlsk);
1666 if (nlsk)
1667 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1668 rcu_read_unlock();
1669 return ret;
1670 }
1671
1672 static inline int xfrm_acquire_is_on(struct net *net)
1673 {
1674 struct sock *nlsk;
1675 int ret = 0;
1676
1677 rcu_read_lock();
1678 nlsk = rcu_dereference(net->xfrm.nlsk);
1679 if (nlsk)
1680 ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
1681 rcu_read_unlock();
1682
1683 return ret;
1684 }
1685 #endif
1686
1687 static inline int xfrm_alg_len(const struct xfrm_algo *alg)
1688 {
1689 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1690 }
1691
1692 static inline int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
1693 {
1694 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1695 }
1696
1697 static inline int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
1698 {
1699 return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
1700 }
1701
1702 #ifdef CONFIG_XFRM_MIGRATE
1703 static inline int xfrm_replay_clone(struct xfrm_state *x,
1704 struct xfrm_state *orig)
1705 {
1706 x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn),
1707 GFP_KERNEL);
1708 if (!x->replay_esn)
1709 return -ENOMEM;
1710
1711 x->replay_esn->bmp_len = orig->replay_esn->bmp_len;
1712 x->replay_esn->replay_window = orig->replay_esn->replay_window;
1713
1714 x->preplay_esn = kmemdup(x->replay_esn,
1715 xfrm_replay_state_esn_len(x->replay_esn),
1716 GFP_KERNEL);
1717 if (!x->preplay_esn) {
1718 kfree(x->replay_esn);
1719 return -ENOMEM;
1720 }
1721
1722 return 0;
1723 }
1724
1725 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1726 {
1727 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1728 }
1729
1730 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
1731 {
1732 return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
1733 }
1734
1735 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1736 {
1737 int i;
1738 for (i = 0; i < n; i++)
1739 xfrm_state_put(*(states + i));
1740 }
1741
1742 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1743 {
1744 int i;
1745 for (i = 0; i < n; i++)
1746 xfrm_state_delete(*(states + i));
1747 }
1748 #endif
1749
1750 #ifdef CONFIG_XFRM
1751 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1752 {
1753 return skb->sp->xvec[skb->sp->len - 1];
1754 }
1755 #endif
1756
1757 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
1758 {
1759 if (attrs[XFRMA_MARK])
1760 memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
1761 else
1762 m->v = m->m = 0;
1763
1764 return m->v & m->m;
1765 }
1766
1767 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
1768 {
1769 int ret = 0;
1770
1771 if (m->m | m->v)
1772 ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
1773 return ret;
1774 }
1775
1776 static inline int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family,
1777 u8 protocol, int err)
1778 {
1779 switch(family) {
1780 #ifdef CONFIG_INET
1781 case AF_INET:
1782 return xfrm4_rcv_cb(skb, protocol, err);
1783 #endif
1784 }
1785 return 0;
1786 }
1787
1788 static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
1789 unsigned int family)
1790 {
1791 bool tunnel = false;
1792
1793 switch(family) {
1794 case AF_INET:
1795 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
1796 tunnel = true;
1797 break;
1798 case AF_INET6:
1799 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
1800 tunnel = true;
1801 break;
1802 }
1803 if (tunnel && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL))
1804 return -EINVAL;
1805
1806 return 0;
1807 }
1808 #endif /* _NET_XFRM_H */