ipsec: Fix xfrm_state_walk race
[deliverable/linux.git] / include / net / xfrm.h
1 #ifndef _NET_XFRM_H
2 #define _NET_XFRM_H
3
4 #include <linux/compiler.h>
5 #include <linux/xfrm.h>
6 #include <linux/spinlock.h>
7 #include <linux/list.h>
8 #include <linux/skbuff.h>
9 #include <linux/socket.h>
10 #include <linux/pfkeyv2.h>
11 #include <linux/ipsec.h>
12 #include <linux/in6.h>
13 #include <linux/mutex.h>
14 #include <linux/audit.h>
15
16 #include <net/sock.h>
17 #include <net/dst.h>
18 #include <net/ip.h>
19 #include <net/route.h>
20 #include <net/ipv6.h>
21 #include <net/ip6_fib.h>
22 #ifdef CONFIG_XFRM_STATISTICS
23 #include <net/snmp.h>
24 #endif
25
26 #define XFRM_PROTO_ESP 50
27 #define XFRM_PROTO_AH 51
28 #define XFRM_PROTO_COMP 108
29 #define XFRM_PROTO_IPIP 4
30 #define XFRM_PROTO_IPV6 41
31 #define XFRM_PROTO_ROUTING IPPROTO_ROUTING
32 #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
33
34 #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
35 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
36 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
37 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
38 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
39
40 #ifdef CONFIG_XFRM_STATISTICS
41 DECLARE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
42 #define XFRM_INC_STATS(field) SNMP_INC_STATS(xfrm_statistics, field)
43 #define XFRM_INC_STATS_BH(field) SNMP_INC_STATS_BH(xfrm_statistics, field)
44 #define XFRM_INC_STATS_USER(field) SNMP_INC_STATS_USER(xfrm_statistics, field)
45 #else
46 #define XFRM_INC_STATS(field)
47 #define XFRM_INC_STATS_BH(field)
48 #define XFRM_INC_STATS_USER(field)
49 #endif
50
51 extern struct sock *xfrm_nl;
52 extern u32 sysctl_xfrm_aevent_etime;
53 extern u32 sysctl_xfrm_aevent_rseqth;
54 extern int sysctl_xfrm_larval_drop;
55 extern u32 sysctl_xfrm_acq_expires;
56
57 extern struct mutex xfrm_cfg_mutex;
58
59 /* Organization of SPD aka "XFRM rules"
60 ------------------------------------
61
62 Basic objects:
63 - policy rule, struct xfrm_policy (=SPD entry)
64 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
65 - instance of a transformer, struct xfrm_state (=SA)
66 - template to clone xfrm_state, struct xfrm_tmpl
67
68 The SPD is a plain linear list of xfrm_policy rules, ordered by priority.
69 (To be compatible with existing pfkeyv2 implementations,
70 many rules with priority of 0x7fffffff are allowed to exist and
71 such rules are ordered in an unpredictable way, thanks to bsd folks.)
72
73 Lookup is a plain linear search until the first match with a selector.
74
75 If "action" is "block", then we prohibit the flow, otherwise:
76 if "xfrms_nr" is zero, the flow passes untransformed. Otherwise, the
77 policy entry has a list of up to XFRM_MAX_DEPTH transformations,
78 described by templates xfrm_tmpl. Each template is resolved
79 to a complete xfrm_state (see below) and we pack the bundle of
80 transformations into a dst_entry returned to the requestor.
81
82 dst -. xfrm .-> xfrm_state #1
83 |---. child .-> dst -. xfrm .-> xfrm_state #2
84 |---. child .-> dst -. xfrm .-> xfrm_state #3
85 |---. child .-> NULL
86
87 Bundles are cached in the xfrm_policy struct (field ->bundles); a small traversal sketch follows this comment.
88
89
90 Resolution of xfrm_tmpl
91 -----------------------
92 Template contains:
93 1. ->mode Mode: transport or tunnel
94 2. ->id.proto Protocol: AH/ESP/IPCOMP
95 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
96 Q: allow to resolve security gateway?
97 4. ->id.spi If not zero, static SPI.
98 5. ->saddr Local tunnel endpoint, ignored for transport mode.
99 6. ->algos List of allowed algos. Plain bitmask now.
100 Q: ealgos, aalgos, calgos. What a mess...
101 7. ->share Sharing mode.
102 Q: how to implement private sharing mode? To add struct sock* to
103 flow id?
104
105 Having this template, we search through the SAD for entries
106 with an appropriate mode/proto/algo, permitted by the selector.
107 If no appropriate entry is found, it is requested from the key manager.
108
109 PROBLEMS:
110 Q: How to find all the bundles referring to a physical path for
111 PMTU discovery? Seems, dst should contain list of all parents...
112 and enter to infinite locking hierarchy disaster.
113 No! It is easier, we will not search for them, let them find us.
114 We add genid to each dst plus pointer to genid of raw IP route,
115 pmtu disc will update pmtu on raw IP route and increase its genid.
116 dst_check() will see this for top level and trigger resyncing
117 metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
118 */
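/*
 * A minimal sketch (not part of the original header) of walking a bundle
 * as pictured in the diagram above: each dst in the chain carries one
 * xfrm_state, and ->child links to the next element.  It assumes struct
 * dst_entry exposes ->child and ->xfrm; the helper name is hypothetical.
 */
static inline int xfrm_bundle_depth_sketch(struct dst_entry *dst)
{
	int depth = 0;

	for (; dst; dst = dst->child)
		if (dst->xfrm)
			depth++;
	return depth;
}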
119
120 /* Full description of state of transformer. */
121 struct xfrm_state
122 {
123 struct list_head all;
124 union {
125 struct list_head gclist;
126 struct hlist_node bydst;
127 };
128 struct hlist_node bysrc;
129 struct hlist_node byspi;
130
131 atomic_t refcnt;
132 spinlock_t lock;
133
134 struct xfrm_id id;
135 struct xfrm_selector sel;
136
137 u32 genid;
138
139 /* Key manager bits */
140 struct {
141 u8 state;
142 u8 dying;
143 u32 seq;
144 } km;
145
146 /* Parameters of this state. */
147 struct {
148 u32 reqid;
149 u8 mode;
150 u8 replay_window;
151 u8 aalgo, ealgo, calgo;
152 u8 flags;
153 u16 family;
154 xfrm_address_t saddr;
155 int header_len;
156 int trailer_len;
157 } props;
158
159 struct xfrm_lifetime_cfg lft;
160
161 /* Data for transformer */
162 struct xfrm_algo *aalg;
163 struct xfrm_algo *ealg;
164 struct xfrm_algo *calg;
165 struct xfrm_algo_aead *aead;
166
167 /* Data for encapsulator */
168 struct xfrm_encap_tmpl *encap;
169
170 /* Data for care-of address */
171 xfrm_address_t *coaddr;
172
173 /* IPComp needs an IPIP tunnel for handling uncompressed packets */
174 struct xfrm_state *tunnel;
175
176 /* If a tunnel, number of users + 1 */
177 atomic_t tunnel_users;
178
179 /* State for replay detection */
180 struct xfrm_replay_state replay;
181
182 /* Replay detection state at the time we sent the last notification */
183 struct xfrm_replay_state preplay;
184
185 /* internal flag that only holds state for delayed aevent at the
186 * moment
187 */
188 u32 xflags;
189
190 /* Replay detection notification settings */
191 u32 replay_maxage;
192 u32 replay_maxdiff;
193
194 /* Replay detection notification timer */
195 struct timer_list rtimer;
196
197 /* Statistics */
198 struct xfrm_stats stats;
199
200 struct xfrm_lifetime_cur curlft;
201 struct timer_list timer;
202
203 /* Last used time */
204 unsigned long lastused;
205
206 /* Reference to data common to all the instances of this
207 * transformer. */
208 const struct xfrm_type *type;
209 struct xfrm_mode *inner_mode;
210 struct xfrm_mode *inner_mode_iaf;
211 struct xfrm_mode *outer_mode;
212
213 /* Security context */
214 struct xfrm_sec_ctx *security;
215
216 /* Private data of this transformer, format is opaque,
217 * interpreted by xfrm_type methods. */
218 void *data;
219 };
220
221 /* xflags - make enum if more show up */
222 #define XFRM_TIME_DEFER 1
223
224 enum {
225 XFRM_STATE_VOID,
226 XFRM_STATE_ACQ,
227 XFRM_STATE_VALID,
228 XFRM_STATE_ERROR,
229 XFRM_STATE_EXPIRED,
230 XFRM_STATE_DEAD
231 };
232
233 /* callback structure passed from either netlink or pfkey */
234 struct km_event
235 {
236 union {
237 u32 hard;
238 u32 proto;
239 u32 byid;
240 u32 aevent;
241 u32 type;
242 } data;
243
244 u32 seq;
245 u32 pid;
246 u32 event;
247 };
248
249 struct net_device;
250 struct xfrm_type;
251 struct xfrm_dst;
252 struct xfrm_policy_afinfo {
253 unsigned short family;
254 struct dst_ops *dst_ops;
255 void (*garbage_collect)(void);
256 struct dst_entry *(*dst_lookup)(int tos, xfrm_address_t *saddr,
257 xfrm_address_t *daddr);
258 int (*get_saddr)(xfrm_address_t *saddr, xfrm_address_t *daddr);
259 struct dst_entry *(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy);
260 void (*decode_session)(struct sk_buff *skb,
261 struct flowi *fl,
262 int reverse);
263 int (*get_tos)(struct flowi *fl);
264 int (*init_path)(struct xfrm_dst *path,
265 struct dst_entry *dst,
266 int nfheader_len);
267 int (*fill_dst)(struct xfrm_dst *xdst,
268 struct net_device *dev);
269 };
270
271 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
272 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
273 extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
274 extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
275
276 struct xfrm_tmpl;
277 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
278 extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
279 extern int __xfrm_state_delete(struct xfrm_state *x);
280
281 struct xfrm_state_afinfo {
282 unsigned int family;
283 unsigned int proto;
284 __be16 eth_proto;
285 struct module *owner;
286 const struct xfrm_type *type_map[IPPROTO_MAX];
287 struct xfrm_mode *mode_map[XFRM_MODE_MAX];
288 int (*init_flags)(struct xfrm_state *x);
289 void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
290 struct xfrm_tmpl *tmpl,
291 xfrm_address_t *daddr, xfrm_address_t *saddr);
292 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
293 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
294 int (*output)(struct sk_buff *skb);
295 int (*extract_input)(struct xfrm_state *x,
296 struct sk_buff *skb);
297 int (*extract_output)(struct xfrm_state *x,
298 struct sk_buff *skb);
299 int (*transport_finish)(struct sk_buff *skb,
300 int async);
301 };
302
303 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
304 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
305
306 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
307
308 struct xfrm_type
309 {
310 char *description;
311 struct module *owner;
312 __u8 proto;
313 __u8 flags;
314 #define XFRM_TYPE_NON_FRAGMENT 1
315 #define XFRM_TYPE_REPLAY_PROT 2
316 #define XFRM_TYPE_LOCAL_COADDR 4
317 #define XFRM_TYPE_REMOTE_COADDR 8
318
319 int (*init_state)(struct xfrm_state *x);
320 void (*destructor)(struct xfrm_state *);
321 int (*input)(struct xfrm_state *, struct sk_buff *skb);
322 int (*output)(struct xfrm_state *, struct sk_buff *pskb);
323 int (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
324 int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
325 /* Estimate maximal size of result of transformation of a dgram */
326 u32 (*get_mtu)(struct xfrm_state *, int size);
327 };
328
329 extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
330 extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
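/*
 * A hedged sketch (not part of the original header) of how a transform type
 * plugs in: fill in struct xfrm_type and hand it to xfrm_register_type() for
 * a family, as protocols such as ESP do.  Everything named "sketch_" is a
 * hypothetical placeholder; the block is guarded out so it stays purely
 * illustrative.
 */
#if 0
static int sketch_init_state(struct xfrm_state *x)
{
	return 0;	/* validate algorithms, set up private data */
}

static int sketch_input(struct xfrm_state *x, struct sk_buff *skb)
{
	return 0;	/* strip and verify the protocol header */
}

static int sketch_output(struct xfrm_state *x, struct sk_buff *skb)
{
	return 0;	/* build the protocol header */
}

static const struct xfrm_type sketch_type = {
	.description	= "SKETCH",
	.owner		= THIS_MODULE,	/* assumes <linux/module.h> */
	.proto		= IPPROTO_ESP,
	.init_state	= sketch_init_state,
	.input		= sketch_input,
	.output		= sketch_output,
};

static int __init sketch_type_init(void)
{
	return xfrm_register_type(&sketch_type, AF_INET);
}

static void __exit sketch_type_exit(void)
{
	xfrm_unregister_type(&sketch_type, AF_INET);
}
#endif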
331
332 struct xfrm_mode {
333 /*
334 * Remove encapsulation header.
335 *
336 * The IP header will be moved over the top of the encapsulation
337 * header.
338 *
339 * On entry, the transport header shall point to where the IP header
340 * should be and the network header shall be set to where the IP
341 * header currently is. skb->data shall point to the start of the
342 * payload.
343 */
344 int (*input2)(struct xfrm_state *x, struct sk_buff *skb);
345
346 /*
347 * This is the actual input entry point.
348 *
349 * For transport mode and equivalent this would be identical to
350 * input2 (which does not need to be set). While tunnel mode
351 * and equivalent would set this to the tunnel encapsulation function
352 * xfrm4_prepare_input that would in turn call input2.
353 */
354 int (*input)(struct xfrm_state *x, struct sk_buff *skb);
355
356 /*
357 * Add encapsulation header.
358 *
359 * On exit, the transport header will be set to the start of the
360 * encapsulation header to be filled in by x->type->output and
361 * the mac header will be set to the nextheader (protocol for
362 * IPv4) field of the extension header directly preceding the
363 * encapsulation header, or in its absence, that of the top IP
364 * header. The value of the network header will always point
365 * to the top IP header while skb->data will point to the payload.
366 */
367 int (*output2)(struct xfrm_state *x,struct sk_buff *skb);
368
369 /*
370 * This is the actual output entry point.
371 *
372 * For transport mode and equivalent this would be identical to
373 * output2 (which does not need to be set). While tunnel mode
374 * and equivalent would set this to a tunnel encapsulation function
375 * (xfrm4_prepare_output or xfrm6_prepare_output) that would in turn
376 * call output2.
377 */
378 int (*output)(struct xfrm_state *x, struct sk_buff *skb);
379
380 struct xfrm_state_afinfo *afinfo;
381 struct module *owner;
382 unsigned int encap;
383 int flags;
384 };
385
386 /* Flags for xfrm_mode. */
387 enum {
388 XFRM_MODE_FLAG_TUNNEL = 1,
389 };
390
391 extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
392 extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
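/*
 * A hedged sketch (not part of the original header): a transport-style mode
 * only needs the plain ->input/->output entry points described above, since
 * input2/output2 may be left unset for it.  Names are hypothetical and the
 * block is guarded out; XFRM_MODE_TRANSPORT comes from <linux/xfrm.h>.
 */
#if 0
static int sketch_mode_input(struct xfrm_state *x, struct sk_buff *skb)
{
	return 0;	/* adjust headers for the transport case */
}

static int sketch_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	return 0;	/* make room for the encapsulation header */
}

static struct xfrm_mode sketch_transport_mode = {
	.input	= sketch_mode_input,
	.output	= sketch_mode_output,
	.owner	= THIS_MODULE,
	.encap	= XFRM_MODE_TRANSPORT,
};

static int __init sketch_mode_init(void)
{
	return xfrm_register_mode(&sketch_transport_mode, AF_INET);
}
#endif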
393
394 static inline int xfrm_af2proto(unsigned int family)
395 {
396 switch(family) {
397 case AF_INET:
398 return IPPROTO_IPIP;
399 case AF_INET6:
400 return IPPROTO_IPV6;
401 default:
402 return 0;
403 }
404 }
405
406 static inline struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
407 {
408 if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
409 (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
410 return x->inner_mode;
411 else
412 return x->inner_mode_iaf;
413 }
414
415 struct xfrm_tmpl
416 {
417 /* id in template is interpreted as:
418 * daddr - destination of tunnel, may be zero for transport mode.
419 * spi - zero to acquire spi. Not zero if spi is static, then
420 * daddr must be fixed too.
421 * proto - AH/ESP/IPCOMP
422 */
423 struct xfrm_id id;
424
425 /* Source address of tunnel. Ignored if it is not a tunnel. */
426 xfrm_address_t saddr;
427
428 unsigned short encap_family;
429
430 __u32 reqid;
431
432 /* Mode: transport, tunnel etc. */
433 __u8 mode;
434
435 /* Sharing mode: unique, this session only, this user only etc. */
436 __u8 share;
437
438 /* May skip this transformation if no SA is found */
439 __u8 optional;
440
441 /* Skip aalgos/ealgos/calgos checks. */
442 __u8 allalgs;
443
444 /* Bit mask of algos allowed for acquisition */
445 __u32 aalgos;
446 __u32 ealgos;
447 __u32 calgos;
448 };
449
450 #define XFRM_MAX_DEPTH 6
451
452 struct xfrm_policy
453 {
454 struct xfrm_policy *next;
455 struct list_head bytype;
456 struct hlist_node bydst;
457 struct hlist_node byidx;
458
459 /* This lock only affects elements except for entry. */
460 rwlock_t lock;
461 atomic_t refcnt;
462 struct timer_list timer;
463
464 u32 priority;
465 u32 index;
466 struct xfrm_selector selector;
467 struct xfrm_lifetime_cfg lft;
468 struct xfrm_lifetime_cur curlft;
469 struct dst_entry *bundles;
470 u16 family;
471 u8 type;
472 u8 action;
473 u8 flags;
474 u8 dead;
475 u8 xfrm_nr;
476 /* XXX 1 byte hole, try to pack */
477 struct xfrm_sec_ctx *security;
478 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
479 };
480
481 struct xfrm_migrate {
482 xfrm_address_t old_daddr;
483 xfrm_address_t old_saddr;
484 xfrm_address_t new_daddr;
485 xfrm_address_t new_saddr;
486 u8 proto;
487 u8 mode;
488 u16 reserved;
489 u32 reqid;
490 u16 old_family;
491 u16 new_family;
492 };
493
494 #define XFRM_KM_TIMEOUT 30
495 /* which seqno */
496 #define XFRM_REPLAY_SEQ 1
497 #define XFRM_REPLAY_OSEQ 2
498 #define XFRM_REPLAY_SEQ_MASK 3
499 /* what happened */
500 #define XFRM_REPLAY_UPDATE XFRM_AE_CR
501 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
502
503 /* default aevent timeout in units of 100ms */
504 #define XFRM_AE_ETIME 10
505 /* Async Event timer multiplier */
506 #define XFRM_AE_ETH_M 10
507 /* default seq threshold size */
508 #define XFRM_AE_SEQT_SIZE 2
509
510 struct xfrm_mgr
511 {
512 struct list_head list;
513 char *id;
514 int (*notify)(struct xfrm_state *x, struct km_event *c);
515 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
516 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
517 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
518 int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
519 int (*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
520 int (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles);
521 };
522
523 extern int xfrm_register_km(struct xfrm_mgr *km);
524 extern int xfrm_unregister_km(struct xfrm_mgr *km);
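/*
 * A hedged sketch (not part of the original header): a key manager fills in
 * struct xfrm_mgr and registers it, the way af_key and xfrm_user do.  Only a
 * couple of callbacks are shown; the "sketch_" names are hypothetical and
 * the block is guarded out.
 */
#if 0
static int sketch_km_notify(struct xfrm_state *x, struct km_event *c)
{
	return 0;	/* push the state event to listeners */
}

static int sketch_km_acquire(struct xfrm_state *x, struct xfrm_tmpl *t,
			     struct xfrm_policy *xp, int dir)
{
	return 0;	/* ask userspace to negotiate an SA */
}

static struct xfrm_mgr sketch_mgr = {
	.id		= "sketch",
	.notify		= sketch_km_notify,
	.acquire	= sketch_km_acquire,
};

static int __init sketch_km_init(void)
{
	return xfrm_register_km(&sketch_mgr);
}
#endif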
525
526 extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
527
528 /*
529 * This structure is used for the duration where packets are being
530 * transformed by IPsec. As soon as the packet leaves IPsec the
531 * area beyond the generic IP part may be overwritten.
532 */
533 struct xfrm_skb_cb {
534 union {
535 struct inet_skb_parm h4;
536 struct inet6_skb_parm h6;
537 } header;
538
539 /* Sequence number for replay protection. */
540 union {
541 u64 output;
542 __be32 input;
543 } seq;
544 };
545
546 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
547
548 /*
549 * This structure is used by the afinfo prepare_input/prepare_output functions
550 * to transmit header information to the mode input/output functions.
551 */
552 struct xfrm_mode_skb_cb {
553 union {
554 struct inet_skb_parm h4;
555 struct inet6_skb_parm h6;
556 } header;
557
558 /* Copied from header for IPv4, always set to zero and DF for IPv6. */
559 __be16 id;
560 __be16 frag_off;
561
562 /* IP header length (excluding options or extension headers). */
563 u8 ihl;
564
565 /* TOS for IPv4, class for IPv6. */
566 u8 tos;
567
568 /* TTL for IPv4, hop limit for IPv6. */
569 u8 ttl;
570
571 /* Protocol for IPv4, NH for IPv6. */
572 u8 protocol;
573
574 /* Option length for IPv4, zero for IPv6. */
575 u8 optlen;
576
577 /* Used by IPv6 only, zero for IPv4. */
578 u8 flow_lbl[3];
579 };
580
581 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
582
583 /*
584 * This structure is used by the input processing to locate the SPI and
585 * related information.
586 */
587 struct xfrm_spi_skb_cb {
588 union {
589 struct inet_skb_parm h4;
590 struct inet6_skb_parm h6;
591 } header;
592
593 unsigned int daddroff;
594 unsigned int family;
595 };
596
597 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
598
599 /* Audit Information */
600 struct xfrm_audit
601 {
602 u32 secid;
603 uid_t loginuid;
604 u32 sessionid;
605 };
606
607 #ifdef CONFIG_AUDITSYSCALL
608 static inline struct audit_buffer *xfrm_audit_start(const char *op)
609 {
610 struct audit_buffer *audit_buf = NULL;
611
612 if (audit_enabled == 0)
613 return NULL;
614 audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
615 AUDIT_MAC_IPSEC_EVENT);
616 if (audit_buf == NULL)
617 return NULL;
618 audit_log_format(audit_buf, "op=%s", op);
619 return audit_buf;
620 }
621
622 static inline void xfrm_audit_helper_usrinfo(uid_t auid, u32 ses, u32 secid,
623 struct audit_buffer *audit_buf)
624 {
625 char *secctx;
626 u32 secctx_len;
627
628 audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
629 if (secid != 0 &&
630 security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
631 audit_log_format(audit_buf, " subj=%s", secctx);
632 security_release_secctx(secctx, secctx_len);
633 } else
634 audit_log_task_context(audit_buf);
635 }
636
637 extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
638 u32 auid, u32 ses, u32 secid);
639 extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
640 u32 auid, u32 ses, u32 secid);
641 extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
642 u32 auid, u32 ses, u32 secid);
643 extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
644 u32 auid, u32 ses, u32 secid);
645 extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
646 struct sk_buff *skb);
647 extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
648 extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
649 __be32 net_spi, __be32 net_seq);
650 extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
651 struct sk_buff *skb, u8 proto);
652 #else
653
654 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
655 u32 auid, u32 ses, u32 secid)
656 {
657 }
658
659 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
660 u32 auid, u32 ses, u32 secid)
661 {
662 }
663
664 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
665 u32 auid, u32 ses, u32 secid)
666 {
667 }
668
669 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
670 u32 auid, u32 ses, u32 secid)
671 {
672 }
673
674 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
675 struct sk_buff *skb)
676 {
677 }
678
679 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
680 u16 family)
681 {
682 }
683
684 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
685 __be32 net_spi, __be32 net_seq)
686 {
687 }
688
689 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
690 struct sk_buff *skb, u8 proto)
691 {
692 }
693 #endif /* CONFIG_AUDITSYSCALL */
694
695 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
696 {
697 if (likely(policy != NULL))
698 atomic_inc(&policy->refcnt);
699 }
700
701 extern void xfrm_policy_destroy(struct xfrm_policy *policy);
702
703 static inline void xfrm_pol_put(struct xfrm_policy *policy)
704 {
705 if (atomic_dec_and_test(&policy->refcnt))
706 xfrm_policy_destroy(policy);
707 }
708
709 #ifdef CONFIG_XFRM_SUB_POLICY
710 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
711 {
712 int i;
713 for (i = npols - 1; i >= 0; --i)
714 xfrm_pol_put(pols[i]);
715 }
716 #else
717 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
718 {
719 xfrm_pol_put(pols[0]);
720 }
721 #endif
722
723 extern void __xfrm_state_destroy(struct xfrm_state *);
724
725 static inline void __xfrm_state_put(struct xfrm_state *x)
726 {
727 atomic_dec(&x->refcnt);
728 }
729
730 static inline void xfrm_state_put(struct xfrm_state *x)
731 {
732 if (atomic_dec_and_test(&x->refcnt))
733 __xfrm_state_destroy(x);
734 }
735
736 static inline void xfrm_state_hold(struct xfrm_state *x)
737 {
738 atomic_inc(&x->refcnt);
739 }
740
741 static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
742 {
743 __be32 *a1 = token1;
744 __be32 *a2 = token2;
745 int pdw;
746 int pbi;
747
748 pdw = prefixlen >> 5; /* num of whole __u32 in prefix */
749 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
750
751 if (pdw)
752 if (memcmp(a1, a2, pdw << 2))
753 return 0;
754
755 if (pbi) {
756 __be32 mask;
757
758 mask = htonl((0xffffffff) << (32 - pbi));
759
760 if ((a1[pdw] ^ a2[pdw]) & mask)
761 return 0;
762 }
763
764 return 1;
765 }
766
767 static __inline__
768 __be16 xfrm_flowi_sport(struct flowi *fl)
769 {
770 __be16 port;
771 switch(fl->proto) {
772 case IPPROTO_TCP:
773 case IPPROTO_UDP:
774 case IPPROTO_UDPLITE:
775 case IPPROTO_SCTP:
776 port = fl->fl_ip_sport;
777 break;
778 case IPPROTO_ICMP:
779 case IPPROTO_ICMPV6:
780 port = htons(fl->fl_icmp_type);
781 break;
782 case IPPROTO_MH:
783 port = htons(fl->fl_mh_type);
784 break;
785 default:
786 port = 0; /*XXX*/
787 }
788 return port;
789 }
790
791 static __inline__
792 __be16 xfrm_flowi_dport(struct flowi *fl)
793 {
794 __be16 port;
795 switch(fl->proto) {
796 case IPPROTO_TCP:
797 case IPPROTO_UDP:
798 case IPPROTO_UDPLITE:
799 case IPPROTO_SCTP:
800 port = fl->fl_ip_dport;
801 break;
802 case IPPROTO_ICMP:
803 case IPPROTO_ICMPV6:
804 port = htons(fl->fl_icmp_code);
805 break;
806 default:
807 port = 0; /*XXX*/
808 }
809 return port;
810 }
811
812 extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
813 unsigned short family);
814
815 #ifdef CONFIG_SECURITY_NETWORK_XFRM
816 /* If neither has a context --> match
817 * Otherwise, both must have a context and the sids, doi, alg must match
818 */
819 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
820 {
821 return ((!s1 && !s2) ||
822 (s1 && s2 &&
823 (s1->ctx_sid == s2->ctx_sid) &&
824 (s1->ctx_doi == s2->ctx_doi) &&
825 (s1->ctx_alg == s2->ctx_alg)));
826 }
827 #else
828 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
829 {
830 return 1;
831 }
832 #endif
833
834 /* A struct encoding a bundle of transformations to apply to some set of flows.
835 *
836 * dst->child points to the next element of the bundle.
837 * dst->xfrm points to an instance of a transformer.
838 *
839 * Due to unfortunate limitations of the current routing cache, which we
840 * have no time to fix, it mirrors struct rtable and is bound to the same
841 * routing key, including saddr,daddr. However, we can have many
842 * bundles differing by session id. All the bundles grow from a parent
843 * policy rule.
844 */
845 struct xfrm_dst
846 {
847 union {
848 struct dst_entry dst;
849 struct rtable rt;
850 struct rt6_info rt6;
851 } u;
852 struct dst_entry *route;
853 #ifdef CONFIG_XFRM_SUB_POLICY
854 struct flowi *origin;
855 struct xfrm_selector *partner;
856 #endif
857 u32 genid;
858 u32 route_mtu_cached;
859 u32 child_mtu_cached;
860 u32 route_cookie;
861 u32 path_cookie;
862 };
863
864 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
865 {
866 dst_release(xdst->route);
867 if (likely(xdst->u.dst.xfrm))
868 xfrm_state_put(xdst->u.dst.xfrm);
869 #ifdef CONFIG_XFRM_SUB_POLICY
870 kfree(xdst->origin);
871 xdst->origin = NULL;
872 kfree(xdst->partner);
873 xdst->partner = NULL;
874 #endif
875 }
876
877 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
878
879 struct sec_path
880 {
881 atomic_t refcnt;
882 int len;
883 struct xfrm_state *xvec[XFRM_MAX_DEPTH];
884 };
885
886 static inline struct sec_path *
887 secpath_get(struct sec_path *sp)
888 {
889 if (sp)
890 atomic_inc(&sp->refcnt);
891 return sp;
892 }
893
894 extern void __secpath_destroy(struct sec_path *sp);
895
896 static inline void
897 secpath_put(struct sec_path *sp)
898 {
899 if (sp && atomic_dec_and_test(&sp->refcnt))
900 __secpath_destroy(sp);
901 }
902
903 extern struct sec_path *secpath_dup(struct sec_path *src);
904
905 static inline void
906 secpath_reset(struct sk_buff *skb)
907 {
908 #ifdef CONFIG_XFRM
909 secpath_put(skb->sp);
910 skb->sp = NULL;
911 #endif
912 }
913
914 static inline int
915 xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
916 {
917 switch (family) {
918 case AF_INET:
919 return addr->a4 == 0;
920 case AF_INET6:
921 return ipv6_addr_any((struct in6_addr *)&addr->a6);
922 }
923 return 0;
924 }
925
926 static inline int
927 __xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
928 {
929 return (tmpl->saddr.a4 &&
930 tmpl->saddr.a4 != x->props.saddr.a4);
931 }
932
933 static inline int
934 __xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
935 {
936 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
937 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
938 }
939
940 static inline int
941 xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
942 {
943 switch (family) {
944 case AF_INET:
945 return __xfrm4_state_addr_cmp(tmpl, x);
946 case AF_INET6:
947 return __xfrm6_state_addr_cmp(tmpl, x);
948 }
949 return !0;
950 }
951
952 #ifdef CONFIG_XFRM
953 extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
954
955 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
956 struct sk_buff *skb,
957 unsigned int family, int reverse)
958 {
959 int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
960
961 if (sk && sk->sk_policy[XFRM_POLICY_IN])
962 return __xfrm_policy_check(sk, ndir, skb, family);
963
964 return (!xfrm_policy_count[dir] && !skb->sp) ||
965 (skb->dst->flags & DST_NOPOLICY) ||
966 __xfrm_policy_check(sk, ndir, skb, family);
967 }
968
969 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
970 {
971 return __xfrm_policy_check2(sk, dir, skb, family, 0);
972 }
973
974 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
975 {
976 return xfrm_policy_check(sk, dir, skb, AF_INET);
977 }
978
979 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
980 {
981 return xfrm_policy_check(sk, dir, skb, AF_INET6);
982 }
983
984 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
985 struct sk_buff *skb)
986 {
987 return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
988 }
989
990 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
991 struct sk_buff *skb)
992 {
993 return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
994 }
995
996 extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
997 unsigned int family, int reverse);
998
999 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1000 unsigned int family)
1001 {
1002 return __xfrm_decode_session(skb, fl, family, 0);
1003 }
1004
1005 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1006 struct flowi *fl,
1007 unsigned int family)
1008 {
1009 return __xfrm_decode_session(skb, fl, family, 1);
1010 }
1011
1012 extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1013
1014 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1015 {
1016 return !xfrm_policy_count[XFRM_POLICY_OUT] ||
1017 (skb->dst->flags & DST_NOXFRM) ||
1018 __xfrm_route_forward(skb, family);
1019 }
1020
1021 static inline int xfrm4_route_forward(struct sk_buff *skb)
1022 {
1023 return xfrm_route_forward(skb, AF_INET);
1024 }
1025
1026 static inline int xfrm6_route_forward(struct sk_buff *skb)
1027 {
1028 return xfrm_route_forward(skb, AF_INET6);
1029 }
1030
1031 extern int __xfrm_sk_clone_policy(struct sock *sk);
1032
1033 static inline int xfrm_sk_clone_policy(struct sock *sk)
1034 {
1035 if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
1036 return __xfrm_sk_clone_policy(sk);
1037 return 0;
1038 }
1039
1040 extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1041
1042 static inline void xfrm_sk_free_policy(struct sock *sk)
1043 {
1044 if (unlikely(sk->sk_policy[0] != NULL)) {
1045 xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
1046 sk->sk_policy[0] = NULL;
1047 }
1048 if (unlikely(sk->sk_policy[1] != NULL)) {
1049 xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
1050 sk->sk_policy[1] = NULL;
1051 }
1052 }
1053
1054 #else
1055
1056 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1057 static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
1058 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1059 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1060 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1061 {
1062 return 1;
1063 }
1064 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1065 {
1066 return 1;
1067 }
1068 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1069 {
1070 return 1;
1071 }
1072 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1073 struct flowi *fl,
1074 unsigned int family)
1075 {
1076 return -ENOSYS;
1077 }
1078 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1079 struct sk_buff *skb)
1080 {
1081 return 1;
1082 }
1083 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1084 struct sk_buff *skb)
1085 {
1086 return 1;
1087 }
1088 #endif
1089
1090 static __inline__
1091 xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
1092 {
1093 switch (family){
1094 case AF_INET:
1095 return (xfrm_address_t *)&fl->fl4_dst;
1096 case AF_INET6:
1097 return (xfrm_address_t *)&fl->fl6_dst;
1098 }
1099 return NULL;
1100 }
1101
1102 static __inline__
1103 xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
1104 {
1105 switch (family){
1106 case AF_INET:
1107 return (xfrm_address_t *)&fl->fl4_src;
1108 case AF_INET6:
1109 return (xfrm_address_t *)&fl->fl6_src;
1110 }
1111 return NULL;
1112 }
1113
1114 static __inline__
1115 void xfrm_flowi_addr_get(struct flowi *fl,
1116 xfrm_address_t *saddr, xfrm_address_t *daddr,
1117 unsigned short family)
1118 {
1119 switch(family) {
1120 case AF_INET:
1121 memcpy(&saddr->a4, &fl->fl4_src, sizeof(saddr->a4));
1122 memcpy(&daddr->a4, &fl->fl4_dst, sizeof(daddr->a4));
1123 break;
1124 case AF_INET6:
1125 ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->fl6_src);
1126 ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->fl6_dst);
1127 break;
1128 }
1129 }
1130
1131 static __inline__ int
1132 __xfrm4_state_addr_check(struct xfrm_state *x,
1133 xfrm_address_t *daddr, xfrm_address_t *saddr)
1134 {
1135 if (daddr->a4 == x->id.daddr.a4 &&
1136 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1137 return 1;
1138 return 0;
1139 }
1140
1141 static __inline__ int
1142 __xfrm6_state_addr_check(struct xfrm_state *x,
1143 xfrm_address_t *daddr, xfrm_address_t *saddr)
1144 {
1145 if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1146 (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
1147 ipv6_addr_any((struct in6_addr *)saddr) ||
1148 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1149 return 1;
1150 return 0;
1151 }
1152
1153 static __inline__ int
1154 xfrm_state_addr_check(struct xfrm_state *x,
1155 xfrm_address_t *daddr, xfrm_address_t *saddr,
1156 unsigned short family)
1157 {
1158 switch (family) {
1159 case AF_INET:
1160 return __xfrm4_state_addr_check(x, daddr, saddr);
1161 case AF_INET6:
1162 return __xfrm6_state_addr_check(x, daddr, saddr);
1163 }
1164 return 0;
1165 }
1166
1167 static __inline__ int
1168 xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
1169 unsigned short family)
1170 {
1171 switch (family) {
1172 case AF_INET:
1173 return __xfrm4_state_addr_check(x,
1174 (xfrm_address_t *)&fl->fl4_dst,
1175 (xfrm_address_t *)&fl->fl4_src);
1176 case AF_INET6:
1177 return __xfrm6_state_addr_check(x,
1178 (xfrm_address_t *)&fl->fl6_dst,
1179 (xfrm_address_t *)&fl->fl6_src);
1180 }
1181 return 0;
1182 }
1183
1184 static inline int xfrm_state_kern(struct xfrm_state *x)
1185 {
1186 return atomic_read(&x->tunnel_users);
1187 }
1188
1189 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1190 {
1191 return (!userproto || proto == userproto ||
1192 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1193 proto == IPPROTO_ESP ||
1194 proto == IPPROTO_COMP)));
1195 }
1196
1197 /*
1198 * xfrm algorithm information
1199 */
1200 struct xfrm_algo_aead_info {
1201 u16 icv_truncbits;
1202 };
1203
1204 struct xfrm_algo_auth_info {
1205 u16 icv_truncbits;
1206 u16 icv_fullbits;
1207 };
1208
1209 struct xfrm_algo_encr_info {
1210 u16 blockbits;
1211 u16 defkeybits;
1212 };
1213
1214 struct xfrm_algo_comp_info {
1215 u16 threshold;
1216 };
1217
1218 struct xfrm_algo_desc {
1219 char *name;
1220 char *compat;
1221 u8 available:1;
1222 union {
1223 struct xfrm_algo_aead_info aead;
1224 struct xfrm_algo_auth_info auth;
1225 struct xfrm_algo_encr_info encr;
1226 struct xfrm_algo_comp_info comp;
1227 } uinfo;
1228 struct sadb_alg desc;
1229 };
1230
1231 /* XFRM tunnel handlers. */
1232 struct xfrm_tunnel {
1233 int (*handler)(struct sk_buff *skb);
1234 int (*err_handler)(struct sk_buff *skb, __u32 info);
1235
1236 struct xfrm_tunnel *next;
1237 int priority;
1238 };
1239
1240 struct xfrm6_tunnel {
1241 int (*handler)(struct sk_buff *skb);
1242 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1243 int type, int code, int offset, __be32 info);
1244 struct xfrm6_tunnel *next;
1245 int priority;
1246 };
1247
1248 struct xfrm_state_walk {
1249 struct list_head list;
1250 unsigned long genid;
1251 struct xfrm_state *state;
1252 int count;
1253 u8 proto;
1254 };
1255
1256 struct xfrm_policy_walk {
1257 struct xfrm_policy *policy;
1258 int count;
1259 u8 type, cur_type;
1260 };
1261
1262 extern void xfrm_init(void);
1263 extern void xfrm4_init(void);
1264 extern void xfrm_state_init(void);
1265 extern void xfrm4_state_init(void);
1266 #ifdef CONFIG_XFRM
1267 extern int xfrm6_init(void);
1268 extern void xfrm6_fini(void);
1269 extern int xfrm6_state_init(void);
1270 extern void xfrm6_state_fini(void);
1271 #else
1272 static inline int xfrm6_init(void)
1273 {
1274 return 0;
1275 }
1276 static inline void xfrm6_fini(void)
1277 {
1278 ;
1279 }
1280 #endif
1281
1282 #ifdef CONFIG_XFRM_STATISTICS
1283 extern int xfrm_proc_init(void);
1284 #endif
1285
1286 extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
1287 extern int xfrm_state_walk(struct xfrm_state_walk *walk,
1288 int (*func)(struct xfrm_state *, int, void*), void *);
1289 extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
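/*
 * A hedged sketch (not part of the original header) of the intended use of
 * the state walk API declared above: initialise the walker, run it with a
 * callback, then release it.  The callback and caller names are hypothetical.
 */
#if 0
static int sketch_count_one_state(struct xfrm_state *x, int count, void *ptr)
{
	unsigned int *total = ptr;

	(*total)++;
	return 0;	/* a non-zero return stops the walk */
}

static unsigned int sketch_count_states(u8 proto)
{
	struct xfrm_state_walk walk;
	unsigned int total = 0;

	xfrm_state_walk_init(&walk, proto);
	xfrm_state_walk(&walk, sketch_count_one_state, &total);
	xfrm_state_walk_done(&walk);
	return total;
}
#endif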
1290 extern struct xfrm_state *xfrm_state_alloc(void);
1291 extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
1292 struct flowi *fl, struct xfrm_tmpl *tmpl,
1293 struct xfrm_policy *pol, int *err,
1294 unsigned short family);
1295 extern struct xfrm_state * xfrm_stateonly_find(xfrm_address_t *daddr,
1296 xfrm_address_t *saddr,
1297 unsigned short family,
1298 u8 mode, u8 proto, u32 reqid);
1299 extern int xfrm_state_check_expire(struct xfrm_state *x);
1300 extern void xfrm_state_insert(struct xfrm_state *x);
1301 extern int xfrm_state_add(struct xfrm_state *x);
1302 extern int xfrm_state_update(struct xfrm_state *x);
1303 extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family);
1304 extern struct xfrm_state *xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family);
1305 #ifdef CONFIG_XFRM_SUB_POLICY
1306 extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1307 int n, unsigned short family);
1308 extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1309 int n, unsigned short family);
1310 #else
1311 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1312 int n, unsigned short family)
1313 {
1314 return -ENOSYS;
1315 }
1316
1317 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1318 int n, unsigned short family)
1319 {
1320 return -ENOSYS;
1321 }
1322 #endif
1323
1324 struct xfrmk_sadinfo {
1325 u32 sadhcnt; /* current hash bkts */
1326 u32 sadhmcnt; /* max allowed hash bkts */
1327 u32 sadcnt; /* current running count */
1328 };
1329
1330 struct xfrmk_spdinfo {
1331 u32 incnt;
1332 u32 outcnt;
1333 u32 fwdcnt;
1334 u32 inscnt;
1335 u32 outscnt;
1336 u32 fwdscnt;
1337 u32 spdhcnt;
1338 u32 spdhmcnt;
1339 };
1340
1341 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
1342 extern int xfrm_state_delete(struct xfrm_state *x);
1343 extern int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info);
1344 extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si);
1345 extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si);
1346 extern int xfrm_replay_check(struct xfrm_state *x,
1347 struct sk_buff *skb, __be32 seq);
1348 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
1349 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
1350 extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1351 extern int xfrm_init_state(struct xfrm_state *x);
1352 extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1353 extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
1354 int encap_type);
1355 extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1356 extern int xfrm_output_resume(struct sk_buff *skb, int err);
1357 extern int xfrm_output(struct sk_buff *skb);
1358 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1359 extern int xfrm4_extract_header(struct sk_buff *skb);
1360 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1361 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1362 int encap_type);
1363 extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
1364 extern int xfrm4_rcv(struct sk_buff *skb);
1365
1366 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1367 {
1368 return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
1369 }
1370
1371 extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1372 extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1373 extern int xfrm4_output(struct sk_buff *skb);
1374 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1375 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1376 extern int xfrm6_extract_header(struct sk_buff *skb);
1377 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1378 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
1379 extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
1380 extern int xfrm6_rcv(struct sk_buff *skb);
1381 extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1382 xfrm_address_t *saddr, u8 proto);
1383 extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1384 extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1385 extern __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
1386 extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
1387 extern __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
1388 extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1389 extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1390 extern int xfrm6_output(struct sk_buff *skb);
1391 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1392 u8 **prevhdr);
1393
1394 #ifdef CONFIG_XFRM
1395 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1396 extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
1397 #else
1398 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1399 {
1400 return -ENOPROTOOPT;
1401 }
1402
1403 static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
1404 {
1405 /* should not happen */
1406 kfree_skb(skb);
1407 return 0;
1408 }
1409 #endif
1410
1411 struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp);
1412
1413 static inline void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1414 {
1415 walk->cur_type = XFRM_POLICY_TYPE_MAIN;
1416 walk->type = type;
1417 walk->policy = NULL;
1418 walk->count = 0;
1419 }
1420
1421 static inline void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
1422 {
1423 if (walk->policy != NULL) {
1424 xfrm_pol_put(walk->policy);
1425 walk->policy = NULL;
1426 }
1427 }
1428
1429 extern int xfrm_policy_walk(struct xfrm_policy_walk *walk,
1430 int (*func)(struct xfrm_policy *, int, int, void*), void *);
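/*
 * A hedged sketch (not part of the original header): the policy walk follows
 * the same init/walk/done pattern as the state walk above.  Names here are
 * hypothetical; the type argument would typically be XFRM_POLICY_TYPE_MAIN.
 */
#if 0
static int sketch_count_one_policy(struct xfrm_policy *xp, int dir, int count,
				   void *ptr)
{
	unsigned int *total = ptr;

	(*total)++;
	return 0;
}

static unsigned int sketch_count_policies(u8 type)
{
	struct xfrm_policy_walk walk;
	unsigned int total = 0;

	xfrm_policy_walk_init(&walk, type);
	xfrm_policy_walk(&walk, sketch_count_one_policy, &total);
	xfrm_policy_walk_done(&walk);
	return total;
}
#endif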
1431 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1432 struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
1433 struct xfrm_selector *sel,
1434 struct xfrm_sec_ctx *ctx, int delete,
1435 int *err);
1436 struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete, int *err);
1437 int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
1438 u32 xfrm_get_acqseq(void);
1439 extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1440 struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1441 xfrm_address_t *daddr, xfrm_address_t *saddr,
1442 int create, unsigned short family);
1443 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1444 extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
1445 struct flowi *fl, int family, int strict);
1446
1447 #ifdef CONFIG_XFRM_MIGRATE
1448 extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1449 struct xfrm_migrate *m, int num_bundles);
1450 extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
1451 extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1452 struct xfrm_migrate *m);
1453 extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1454 struct xfrm_migrate *m, int num_bundles);
1455 #endif
1456
1457 extern wait_queue_head_t km_waitq;
1458 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1459 extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
1460 extern int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
1461
1462 extern void xfrm_input_init(void);
1463 extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1464
1465 extern void xfrm_probe_algs(void);
1466 extern int xfrm_count_auth_supported(void);
1467 extern int xfrm_count_enc_supported(void);
1468 extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1469 extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1470 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1471 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1472 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1473 extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
1474 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
1475 extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
1476 extern struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len,
1477 int probe);
1478
1479 struct hash_desc;
1480 struct scatterlist;
1481 typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
1482 unsigned int);
1483
1484 extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm,
1485 int offset, int len, icv_update_fn_t icv_update);
1486
1487 static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
1488 int family)
1489 {
1490 switch (family) {
1491 default:
1492 case AF_INET:
1493 return (__force __u32)a->a4 - (__force __u32)b->a4;
1494 case AF_INET6:
1495 return ipv6_addr_cmp((struct in6_addr *)a,
1496 (struct in6_addr *)b);
1497 }
1498 }
1499
1500 static inline int xfrm_policy_id2dir(u32 index)
1501 {
1502 return index & 7;
1503 }
1504
1505 static inline int xfrm_aevent_is_on(void)
1506 {
1507 struct sock *nlsk;
1508 int ret = 0;
1509
1510 rcu_read_lock();
1511 nlsk = rcu_dereference(xfrm_nl);
1512 if (nlsk)
1513 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1514 rcu_read_unlock();
1515 return ret;
1516 }
1517
1518 static inline int xfrm_alg_len(struct xfrm_algo *alg)
1519 {
1520 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1521 }
1522
1523 #ifdef CONFIG_XFRM_MIGRATE
1524 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1525 {
1526 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1527 }
1528
1529 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1530 {
1531 int i;
1532 for (i = 0; i < n; i++)
1533 xfrm_state_put(*(states + i));
1534 }
1535
1536 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1537 {
1538 int i;
1539 for (i = 0; i < n; i++)
1540 xfrm_state_delete(*(states + i));
1541 }
1542 #endif
1543
1544 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1545 {
1546 return skb->sp->xvec[skb->sp->len - 1];
1547 }
1548
1549 #endif /* _NET_XFRM_H */