include/net/xfrm.h
1 #ifndef _NET_XFRM_H
2 #define _NET_XFRM_H
3
4 #include <linux/compiler.h>
5 #include <linux/xfrm.h>
6 #include <linux/spinlock.h>
7 #include <linux/list.h>
8 #include <linux/skbuff.h>
9 #include <linux/socket.h>
10 #include <linux/pfkeyv2.h>
11 #include <linux/ipsec.h>
12 #include <linux/in6.h>
13 #include <linux/mutex.h>
14 #include <linux/audit.h>
15
16 #include <net/sock.h>
17 #include <net/dst.h>
18 #include <net/ip.h>
19 #include <net/route.h>
20 #include <net/ipv6.h>
21 #include <net/ip6_fib.h>
22 #ifdef CONFIG_XFRM_STATISTICS
23 #include <net/snmp.h>
24 #endif
25
26 #define XFRM_PROTO_ESP 50
27 #define XFRM_PROTO_AH 51
28 #define XFRM_PROTO_COMP 108
29 #define XFRM_PROTO_IPIP 4
30 #define XFRM_PROTO_IPV6 41
31 #define XFRM_PROTO_ROUTING IPPROTO_ROUTING
32 #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
33
34 #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
35 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
36 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
37 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
38 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
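/*
 * For illustration (not from the original source): since __stringify()
 * expands its arguments first, the alias macros above resolve the family
 * and type/mode constants to their numeric values, e.g.
 *
 *	MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP)
 *		=> MODULE_ALIAS("xfrm-type-2-50")
 *	MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_TUNNEL)
 *		=> MODULE_ALIAS("xfrm-mode-2-1")
 *
 * and XFRM_ALIGN8() rounds a length up to the next multiple of 8,
 * e.g. XFRM_ALIGN8(13) == 16.
 */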
39
40 #ifdef CONFIG_XFRM_STATISTICS
41 DECLARE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
42 #define XFRM_INC_STATS(field) SNMP_INC_STATS(xfrm_statistics, field)
43 #define XFRM_INC_STATS_BH(field) SNMP_INC_STATS_BH(xfrm_statistics, field)
44 #define XFRM_INC_STATS_USER(field) SNMP_INC_STATS_USER(xfrm_statistics, field)
45 #else
46 #define XFRM_INC_STATS(field)
47 #define XFRM_INC_STATS_BH(field)
48 #define XFRM_INC_STATS_USER(field)
49 #endif
50
51 extern struct sock *xfrm_nl;
52 extern u32 sysctl_xfrm_aevent_etime;
53 extern u32 sysctl_xfrm_aevent_rseqth;
54 extern int sysctl_xfrm_larval_drop;
55 extern u32 sysctl_xfrm_acq_expires;
56
57 extern struct mutex xfrm_cfg_mutex;
58
59 /* Organization of SPD aka "XFRM rules"
60 ------------------------------------
61
62 Basic objects:
63 - policy rule, struct xfrm_policy (=SPD entry)
64 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
65 - instance of a transformer, struct xfrm_state (=SA)
66 - template to clone xfrm_state, struct xfrm_tmpl
67
68 The SPD is a plain linear list of xfrm_policy rules, ordered by priority.
69 (To be compatible with existing pfkeyv2 implementations,
70 many rules with a priority of 0x7fffffff are allowed to exist, and
71 such rules are ordered in an unpredictable way, thanks to the bsd folks.)
72
73 Lookup is a plain linear search until the first match with a selector.
74
75 If "action" is "block", then we prohibit the flow; otherwise:
76 if "xfrms_nr" is zero, the flow passes untransformed. Otherwise, the
77 policy entry has a list of up to XFRM_MAX_DEPTH transformations,
78 described by xfrm_tmpl templates. Each template is resolved
79 to a complete xfrm_state (see below) and we pack the bundle of
80 transformations into a dst_entry returned to the requestor.
81
82 dst -. xfrm .-> xfrm_state #1
83 |---. child .-> dst -. xfrm .-> xfrm_state #2
84 |---. child .-> dst -. xfrm .-> xfrm_state #3
85 |---. child .-> NULL
86
87 Bundles are cached in the xfrm_policy struct (field ->bundles).
88
89
90 Resolution of xfrm_tmpl
91 -----------------------
92 Template contains:
93 1. ->mode Mode: transport or tunnel
94 2. ->id.proto Protocol: AH/ESP/IPCOMP
95 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
96 Q: allow to resolve security gateway?
97 4. ->id.spi If not zero, static SPI.
98 5. ->saddr Local tunnel endpoint, ignored for transport mode.
99 6. ->algos List of allowed algos. Plain bitmask now.
100 Q: ealgos, aalgos, calgos. What a mess...
101 7. ->share Sharing mode.
102 Q: how to implement private sharing mode? To add struct sock* to
103 flow id?
104
105 Having this template, we search the SAD for entries with an
106 appropriate mode/proto/algo that are permitted by the selector.
107 If no appropriate entry is found, one is requested from the key manager.
108
109 PROBLEMS:
110 Q: How do we find all the bundles referring to a physical path for
111 PMTU discovery? It seems dst should contain a list of all parents...
112 and we would enter an infinite locking-hierarchy disaster.
113 No! It is easier: we will not search for them, we let them find us.
114 We add a genid to each dst plus a pointer to the genid of the raw IP route;
115 PMTU discovery will update the pmtu on the raw IP route and increase its genid.
116 dst_check() will see this at the top level and trigger a resync of the
117 metrics. Plus, it will be done via sk->sk_dst_cache. Solved.
118 */
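/*
 * Illustrative sketch (not part of this header): walking a resolved
 * bundle as drawn in the diagram above.  Each level of the dst chain
 * whose ->xfrm pointer is set corresponds to one xfrm_state that will
 * be applied to the packet.  bundle_depth() is a hypothetical helper:
 *
 *	static int bundle_depth(struct dst_entry *dst)
 *	{
 *		int depth = 0;
 *
 *		for (; dst != NULL; dst = dst->child)
 *			if (dst->xfrm != NULL)
 *				depth++;
 *		return depth;
 *	}
 */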
119
120 /* Full description of state of transformer. */
121 struct xfrm_state
122 {
123 /* Note: bydst is re-used during gc */
124 struct list_head all;
125 struct hlist_node bydst;
126 struct hlist_node bysrc;
127 struct hlist_node byspi;
128
129 atomic_t refcnt;
130 spinlock_t lock;
131
132 struct xfrm_id id;
133 struct xfrm_selector sel;
134
135 u32 genid;
136
137 /* Key manager bits */
138 struct {
139 u8 state;
140 u8 dying;
141 u32 seq;
142 } km;
143
144 /* Parameters of this state. */
145 struct {
146 u32 reqid;
147 u8 mode;
148 u8 replay_window;
149 u8 aalgo, ealgo, calgo;
150 u8 flags;
151 u16 family;
152 xfrm_address_t saddr;
153 int header_len;
154 int trailer_len;
155 } props;
156
157 struct xfrm_lifetime_cfg lft;
158
159 /* Data for transformer */
160 struct xfrm_algo *aalg;
161 struct xfrm_algo *ealg;
162 struct xfrm_algo *calg;
163 struct xfrm_algo_aead *aead;
164
165 /* Data for encapsulator */
166 struct xfrm_encap_tmpl *encap;
167
168 /* Data for care-of address */
169 xfrm_address_t *coaddr;
170
171 /* IPComp needs an IPIP tunnel for handling uncompressed packets */
172 struct xfrm_state *tunnel;
173
174 /* If a tunnel, number of users + 1 */
175 atomic_t tunnel_users;
176
177 /* State for replay detection */
178 struct xfrm_replay_state replay;
179
180 /* Replay detection state at the time we sent the last notification */
181 struct xfrm_replay_state preplay;
182
183 /* Internal flag; at the moment it only holds state for a delayed
184 * aevent.
185 */
186 u32 xflags;
187
188 /* Replay detection notification settings */
189 u32 replay_maxage;
190 u32 replay_maxdiff;
191
192 /* Replay detection notification timer */
193 struct timer_list rtimer;
194
195 /* Statistics */
196 struct xfrm_stats stats;
197
198 struct xfrm_lifetime_cur curlft;
199 struct timer_list timer;
200
201 /* Last used time */
202 unsigned long lastused;
203
204 /* Reference to data common to all the instances of this
205 * transformer. */
206 const struct xfrm_type *type;
207 struct xfrm_mode *inner_mode;
208 struct xfrm_mode *outer_mode;
209
210 /* Security context */
211 struct xfrm_sec_ctx *security;
212
213 /* Private data of this transformer, format is opaque,
214 * interpreted by xfrm_type methods. */
215 void *data;
216 };
217
218 /* xflags - make enum if more show up */
219 #define XFRM_TIME_DEFER 1
220
221 enum {
222 XFRM_STATE_VOID,
223 XFRM_STATE_ACQ,
224 XFRM_STATE_VALID,
225 XFRM_STATE_ERROR,
226 XFRM_STATE_EXPIRED,
227 XFRM_STATE_DEAD
228 };
229
230 /* callback structure passed from either netlink or pfkey */
231 struct km_event
232 {
233 union {
234 u32 hard;
235 u32 proto;
236 u32 byid;
237 u32 aevent;
238 u32 type;
239 } data;
240
241 u32 seq;
242 u32 pid;
243 u32 event;
244 };
245
246 struct net_device;
247 struct xfrm_type;
248 struct xfrm_dst;
249 struct xfrm_policy_afinfo {
250 unsigned short family;
251 struct dst_ops *dst_ops;
252 void (*garbage_collect)(void);
253 struct dst_entry *(*dst_lookup)(int tos, xfrm_address_t *saddr,
254 xfrm_address_t *daddr);
255 int (*get_saddr)(xfrm_address_t *saddr, xfrm_address_t *daddr);
256 struct dst_entry *(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy);
257 void (*decode_session)(struct sk_buff *skb,
258 struct flowi *fl,
259 int reverse);
260 int (*get_tos)(struct flowi *fl);
261 int (*init_path)(struct xfrm_dst *path,
262 struct dst_entry *dst,
263 int nfheader_len);
264 int (*fill_dst)(struct xfrm_dst *xdst,
265 struct net_device *dev);
266 };
267
268 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
269 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
270 extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
271 extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
272
273 struct xfrm_tmpl;
274 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
275 extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
276 extern int __xfrm_state_delete(struct xfrm_state *x);
277
278 struct xfrm_state_afinfo {
279 unsigned int family;
280 unsigned int proto;
281 unsigned int eth_proto;
282 struct module *owner;
283 const struct xfrm_type *type_map[IPPROTO_MAX];
284 struct xfrm_mode *mode_map[XFRM_MODE_MAX];
285 int (*init_flags)(struct xfrm_state *x);
286 void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
287 struct xfrm_tmpl *tmpl,
288 xfrm_address_t *daddr, xfrm_address_t *saddr);
289 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
290 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
291 int (*output)(struct sk_buff *skb);
292 int (*extract_input)(struct xfrm_state *x,
293 struct sk_buff *skb);
294 int (*extract_output)(struct xfrm_state *x,
295 struct sk_buff *skb);
296 int (*transport_finish)(struct sk_buff *skb,
297 int async);
298 };
299
300 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
301 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
302
303 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
304
305 struct xfrm_type
306 {
307 char *description;
308 struct module *owner;
309 __u8 proto;
310 __u8 flags;
311 #define XFRM_TYPE_NON_FRAGMENT 1
312 #define XFRM_TYPE_REPLAY_PROT 2
313 #define XFRM_TYPE_LOCAL_COADDR 4
314 #define XFRM_TYPE_REMOTE_COADDR 8
315
316 int (*init_state)(struct xfrm_state *x);
317 void (*destructor)(struct xfrm_state *);
318 int (*input)(struct xfrm_state *, struct sk_buff *skb);
319 int (*output)(struct xfrm_state *, struct sk_buff *pskb);
320 int (*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
321 int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
322 /* Estimate the maximal size of the result of transforming a datagram */
323 u32 (*get_mtu)(struct xfrm_state *, int size);
324 };
325
326 extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
327 extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
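/*
 * A hedged sketch of how a transform protocol registers itself, loosely
 * modelled on the ESP implementation; the example_* callbacks are
 * hypothetical and only indicate the expected signatures:
 *
 *	static const struct xfrm_type example_esp_type = {
 *		.description	= "ESP4",
 *		.owner		= THIS_MODULE,
 *		.proto		= IPPROTO_ESP,
 *		.flags		= XFRM_TYPE_REPLAY_PROT,
 *		.init_state	= example_init_state,
 *		.destructor	= example_destroy,
 *		.input		= example_input,
 *		.output		= example_output,
 *	};
 *
 *	err = xfrm_register_type(&example_esp_type, AF_INET);
 */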
328
329 struct xfrm_mode {
330 /*
331 * Remove encapsulation header.
332 *
333 * The IP header will be moved over the top of the encapsulation
334 * header.
335 *
336 * On entry, the transport header shall point to where the IP header
337 * should be and the network header shall be set to where the IP
338 * header currently is. skb->data shall point to the start of the
339 * payload.
340 */
341 int (*input2)(struct xfrm_state *x, struct sk_buff *skb);
342
343 /*
344 * This is the actual input entry point.
345 *
346 * For transport mode and equivalent this would be identical to
347 * input2 (which does not need to be set). While tunnel mode
348 * and equivalent would set this to the tunnel encapsulation function
349 * xfrm4_prepare_input that would in turn call input2.
350 */
351 int (*input)(struct xfrm_state *x, struct sk_buff *skb);
352
353 /*
354 * Add encapsulation header.
355 *
356 * On exit, the transport header will be set to the start of the
357 * encapsulation header to be filled in by x->type->output and
358 * the mac header will be set to the nextheader (protocol for
359 * IPv4) field of the extension header directly preceding the
360 * encapsulation header, or in its absence, that of the top IP
361 * header. The value of the network header will always point
362 * to the top IP header while skb->data will point to the payload.
363 */
364 int (*output2)(struct xfrm_state *x,struct sk_buff *skb);
365
366 /*
367 * This is the actual output entry point.
368 *
369 * For transport mode and equivalent this would be identical to
370 * output2 (which does not need to be set). While tunnel mode
371 * and equivalent would set this to a tunnel encapsulation function
372 * (xfrm4_prepare_output or xfrm6_prepare_output) that would in turn
373 * call output2.
374 */
375 int (*output)(struct xfrm_state *x, struct sk_buff *skb);
376
377 struct xfrm_state_afinfo *afinfo;
378 struct module *owner;
379 unsigned int encap;
380 int flags;
381 };
382
383 /* Flags for xfrm_mode. */
384 enum {
385 XFRM_MODE_FLAG_TUNNEL = 1,
386 };
387
388 extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
389 extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
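/*
 * Likewise, an encapsulation mode is registered per family.  A minimal
 * sketch, assuming hypothetical example_mode_* handlers with the
 * signatures declared in struct xfrm_mode above:
 *
 *	static struct xfrm_mode example_transport_mode = {
 *		.input	= example_mode_input,
 *		.output	= example_mode_output,
 *		.owner	= THIS_MODULE,
 *		.encap	= XFRM_MODE_TRANSPORT,
 *	};
 *
 *	err = xfrm_register_mode(&example_transport_mode, AF_INET);
 */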
390
391 struct xfrm_tmpl
392 {
393 /* id in template is interpreted as:
394 * daddr - destination of tunnel, may be zero for transport mode.
395 * spi - zero to acquire spi. Not zero if spi is static, then
396 * daddr must be fixed too.
397 * proto - AH/ESP/IPCOMP
398 */
399 struct xfrm_id id;
400
401 /* Source address of tunnel. Ignored if it is not a tunnel. */
402 xfrm_address_t saddr;
403
404 unsigned short encap_family;
405
406 __u32 reqid;
407
408 /* Mode: transport, tunnel etc. */
409 __u8 mode;
410
411 /* Sharing mode: unique, this session only, this user only etc. */
412 __u8 share;
413
414 /* May skip this transformation if no SA is found */
415 __u8 optional;
416
417 /* Bit mask of algos allowed for acquisition */
418 __u32 aalgos;
419 __u32 ealgos;
420 __u32 calgos;
421 };
422
423 #define XFRM_MAX_DEPTH 6
424
425 struct xfrm_policy
426 {
427 struct xfrm_policy *next;
428 struct list_head bytype;
429 struct hlist_node bydst;
430 struct hlist_node byidx;
431
432 /* This lock protects all elements except for the list entry. */
433 rwlock_t lock;
434 atomic_t refcnt;
435 struct timer_list timer;
436
437 u32 priority;
438 u32 index;
439 struct xfrm_selector selector;
440 struct xfrm_lifetime_cfg lft;
441 struct xfrm_lifetime_cur curlft;
442 struct dst_entry *bundles;
443 u16 family;
444 u8 type;
445 u8 action;
446 u8 flags;
447 u8 dead;
448 u8 xfrm_nr;
449 /* XXX 1 byte hole, try to pack */
450 struct xfrm_sec_ctx *security;
451 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
452 };
453
454 struct xfrm_migrate {
455 xfrm_address_t old_daddr;
456 xfrm_address_t old_saddr;
457 xfrm_address_t new_daddr;
458 xfrm_address_t new_saddr;
459 u8 proto;
460 u8 mode;
461 u16 reserved;
462 u32 reqid;
463 u16 old_family;
464 u16 new_family;
465 };
466
467 #define XFRM_KM_TIMEOUT 30
468 /* which seqno */
469 #define XFRM_REPLAY_SEQ 1
470 #define XFRM_REPLAY_OSEQ 2
471 #define XFRM_REPLAY_SEQ_MASK 3
472 /* what happened */
473 #define XFRM_REPLAY_UPDATE XFRM_AE_CR
474 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
475
476 /* default aevent timeout in units of 100ms */
477 #define XFRM_AE_ETIME 10
478 /* Async Event timer multiplier */
479 #define XFRM_AE_ETH_M 10
480 /* default seq threshold size */
481 #define XFRM_AE_SEQT_SIZE 2
482
483 struct xfrm_mgr
484 {
485 struct list_head list;
486 char *id;
487 int (*notify)(struct xfrm_state *x, struct km_event *c);
488 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
489 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
490 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
491 int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
492 int (*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
493 int (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles);
494 };
495
496 extern int xfrm_register_km(struct xfrm_mgr *km);
497 extern int xfrm_unregister_km(struct xfrm_mgr *km);
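/*
 * Key managers (the netlink and pfkey interfaces today) plug in through
 * struct xfrm_mgr.  A minimal sketch, with hypothetical example_*
 * callbacks standing in for a real key manager's handlers:
 *
 *	static struct xfrm_mgr example_mgr = {
 *		.id		= "example_km",
 *		.notify		= example_notify,
 *		.acquire	= example_acquire,
 *		.compile_policy	= example_compile_policy,
 *		.notify_policy	= example_notify_policy,
 *	};
 *
 *	err = xfrm_register_km(&example_mgr);
 */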
498
499 extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
500
501 /*
502 * This structure is used while packets are being transformed by
503 * IPsec. As soon as the packet leaves IPsec, the area beyond the
504 * generic IP part may be overwritten.
505 */
506 struct xfrm_skb_cb {
507 union {
508 struct inet_skb_parm h4;
509 struct inet6_skb_parm h6;
510 } header;
511
512 /* Sequence number for replay protection. */
513 union {
514 u64 output;
515 __be32 input;
516 } seq;
517 };
518
519 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
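/*
 * Usage sketch: the transform input/output paths stash the replay
 * sequence number here before handing the skb to x->type->input/output,
 * roughly along the lines of
 *
 *	XFRM_SKB_CB(skb)->seq.output = ++x->replay.oseq;	output side
 *	XFRM_SKB_CB(skb)->seq.input  = seq;			input side
 *
 * (condensed from the generic input/output code, not a verbatim quote).
 */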
520
521 /*
522 * This structure is used by the afinfo prepare_input/prepare_output functions
523 * to transmit header information to the mode input/output functions.
524 */
525 struct xfrm_mode_skb_cb {
526 union {
527 struct inet_skb_parm h4;
528 struct inet6_skb_parm h6;
529 } header;
530
531 /* Copied from the header for IPv4; set to zero (id) and DF (frag_off) for IPv6. */
532 __be16 id;
533 __be16 frag_off;
534
535 /* TOS for IPv4, class for IPv6. */
536 u8 tos;
537
538 /* TTL for IPv4, hop limit for IPv6. */
539 u8 ttl;
540
541 /* Protocol for IPv4, NH for IPv6. */
542 u8 protocol;
543
544 /* Used by IPv6 only, zero for IPv4. */
545 u8 flow_lbl[3];
546 };
547
548 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
549
550 /*
551 * This structure is used by the input processing to locate the SPI and
552 * related information.
553 */
554 struct xfrm_spi_skb_cb {
555 union {
556 struct inet_skb_parm h4;
557 struct inet6_skb_parm h6;
558 } header;
559
560 unsigned int daddroff;
561 unsigned int family;
562 };
563
564 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
565
566 /* Audit Information */
567 struct xfrm_audit
568 {
569 u32 loginuid;
570 u32 secid;
571 };
572
573 #ifdef CONFIG_AUDITSYSCALL
574 static inline struct audit_buffer *xfrm_audit_start(const char *op)
575 {
576 struct audit_buffer *audit_buf = NULL;
577
578 if (audit_enabled == 0)
579 return NULL;
580 audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
581 AUDIT_MAC_IPSEC_EVENT);
582 if (audit_buf == NULL)
583 return NULL;
584 audit_log_format(audit_buf, "op=%s", op);
585 return audit_buf;
586 }
587
588 static inline void xfrm_audit_helper_usrinfo(u32 auid, u32 secid,
589 struct audit_buffer *audit_buf)
590 {
591 char *secctx;
592 u32 secctx_len;
593
594 audit_log_format(audit_buf, " auid=%u", auid);
595 if (secid != 0 &&
596 security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
597 audit_log_format(audit_buf, " subj=%s", secctx);
598 security_release_secctx(secctx, secctx_len);
599 } else
600 audit_log_task_context(audit_buf);
601 }
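/*
 * Putting the two helpers above together, an audit record is typically
 * emitted like this (a condensed sketch of what the xfrm_audit_*
 * functions do; "SPD-add" is just an example op string):
 *
 *	struct audit_buffer *audit_buf;
 *
 *	audit_buf = xfrm_audit_start("SPD-add");
 *	if (audit_buf == NULL)
 *		return;
 *	xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
 *	audit_log_format(audit_buf, " res=%u", result);
 *	audit_log_end(audit_buf);
 */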
602
603 extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
604 u32 auid, u32 secid);
605 extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
606 u32 auid, u32 secid);
607 extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
608 u32 auid, u32 secid);
609 extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
610 u32 auid, u32 secid);
611 extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
612 struct sk_buff *skb);
613 extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
614 extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
615 __be32 net_spi, __be32 net_seq);
616 extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
617 struct sk_buff *skb, u8 proto);
618 #else
619 #define xfrm_audit_policy_add(x, r, a, s) do { ; } while (0)
620 #define xfrm_audit_policy_delete(x, r, a, s) do { ; } while (0)
621 #define xfrm_audit_state_add(x, r, a, s) do { ; } while (0)
622 #define xfrm_audit_state_delete(x, r, a, s) do { ; } while (0)
623 #define xfrm_audit_state_replay_overflow(x, s) do { ; } while (0)
624 #define xfrm_audit_state_notfound_simple(s, f) do { ; } while (0)
625 #define xfrm_audit_state_notfound(s, f, sp, sq) do { ; } while (0)
626 #define xfrm_audit_state_icvfail(x, s, p) do { ; } while (0)
627 #endif /* CONFIG_AUDITSYSCALL */
628
629 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
630 {
631 if (likely(policy != NULL))
632 atomic_inc(&policy->refcnt);
633 }
634
635 extern void xfrm_policy_destroy(struct xfrm_policy *policy);
636
637 static inline void xfrm_pol_put(struct xfrm_policy *policy)
638 {
639 if (atomic_dec_and_test(&policy->refcnt))
640 xfrm_policy_destroy(policy);
641 }
642
643 #ifdef CONFIG_XFRM_SUB_POLICY
644 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
645 {
646 int i;
647 for (i = npols - 1; i >= 0; --i)
648 xfrm_pol_put(pols[i]);
649 }
650 #else
651 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
652 {
653 xfrm_pol_put(pols[0]);
654 }
655 #endif
656
657 extern void __xfrm_state_destroy(struct xfrm_state *);
658
659 static inline void __xfrm_state_put(struct xfrm_state *x)
660 {
661 atomic_dec(&x->refcnt);
662 }
663
664 static inline void xfrm_state_put(struct xfrm_state *x)
665 {
666 if (atomic_dec_and_test(&x->refcnt))
667 __xfrm_state_destroy(x);
668 }
669
670 static inline void xfrm_state_hold(struct xfrm_state *x)
671 {
672 atomic_inc(&x->refcnt);
673 }
674
675 static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
676 {
677 __be32 *a1 = token1;
678 __be32 *a2 = token2;
679 int pdw;
680 int pbi;
681
682 pdw = prefixlen >> 5; /* num of whole __u32 in prefix */
683 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
684
685 if (pdw)
686 if (memcmp(a1, a2, pdw << 2))
687 return 0;
688
689 if (pbi) {
690 __be32 mask;
691
692 mask = htonl((0xffffffff) << (32 - pbi));
693
694 if ((a1[pdw] ^ a2[pdw]) & mask)
695 return 0;
696 }
697
698 return 1;
699 }
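/*
 * addr_match() compares the leading prefixlen bits of two addresses held
 * in network byte order.  For example, IPv4 selector matching uses it
 * roughly as (sketch, see xfrm_selector_match()):
 *
 *	addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
 *	addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s)
 */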
700
701 static __inline__
702 __be16 xfrm_flowi_sport(struct flowi *fl)
703 {
704 __be16 port;
705 switch(fl->proto) {
706 case IPPROTO_TCP:
707 case IPPROTO_UDP:
708 case IPPROTO_UDPLITE:
709 case IPPROTO_SCTP:
710 port = fl->fl_ip_sport;
711 break;
712 case IPPROTO_ICMP:
713 case IPPROTO_ICMPV6:
714 port = htons(fl->fl_icmp_type);
715 break;
716 case IPPROTO_MH:
717 port = htons(fl->fl_mh_type);
718 break;
719 default:
720 port = 0; /*XXX*/
721 }
722 return port;
723 }
724
725 static __inline__
726 __be16 xfrm_flowi_dport(struct flowi *fl)
727 {
728 __be16 port;
729 switch(fl->proto) {
730 case IPPROTO_TCP:
731 case IPPROTO_UDP:
732 case IPPROTO_UDPLITE:
733 case IPPROTO_SCTP:
734 port = fl->fl_ip_dport;
735 break;
736 case IPPROTO_ICMP:
737 case IPPROTO_ICMPV6:
738 port = htons(fl->fl_icmp_code);
739 break;
740 default:
741 port = 0; /*XXX*/
742 }
743 return port;
744 }
745
746 extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
747 unsigned short family);
748
749 #ifdef CONFIG_SECURITY_NETWORK_XFRM
750 /* If neither has a context --> match
751 * Otherwise, both must have a context and the sids, doi, alg must match
752 */
753 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
754 {
755 return ((!s1 && !s2) ||
756 (s1 && s2 &&
757 (s1->ctx_sid == s2->ctx_sid) &&
758 (s1->ctx_doi == s2->ctx_doi) &&
759 (s1->ctx_alg == s2->ctx_alg)));
760 }
761 #else
762 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
763 {
764 return 1;
765 }
766 #endif
767
768 /* A struct encoding a bundle of transformations to apply to some set of flows.
769 *
770 * dst->child points to the next element of the bundle.
771 * dst->xfrm points to an instance of a transformer.
772 *
773 * Due to unfortunate limitations of the current routing cache, which we
774 * have no time to fix, it mirrors struct rtable and is bound to the same
775 * routing key, including saddr and daddr. However, we can have many
776 * bundles differing by session id. All the bundles grow from a parent
777 * policy rule.
778 */
779 struct xfrm_dst
780 {
781 union {
782 struct dst_entry dst;
783 struct rtable rt;
784 struct rt6_info rt6;
785 } u;
786 struct dst_entry *route;
787 #ifdef CONFIG_XFRM_SUB_POLICY
788 struct flowi *origin;
789 struct xfrm_selector *partner;
790 #endif
791 u32 genid;
792 u32 route_mtu_cached;
793 u32 child_mtu_cached;
794 u32 route_cookie;
795 u32 path_cookie;
796 };
797
798 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
799 {
800 dst_release(xdst->route);
801 if (likely(xdst->u.dst.xfrm))
802 xfrm_state_put(xdst->u.dst.xfrm);
803 #ifdef CONFIG_XFRM_SUB_POLICY
804 kfree(xdst->origin);
805 xdst->origin = NULL;
806 kfree(xdst->partner);
807 xdst->partner = NULL;
808 #endif
809 }
810
811 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
812
813 struct sec_path
814 {
815 atomic_t refcnt;
816 int len;
817 struct xfrm_state *xvec[XFRM_MAX_DEPTH];
818 };
819
820 static inline struct sec_path *
821 secpath_get(struct sec_path *sp)
822 {
823 if (sp)
824 atomic_inc(&sp->refcnt);
825 return sp;
826 }
827
828 extern void __secpath_destroy(struct sec_path *sp);
829
830 static inline void
831 secpath_put(struct sec_path *sp)
832 {
833 if (sp && atomic_dec_and_test(&sp->refcnt))
834 __secpath_destroy(sp);
835 }
836
837 extern struct sec_path *secpath_dup(struct sec_path *src);
838
839 static inline void
840 secpath_reset(struct sk_buff *skb)
841 {
842 #ifdef CONFIG_XFRM
843 secpath_put(skb->sp);
844 skb->sp = NULL;
845 #endif
846 }
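/*
 * A condensed sketch (the drop label and the xfrm_state *x are assumed to
 * come from the caller) of how the input path records each transform it
 * has applied in the skb's sec_path:
 *
 *	if (skb->sp == NULL || atomic_read(&skb->sp->refcnt) != 1) {
 *		struct sec_path *sp = secpath_dup(skb->sp);
 *
 *		if (sp == NULL)
 *			goto drop;
 *		secpath_put(skb->sp);
 *		skb->sp = sp;
 *	}
 *	if (skb->sp->len == XFRM_MAX_DEPTH)
 *		goto drop;
 *	skb->sp->xvec[skb->sp->len++] = x;
 */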
847
848 static inline int
849 xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
850 {
851 switch (family) {
852 case AF_INET:
853 return addr->a4 == 0;
854 case AF_INET6:
855 return ipv6_addr_any((struct in6_addr *)&addr->a6);
856 }
857 return 0;
858 }
859
860 static inline int
861 __xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
862 {
863 return (tmpl->saddr.a4 &&
864 tmpl->saddr.a4 != x->props.saddr.a4);
865 }
866
867 static inline int
868 __xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
869 {
870 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
871 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
872 }
873
874 static inline int
875 xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
876 {
877 switch (family) {
878 case AF_INET:
879 return __xfrm4_state_addr_cmp(tmpl, x);
880 case AF_INET6:
881 return __xfrm6_state_addr_cmp(tmpl, x);
882 }
883 return !0;
884 }
885
886 #ifdef CONFIG_XFRM
887 extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
888
889 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
890 struct sk_buff *skb,
891 unsigned int family, int reverse)
892 {
893 int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
894
895 if (sk && sk->sk_policy[XFRM_POLICY_IN])
896 return __xfrm_policy_check(sk, ndir, skb, family);
897
898 return (!xfrm_policy_count[dir] && !skb->sp) ||
899 (skb->dst->flags & DST_NOPOLICY) ||
900 __xfrm_policy_check(sk, ndir, skb, family);
901 }
902
903 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
904 {
905 return __xfrm_policy_check2(sk, dir, skb, family, 0);
906 }
907
908 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
909 {
910 return xfrm_policy_check(sk, dir, skb, AF_INET);
911 }
912
913 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
914 {
915 return xfrm_policy_check(sk, dir, skb, AF_INET6);
916 }
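/*
 * Typical use in a receive path (e.g. a transport protocol handler):
 * drop the packet unless it is permitted by the inbound policy.  The
 * socket may be NULL when no socket context is available yet:
 *
 *	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 *		goto discard;
 */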
917
918 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
919 struct sk_buff *skb)
920 {
921 return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
922 }
923
924 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
925 struct sk_buff *skb)
926 {
927 return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
928 }
929
930 extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
931 unsigned int family, int reverse);
932
933 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
934 unsigned int family)
935 {
936 return __xfrm_decode_session(skb, fl, family, 0);
937 }
938
939 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
940 struct flowi *fl,
941 unsigned int family)
942 {
943 return __xfrm_decode_session(skb, fl, family, 1);
944 }
945
946 extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
947
948 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
949 {
950 return !xfrm_policy_count[XFRM_POLICY_OUT] ||
951 (skb->dst->flags & DST_NOXFRM) ||
952 __xfrm_route_forward(skb, family);
953 }
954
955 static inline int xfrm4_route_forward(struct sk_buff *skb)
956 {
957 return xfrm_route_forward(skb, AF_INET);
958 }
959
960 static inline int xfrm6_route_forward(struct sk_buff *skb)
961 {
962 return xfrm_route_forward(skb, AF_INET6);
963 }
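/*
 * The forwarding path asks the corresponding question for forwarded
 * packets, e.g. in the IPv4 forwarding code:
 *
 *	if (!xfrm4_route_forward(skb))
 *		goto drop;
 */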
964
965 extern int __xfrm_sk_clone_policy(struct sock *sk);
966
967 static inline int xfrm_sk_clone_policy(struct sock *sk)
968 {
969 if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
970 return __xfrm_sk_clone_policy(sk);
971 return 0;
972 }
973
974 extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
975
976 static inline void xfrm_sk_free_policy(struct sock *sk)
977 {
978 if (unlikely(sk->sk_policy[0] != NULL)) {
979 xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
980 sk->sk_policy[0] = NULL;
981 }
982 if (unlikely(sk->sk_policy[1] != NULL)) {
983 xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
984 sk->sk_policy[1] = NULL;
985 }
986 }
987
988 #else
989
990 static inline void xfrm_sk_free_policy(struct sock *sk) {}
991 static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
992 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
993 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
994 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
995 {
996 return 1;
997 }
998 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
999 {
1000 return 1;
1001 }
1002 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1003 {
1004 return 1;
1005 }
1006 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1007 struct flowi *fl,
1008 unsigned int family)
1009 {
1010 return -ENOSYS;
1011 }
1012 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1013 struct sk_buff *skb)
1014 {
1015 return 1;
1016 }
1017 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1018 struct sk_buff *skb)
1019 {
1020 return 1;
1021 }
1022 #endif
1023
1024 static __inline__
1025 xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
1026 {
1027 switch (family){
1028 case AF_INET:
1029 return (xfrm_address_t *)&fl->fl4_dst;
1030 case AF_INET6:
1031 return (xfrm_address_t *)&fl->fl6_dst;
1032 }
1033 return NULL;
1034 }
1035
1036 static __inline__
1037 xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
1038 {
1039 switch (family){
1040 case AF_INET:
1041 return (xfrm_address_t *)&fl->fl4_src;
1042 case AF_INET6:
1043 return (xfrm_address_t *)&fl->fl6_src;
1044 }
1045 return NULL;
1046 }
1047
1048 static __inline__ int
1049 __xfrm4_state_addr_check(struct xfrm_state *x,
1050 xfrm_address_t *daddr, xfrm_address_t *saddr)
1051 {
1052 if (daddr->a4 == x->id.daddr.a4 &&
1053 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1054 return 1;
1055 return 0;
1056 }
1057
1058 static __inline__ int
1059 __xfrm6_state_addr_check(struct xfrm_state *x,
1060 xfrm_address_t *daddr, xfrm_address_t *saddr)
1061 {
1062 if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1063 (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
1064 ipv6_addr_any((struct in6_addr *)saddr) ||
1065 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1066 return 1;
1067 return 0;
1068 }
1069
1070 static __inline__ int
1071 xfrm_state_addr_check(struct xfrm_state *x,
1072 xfrm_address_t *daddr, xfrm_address_t *saddr,
1073 unsigned short family)
1074 {
1075 switch (family) {
1076 case AF_INET:
1077 return __xfrm4_state_addr_check(x, daddr, saddr);
1078 case AF_INET6:
1079 return __xfrm6_state_addr_check(x, daddr, saddr);
1080 }
1081 return 0;
1082 }
1083
1084 static __inline__ int
1085 xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
1086 unsigned short family)
1087 {
1088 switch (family) {
1089 case AF_INET:
1090 return __xfrm4_state_addr_check(x,
1091 (xfrm_address_t *)&fl->fl4_dst,
1092 (xfrm_address_t *)&fl->fl4_src);
1093 case AF_INET6:
1094 return __xfrm6_state_addr_check(x,
1095 (xfrm_address_t *)&fl->fl6_dst,
1096 (xfrm_address_t *)&fl->fl6_src);
1097 }
1098 return 0;
1099 }
1100
1101 static inline int xfrm_state_kern(struct xfrm_state *x)
1102 {
1103 return atomic_read(&x->tunnel_users);
1104 }
1105
1106 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1107 {
1108 return (!userproto || proto == userproto ||
1109 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1110 proto == IPPROTO_ESP ||
1111 proto == IPPROTO_COMP)));
1112 }
1113
1114 /*
1115 * xfrm algorithm information
1116 */
1117 struct xfrm_algo_aead_info {
1118 u16 icv_truncbits;
1119 };
1120
1121 struct xfrm_algo_auth_info {
1122 u16 icv_truncbits;
1123 u16 icv_fullbits;
1124 };
1125
1126 struct xfrm_algo_encr_info {
1127 u16 blockbits;
1128 u16 defkeybits;
1129 };
1130
1131 struct xfrm_algo_comp_info {
1132 u16 threshold;
1133 };
1134
1135 struct xfrm_algo_desc {
1136 char *name;
1137 char *compat;
1138 u8 available:1;
1139 union {
1140 struct xfrm_algo_aead_info aead;
1141 struct xfrm_algo_auth_info auth;
1142 struct xfrm_algo_encr_info encr;
1143 struct xfrm_algo_comp_info comp;
1144 } uinfo;
1145 struct sadb_alg desc;
1146 };
1147
1148 /* XFRM tunnel handlers. */
1149 struct xfrm_tunnel {
1150 int (*handler)(struct sk_buff *skb);
1151 int (*err_handler)(struct sk_buff *skb, __u32 info);
1152
1153 struct xfrm_tunnel *next;
1154 int priority;
1155 };
1156
1157 struct xfrm6_tunnel {
1158 int (*handler)(struct sk_buff *skb);
1159 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1160 int type, int code, int offset, __be32 info);
1161 struct xfrm6_tunnel *next;
1162 int priority;
1163 };
1164
1165 struct xfrm_state_walk {
1166 struct xfrm_state *state;
1167 int count;
1168 u8 proto;
1169 };
1170
1171 struct xfrm_policy_walk {
1172 struct xfrm_policy *policy;
1173 int count;
1174 u8 type, cur_type;
1175 };
1176
1177 extern void xfrm_init(void);
1178 extern void xfrm4_init(void);
1179 extern void xfrm_state_init(void);
1180 extern void xfrm4_state_init(void);
1181 #ifdef CONFIG_XFRM
1182 extern int xfrm6_init(void);
1183 extern void xfrm6_fini(void);
1184 extern int xfrm6_state_init(void);
1185 extern void xfrm6_state_fini(void);
1186 #else
1187 static inline int xfrm6_init(void)
1188 {
1189 return 0;
1190 }
1191 static inline void xfrm6_fini(void)
1192 {
1193 ;
1194 }
1195 #endif
1196
1197 #ifdef CONFIG_XFRM_STATISTICS
1198 extern int xfrm_proc_init(void);
1199 #endif
1200
1201 static inline void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
1202 {
1203 walk->proto = proto;
1204 walk->state = NULL;
1205 walk->count = 0;
1206 }
1207
1208 static inline void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1209 {
1210 if (walk->state != NULL) {
1211 xfrm_state_put(walk->state);
1212 walk->state = NULL;
1213 }
1214 }
1215
1216 extern int xfrm_state_walk(struct xfrm_state_walk *walk,
1217 int (*func)(struct xfrm_state *, int, void*), void *);
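/*
 * Dumping SAs with the walk API, as a sketch; dump_one_state() is a
 * hypothetical callback with the signature expected by xfrm_state_walk():
 *
 *	struct xfrm_state_walk walk;
 *	int err;
 *
 *	xfrm_state_walk_init(&walk, IPPROTO_ESP);
 *	err = xfrm_state_walk(&walk, dump_one_state, data);
 *	xfrm_state_walk_done(&walk);
 */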
1218 extern struct xfrm_state *xfrm_state_alloc(void);
1219 extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
1220 struct flowi *fl, struct xfrm_tmpl *tmpl,
1221 struct xfrm_policy *pol, int *err,
1222 unsigned short family);
1223 extern struct xfrm_state * xfrm_stateonly_find(xfrm_address_t *daddr,
1224 xfrm_address_t *saddr,
1225 unsigned short family,
1226 u8 mode, u8 proto, u32 reqid);
1227 extern int xfrm_state_check_expire(struct xfrm_state *x);
1228 extern void xfrm_state_insert(struct xfrm_state *x);
1229 extern int xfrm_state_add(struct xfrm_state *x);
1230 extern int xfrm_state_update(struct xfrm_state *x);
1231 extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family);
1232 extern struct xfrm_state *xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family);
1233 #ifdef CONFIG_XFRM_SUB_POLICY
1234 extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1235 int n, unsigned short family);
1236 extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1237 int n, unsigned short family);
1238 #else
1239 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1240 int n, unsigned short family)
1241 {
1242 return -ENOSYS;
1243 }
1244
1245 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1246 int n, unsigned short family)
1247 {
1248 return -ENOSYS;
1249 }
1250 #endif
1251
1252 struct xfrmk_sadinfo {
1253 u32 sadhcnt; /* current hash bkts */
1254 u32 sadhmcnt; /* max allowed hash bkts */
1255 u32 sadcnt; /* current running count */
1256 };
1257
1258 struct xfrmk_spdinfo {
1259 u32 incnt;
1260 u32 outcnt;
1261 u32 fwdcnt;
1262 u32 inscnt;
1263 u32 outscnt;
1264 u32 fwdscnt;
1265 u32 spdhcnt;
1266 u32 spdhmcnt;
1267 };
1268
1269 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
1270 extern int xfrm_state_delete(struct xfrm_state *x);
1271 extern int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info);
1272 extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si);
1273 extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si);
1274 extern int xfrm_replay_check(struct xfrm_state *x,
1275 struct sk_buff *skb, __be32 seq);
1276 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
1277 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
1278 extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1279 extern int xfrm_init_state(struct xfrm_state *x);
1280 extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1281 extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
1282 int encap_type);
1283 extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1284 extern int xfrm_output_resume(struct sk_buff *skb, int err);
1285 extern int xfrm_output(struct sk_buff *skb);
1286 extern int xfrm4_extract_header(struct sk_buff *skb);
1287 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1288 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1289 int encap_type);
1290 extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
1291 extern int xfrm4_rcv(struct sk_buff *skb);
1292
1293 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1294 {
1295 return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
1296 }
1297
1298 extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1299 extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1300 extern int xfrm4_output(struct sk_buff *skb);
1301 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1302 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1303 extern int xfrm6_extract_header(struct sk_buff *skb);
1304 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1305 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
1306 extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
1307 extern int xfrm6_rcv(struct sk_buff *skb);
1308 extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1309 xfrm_address_t *saddr, u8 proto);
1310 extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1311 extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1312 extern __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
1313 extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
1314 extern __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
1315 extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1316 extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1317 extern int xfrm6_output(struct sk_buff *skb);
1318 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1319 u8 **prevhdr);
1320
1321 #ifdef CONFIG_XFRM
1322 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1323 extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
1324 #else
1325 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1326 {
1327 return -ENOPROTOOPT;
1328 }
1329
1330 static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
1331 {
1332 /* should not happen */
1333 kfree_skb(skb);
1334 return 0;
1335 }
1336 #endif
1337
1338 struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp);
1339
1340 static inline void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1341 {
1342 walk->cur_type = XFRM_POLICY_TYPE_MAIN;
1343 walk->type = type;
1344 walk->policy = NULL;
1345 walk->count = 0;
1346 }
1347
1348 static inline void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
1349 {
1350 if (walk->policy != NULL) {
1351 xfrm_pol_put(walk->policy);
1352 walk->policy = NULL;
1353 }
1354 }
1355
1356 extern int xfrm_policy_walk(struct xfrm_policy_walk *walk,
1357 int (*func)(struct xfrm_policy *, int, int, void*), void *);
1358 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1359 struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
1360 struct xfrm_selector *sel,
1361 struct xfrm_sec_ctx *ctx, int delete,
1362 int *err);
1363 struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete, int *err);
1364 int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
1365 u32 xfrm_get_acqseq(void);
1366 extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1367 struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1368 xfrm_address_t *daddr, xfrm_address_t *saddr,
1369 int create, unsigned short family);
1370 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1371 extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
1372 struct flowi *fl, int family, int strict);
1373
1374 #ifdef CONFIG_XFRM_MIGRATE
1375 extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1376 struct xfrm_migrate *m, int num_bundles);
1377 extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
1378 extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1379 struct xfrm_migrate *m);
1380 extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1381 struct xfrm_migrate *m, int num_bundles);
1382 #endif
1383
1384 extern wait_queue_head_t km_waitq;
1385 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1386 extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
1387 extern int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
1388
1389 extern void xfrm_input_init(void);
1390 extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1391
1392 extern void xfrm_probe_algs(void);
1393 extern int xfrm_count_auth_supported(void);
1394 extern int xfrm_count_enc_supported(void);
1395 extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1396 extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1397 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1398 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1399 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1400 extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
1401 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
1402 extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
1403 extern struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len,
1404 int probe);
1405
1406 struct hash_desc;
1407 struct scatterlist;
1408 typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
1409 unsigned int);
1410
1411 extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm,
1412 int offset, int len, icv_update_fn_t icv_update);
1413
1414 static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
1415 int family)
1416 {
1417 switch (family) {
1418 default:
1419 case AF_INET:
1420 return (__force __u32)a->a4 - (__force __u32)b->a4;
1421 case AF_INET6:
1422 return ipv6_addr_cmp((struct in6_addr *)a,
1423 (struct in6_addr *)b);
1424 }
1425 }
1426
1427 static inline int xfrm_policy_id2dir(u32 index)
1428 {
1429 return index & 7;
1430 }
1431
1432 static inline int xfrm_aevent_is_on(void)
1433 {
1434 struct sock *nlsk;
1435 int ret = 0;
1436
1437 rcu_read_lock();
1438 nlsk = rcu_dereference(xfrm_nl);
1439 if (nlsk)
1440 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1441 rcu_read_unlock();
1442 return ret;
1443 }
1444
1445 static inline int xfrm_alg_len(struct xfrm_algo *alg)
1446 {
1447 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1448 }
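/*
 * Note that alg_key_len is in bits, so e.g. for a 160-bit HMAC-SHA1 key
 * xfrm_alg_len() evaluates to sizeof(struct xfrm_algo) + 20 bytes.
 */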
1449
1450 #ifdef CONFIG_XFRM_MIGRATE
1451 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1452 {
1453 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1454 }
1455
1456 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1457 {
1458 int i;
1459 for (i = 0; i < n; i++)
1460 xfrm_state_put(*(states + i));
1461 }
1462
1463 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1464 {
1465 int i;
1466 for (i = 0; i < n; i++)
1467 xfrm_state_delete(*(states + i));
1468 }
1469 #endif
1470
1471 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1472 {
1473 return skb->sp->xvec[skb->sp->len - 1];
1474 }
1475
1476 #endif /* _NET_XFRM_H */