6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
25 #include "xfrm_hash.h"
27 u32 sysctl_xfrm_aevent_etime __read_mostly
= XFRM_AE_ETIME
;
28 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime
);
30 u32 sysctl_xfrm_aevent_rseqth __read_mostly
= XFRM_AE_SEQT_SIZE
;
31 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth
);
33 u32 sysctl_xfrm_acq_expires __read_mostly
= 30;
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */
42 static DEFINE_SPINLOCK(xfrm_state_lock
);
44 static unsigned int xfrm_state_hashmax __read_mostly
= 1 * 1024 * 1024;
45 static unsigned int xfrm_state_genid
;
47 static struct xfrm_state_afinfo
*xfrm_state_get_afinfo(unsigned int family
);
48 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo
*afinfo
);
50 #ifdef CONFIG_AUDITSYSCALL
51 static void xfrm_audit_state_replay(struct xfrm_state
*x
,
52 struct sk_buff
*skb
, __be32 net_seq
);
54 #define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
55 #endif /* CONFIG_AUDITSYSCALL */
57 static inline unsigned int xfrm_dst_hash(struct net
*net
,
58 xfrm_address_t
*daddr
,
59 xfrm_address_t
*saddr
,
61 unsigned short family
)
63 return __xfrm_dst_hash(daddr
, saddr
, reqid
, family
, net
->xfrm
.state_hmask
);
66 static inline unsigned int xfrm_src_hash(struct net
*net
,
67 xfrm_address_t
*daddr
,
68 xfrm_address_t
*saddr
,
69 unsigned short family
)
71 return __xfrm_src_hash(daddr
, saddr
, family
, net
->xfrm
.state_hmask
);
74 static inline unsigned int
75 xfrm_spi_hash(struct net
*net
, xfrm_address_t
*daddr
, __be32 spi
, u8 proto
, unsigned short family
)
77 return __xfrm_spi_hash(daddr
, spi
, proto
, family
, net
->xfrm
.state_hmask
);
80 static void xfrm_hash_transfer(struct hlist_head
*list
,
81 struct hlist_head
*ndsttable
,
82 struct hlist_head
*nsrctable
,
83 struct hlist_head
*nspitable
,
84 unsigned int nhashmask
)
86 struct hlist_node
*entry
, *tmp
;
89 hlist_for_each_entry_safe(x
, entry
, tmp
, list
, bydst
) {
92 h
= __xfrm_dst_hash(&x
->id
.daddr
, &x
->props
.saddr
,
93 x
->props
.reqid
, x
->props
.family
,
95 hlist_add_head(&x
->bydst
, ndsttable
+h
);
97 h
= __xfrm_src_hash(&x
->id
.daddr
, &x
->props
.saddr
,
100 hlist_add_head(&x
->bysrc
, nsrctable
+h
);
103 h
= __xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
,
104 x
->id
.proto
, x
->props
.family
,
106 hlist_add_head(&x
->byspi
, nspitable
+h
);
111 static unsigned long xfrm_hash_new_size(unsigned int state_hmask
)
113 return ((state_hmask
+ 1) << 1) * sizeof(struct hlist_head
);
116 static DEFINE_MUTEX(hash_resize_mutex
);
118 static void xfrm_hash_resize(struct work_struct
*work
)
120 struct net
*net
= container_of(work
, struct net
, xfrm
.state_hash_work
);
121 struct hlist_head
*ndst
, *nsrc
, *nspi
, *odst
, *osrc
, *ospi
;
122 unsigned long nsize
, osize
;
123 unsigned int nhashmask
, ohashmask
;
126 mutex_lock(&hash_resize_mutex
);
128 nsize
= xfrm_hash_new_size(net
->xfrm
.state_hmask
);
129 ndst
= xfrm_hash_alloc(nsize
);
132 nsrc
= xfrm_hash_alloc(nsize
);
134 xfrm_hash_free(ndst
, nsize
);
137 nspi
= xfrm_hash_alloc(nsize
);
139 xfrm_hash_free(ndst
, nsize
);
140 xfrm_hash_free(nsrc
, nsize
);
144 spin_lock_bh(&xfrm_state_lock
);
146 nhashmask
= (nsize
/ sizeof(struct hlist_head
)) - 1U;
147 for (i
= net
->xfrm
.state_hmask
; i
>= 0; i
--)
148 xfrm_hash_transfer(net
->xfrm
.state_bydst
+i
, ndst
, nsrc
, nspi
,
151 odst
= net
->xfrm
.state_bydst
;
152 osrc
= net
->xfrm
.state_bysrc
;
153 ospi
= net
->xfrm
.state_byspi
;
154 ohashmask
= net
->xfrm
.state_hmask
;
156 net
->xfrm
.state_bydst
= ndst
;
157 net
->xfrm
.state_bysrc
= nsrc
;
158 net
->xfrm
.state_byspi
= nspi
;
159 net
->xfrm
.state_hmask
= nhashmask
;
161 spin_unlock_bh(&xfrm_state_lock
);
163 osize
= (ohashmask
+ 1) * sizeof(struct hlist_head
);
164 xfrm_hash_free(odst
, osize
);
165 xfrm_hash_free(osrc
, osize
);
166 xfrm_hash_free(ospi
, osize
);
169 mutex_unlock(&hash_resize_mutex
);
172 static DEFINE_RWLOCK(xfrm_state_afinfo_lock
);
173 static struct xfrm_state_afinfo
*xfrm_state_afinfo
[NPROTO
];
175 static DEFINE_SPINLOCK(xfrm_state_gc_lock
);
177 int __xfrm_state_delete(struct xfrm_state
*x
);
179 int km_query(struct xfrm_state
*x
, struct xfrm_tmpl
*t
, struct xfrm_policy
*pol
);
180 void km_state_expired(struct xfrm_state
*x
, int hard
, u32 pid
);
182 static struct xfrm_state_afinfo
*xfrm_state_lock_afinfo(unsigned int family
)
184 struct xfrm_state_afinfo
*afinfo
;
185 if (unlikely(family
>= NPROTO
))
187 write_lock_bh(&xfrm_state_afinfo_lock
);
188 afinfo
= xfrm_state_afinfo
[family
];
189 if (unlikely(!afinfo
))
190 write_unlock_bh(&xfrm_state_afinfo_lock
);
194 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo
*afinfo
)
195 __releases(xfrm_state_afinfo_lock
)
197 write_unlock_bh(&xfrm_state_afinfo_lock
);
200 int xfrm_register_type(const struct xfrm_type
*type
, unsigned short family
)
202 struct xfrm_state_afinfo
*afinfo
= xfrm_state_lock_afinfo(family
);
203 const struct xfrm_type
**typemap
;
206 if (unlikely(afinfo
== NULL
))
207 return -EAFNOSUPPORT
;
208 typemap
= afinfo
->type_map
;
210 if (likely(typemap
[type
->proto
] == NULL
))
211 typemap
[type
->proto
] = type
;
214 xfrm_state_unlock_afinfo(afinfo
);
217 EXPORT_SYMBOL(xfrm_register_type
);
219 int xfrm_unregister_type(const struct xfrm_type
*type
, unsigned short family
)
221 struct xfrm_state_afinfo
*afinfo
= xfrm_state_lock_afinfo(family
);
222 const struct xfrm_type
**typemap
;
225 if (unlikely(afinfo
== NULL
))
226 return -EAFNOSUPPORT
;
227 typemap
= afinfo
->type_map
;
229 if (unlikely(typemap
[type
->proto
] != type
))
232 typemap
[type
->proto
] = NULL
;
233 xfrm_state_unlock_afinfo(afinfo
);
236 EXPORT_SYMBOL(xfrm_unregister_type
);
238 static const struct xfrm_type
*xfrm_get_type(u8 proto
, unsigned short family
)
240 struct xfrm_state_afinfo
*afinfo
;
241 const struct xfrm_type
**typemap
;
242 const struct xfrm_type
*type
;
243 int modload_attempted
= 0;
246 afinfo
= xfrm_state_get_afinfo(family
);
247 if (unlikely(afinfo
== NULL
))
249 typemap
= afinfo
->type_map
;
251 type
= typemap
[proto
];
252 if (unlikely(type
&& !try_module_get(type
->owner
)))
254 if (!type
&& !modload_attempted
) {
255 xfrm_state_put_afinfo(afinfo
);
256 request_module("xfrm-type-%d-%d", family
, proto
);
257 modload_attempted
= 1;
261 xfrm_state_put_afinfo(afinfo
);
265 static void xfrm_put_type(const struct xfrm_type
*type
)
267 module_put(type
->owner
);
270 int xfrm_register_mode(struct xfrm_mode
*mode
, int family
)
272 struct xfrm_state_afinfo
*afinfo
;
273 struct xfrm_mode
**modemap
;
276 if (unlikely(mode
->encap
>= XFRM_MODE_MAX
))
279 afinfo
= xfrm_state_lock_afinfo(family
);
280 if (unlikely(afinfo
== NULL
))
281 return -EAFNOSUPPORT
;
284 modemap
= afinfo
->mode_map
;
285 if (modemap
[mode
->encap
])
289 if (!try_module_get(afinfo
->owner
))
292 mode
->afinfo
= afinfo
;
293 modemap
[mode
->encap
] = mode
;
297 xfrm_state_unlock_afinfo(afinfo
);
300 EXPORT_SYMBOL(xfrm_register_mode
);
302 int xfrm_unregister_mode(struct xfrm_mode
*mode
, int family
)
304 struct xfrm_state_afinfo
*afinfo
;
305 struct xfrm_mode
**modemap
;
308 if (unlikely(mode
->encap
>= XFRM_MODE_MAX
))
311 afinfo
= xfrm_state_lock_afinfo(family
);
312 if (unlikely(afinfo
== NULL
))
313 return -EAFNOSUPPORT
;
316 modemap
= afinfo
->mode_map
;
317 if (likely(modemap
[mode
->encap
] == mode
)) {
318 modemap
[mode
->encap
] = NULL
;
319 module_put(mode
->afinfo
->owner
);
323 xfrm_state_unlock_afinfo(afinfo
);
326 EXPORT_SYMBOL(xfrm_unregister_mode
);
328 static struct xfrm_mode
*xfrm_get_mode(unsigned int encap
, int family
)
330 struct xfrm_state_afinfo
*afinfo
;
331 struct xfrm_mode
*mode
;
332 int modload_attempted
= 0;
334 if (unlikely(encap
>= XFRM_MODE_MAX
))
338 afinfo
= xfrm_state_get_afinfo(family
);
339 if (unlikely(afinfo
== NULL
))
342 mode
= afinfo
->mode_map
[encap
];
343 if (unlikely(mode
&& !try_module_get(mode
->owner
)))
345 if (!mode
&& !modload_attempted
) {
346 xfrm_state_put_afinfo(afinfo
);
347 request_module("xfrm-mode-%d-%d", family
, encap
);
348 modload_attempted
= 1;
352 xfrm_state_put_afinfo(afinfo
);
356 static void xfrm_put_mode(struct xfrm_mode
*mode
)
358 module_put(mode
->owner
);
361 static void xfrm_state_gc_destroy(struct xfrm_state
*x
)
363 del_timer_sync(&x
->timer
);
364 del_timer_sync(&x
->rtimer
);
371 xfrm_put_mode(x
->inner_mode
);
372 if (x
->inner_mode_iaf
)
373 xfrm_put_mode(x
->inner_mode_iaf
);
375 xfrm_put_mode(x
->outer_mode
);
377 x
->type
->destructor(x
);
378 xfrm_put_type(x
->type
);
380 security_xfrm_state_free(x
);
384 static void xfrm_state_gc_task(struct work_struct
*work
)
386 struct net
*net
= container_of(work
, struct net
, xfrm
.state_gc_work
);
387 struct xfrm_state
*x
;
388 struct hlist_node
*entry
, *tmp
;
389 struct hlist_head gc_list
;
391 spin_lock_bh(&xfrm_state_gc_lock
);
392 hlist_move_list(&net
->xfrm
.state_gc_list
, &gc_list
);
393 spin_unlock_bh(&xfrm_state_gc_lock
);
395 hlist_for_each_entry_safe(x
, entry
, tmp
, &gc_list
, gclist
)
396 xfrm_state_gc_destroy(x
);
398 wake_up(&net
->xfrm
.km_waitq
);
401 static inline unsigned long make_jiffies(long secs
)
403 if (secs
>= (MAX_SCHEDULE_TIMEOUT
-1)/HZ
)
404 return MAX_SCHEDULE_TIMEOUT
-1;
409 static void xfrm_timer_handler(unsigned long data
)
411 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
412 struct net
*net
= xs_net(x
);
413 unsigned long now
= get_seconds();
414 long next
= LONG_MAX
;
419 if (x
->km
.state
== XFRM_STATE_DEAD
)
421 if (x
->km
.state
== XFRM_STATE_EXPIRED
)
423 if (x
->lft
.hard_add_expires_seconds
) {
424 long tmo
= x
->lft
.hard_add_expires_seconds
+
425 x
->curlft
.add_time
- now
;
431 if (x
->lft
.hard_use_expires_seconds
) {
432 long tmo
= x
->lft
.hard_use_expires_seconds
+
433 (x
->curlft
.use_time
? : now
) - now
;
441 if (x
->lft
.soft_add_expires_seconds
) {
442 long tmo
= x
->lft
.soft_add_expires_seconds
+
443 x
->curlft
.add_time
- now
;
449 if (x
->lft
.soft_use_expires_seconds
) {
450 long tmo
= x
->lft
.soft_use_expires_seconds
+
451 (x
->curlft
.use_time
? : now
) - now
;
460 km_state_expired(x
, 0, 0);
462 if (next
!= LONG_MAX
)
463 mod_timer(&x
->timer
, jiffies
+ make_jiffies(next
));
468 if (x
->km
.state
== XFRM_STATE_ACQ
&& x
->id
.spi
== 0) {
469 x
->km
.state
= XFRM_STATE_EXPIRED
;
470 wake_up(&net
->xfrm
.km_waitq
);
475 err
= __xfrm_state_delete(x
);
476 if (!err
&& x
->id
.spi
)
477 km_state_expired(x
, 1, 0);
479 xfrm_audit_state_delete(x
, err
? 0 : 1,
480 audit_get_loginuid(current
),
481 audit_get_sessionid(current
), 0);
484 spin_unlock(&x
->lock
);
487 static void xfrm_replay_timer_handler(unsigned long data
);
489 struct xfrm_state
*xfrm_state_alloc(struct net
*net
)
491 struct xfrm_state
*x
;
493 x
= kzalloc(sizeof(struct xfrm_state
), GFP_ATOMIC
);
496 write_pnet(&x
->xs_net
, net
);
497 atomic_set(&x
->refcnt
, 1);
498 atomic_set(&x
->tunnel_users
, 0);
499 INIT_LIST_HEAD(&x
->km
.all
);
500 INIT_HLIST_NODE(&x
->bydst
);
501 INIT_HLIST_NODE(&x
->bysrc
);
502 INIT_HLIST_NODE(&x
->byspi
);
503 setup_timer(&x
->timer
, xfrm_timer_handler
, (unsigned long)x
);
504 setup_timer(&x
->rtimer
, xfrm_replay_timer_handler
,
506 x
->curlft
.add_time
= get_seconds();
507 x
->lft
.soft_byte_limit
= XFRM_INF
;
508 x
->lft
.soft_packet_limit
= XFRM_INF
;
509 x
->lft
.hard_byte_limit
= XFRM_INF
;
510 x
->lft
.hard_packet_limit
= XFRM_INF
;
511 x
->replay_maxage
= 0;
512 x
->replay_maxdiff
= 0;
513 x
->inner_mode
= NULL
;
514 x
->inner_mode_iaf
= NULL
;
515 spin_lock_init(&x
->lock
);
519 EXPORT_SYMBOL(xfrm_state_alloc
);
521 void __xfrm_state_destroy(struct xfrm_state
*x
)
523 struct net
*net
= xs_net(x
);
525 WARN_ON(x
->km
.state
!= XFRM_STATE_DEAD
);
527 spin_lock_bh(&xfrm_state_gc_lock
);
528 hlist_add_head(&x
->gclist
, &net
->xfrm
.state_gc_list
);
529 spin_unlock_bh(&xfrm_state_gc_lock
);
530 schedule_work(&net
->xfrm
.state_gc_work
);
532 EXPORT_SYMBOL(__xfrm_state_destroy
);
534 int __xfrm_state_delete(struct xfrm_state
*x
)
536 struct net
*net
= xs_net(x
);
539 if (x
->km
.state
!= XFRM_STATE_DEAD
) {
540 x
->km
.state
= XFRM_STATE_DEAD
;
541 spin_lock(&xfrm_state_lock
);
542 list_del(&x
->km
.all
);
543 hlist_del(&x
->bydst
);
544 hlist_del(&x
->bysrc
);
546 hlist_del(&x
->byspi
);
547 net
->xfrm
.state_num
--;
548 spin_unlock(&xfrm_state_lock
);
550 /* All xfrm_state objects are created by xfrm_state_alloc.
551 * The xfrm_state_alloc call gives a reference, and that
552 * is what we are dropping here.
560 EXPORT_SYMBOL(__xfrm_state_delete
);
562 int xfrm_state_delete(struct xfrm_state
*x
)
566 spin_lock_bh(&x
->lock
);
567 err
= __xfrm_state_delete(x
);
568 spin_unlock_bh(&x
->lock
);
572 EXPORT_SYMBOL(xfrm_state_delete
);
574 #ifdef CONFIG_SECURITY_NETWORK_XFRM
576 xfrm_state_flush_secctx_check(struct net
*net
, u8 proto
, struct xfrm_audit
*audit_info
)
580 for (i
= 0; i
<= net
->xfrm
.state_hmask
; i
++) {
581 struct hlist_node
*entry
;
582 struct xfrm_state
*x
;
584 hlist_for_each_entry(x
, entry
, net
->xfrm
.state_bydst
+i
, bydst
) {
585 if (xfrm_id_proto_match(x
->id
.proto
, proto
) &&
586 (err
= security_xfrm_state_delete(x
)) != 0) {
587 xfrm_audit_state_delete(x
, 0,
588 audit_info
->loginuid
,
589 audit_info
->sessionid
,
600 xfrm_state_flush_secctx_check(struct net
*net
, u8 proto
, struct xfrm_audit
*audit_info
)
606 int xfrm_state_flush(struct net
*net
, u8 proto
, struct xfrm_audit
*audit_info
)
610 spin_lock_bh(&xfrm_state_lock
);
611 err
= xfrm_state_flush_secctx_check(net
, proto
, audit_info
);
615 for (i
= 0; i
<= net
->xfrm
.state_hmask
; i
++) {
616 struct hlist_node
*entry
;
617 struct xfrm_state
*x
;
619 hlist_for_each_entry(x
, entry
, net
->xfrm
.state_bydst
+i
, bydst
) {
620 if (!xfrm_state_kern(x
) &&
621 xfrm_id_proto_match(x
->id
.proto
, proto
)) {
623 spin_unlock_bh(&xfrm_state_lock
);
625 err
= xfrm_state_delete(x
);
626 xfrm_audit_state_delete(x
, err
? 0 : 1,
627 audit_info
->loginuid
,
628 audit_info
->sessionid
,
632 spin_lock_bh(&xfrm_state_lock
);
640 spin_unlock_bh(&xfrm_state_lock
);
641 wake_up(&net
->xfrm
.km_waitq
);
644 EXPORT_SYMBOL(xfrm_state_flush
);
646 void xfrm_sad_getinfo(struct xfrmk_sadinfo
*si
)
648 spin_lock_bh(&xfrm_state_lock
);
649 si
->sadcnt
= init_net
.xfrm
.state_num
;
650 si
->sadhcnt
= init_net
.xfrm
.state_hmask
;
651 si
->sadhmcnt
= xfrm_state_hashmax
;
652 spin_unlock_bh(&xfrm_state_lock
);
654 EXPORT_SYMBOL(xfrm_sad_getinfo
);
657 xfrm_init_tempsel(struct xfrm_state
*x
, struct flowi
*fl
,
658 struct xfrm_tmpl
*tmpl
,
659 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
660 unsigned short family
)
662 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
665 afinfo
->init_tempsel(x
, fl
, tmpl
, daddr
, saddr
);
666 xfrm_state_put_afinfo(afinfo
);
670 static struct xfrm_state
*__xfrm_state_lookup(struct net
*net
, xfrm_address_t
*daddr
, __be32 spi
, u8 proto
, unsigned short family
)
672 unsigned int h
= xfrm_spi_hash(net
, daddr
, spi
, proto
, family
);
673 struct xfrm_state
*x
;
674 struct hlist_node
*entry
;
676 hlist_for_each_entry(x
, entry
, net
->xfrm
.state_byspi
+h
, byspi
) {
677 if (x
->props
.family
!= family
||
679 x
->id
.proto
!= proto
)
684 if (x
->id
.daddr
.a4
!= daddr
->a4
)
688 if (!ipv6_addr_equal((struct in6_addr
*)daddr
,
702 static struct xfrm_state
*__xfrm_state_lookup_byaddr(struct net
*net
, xfrm_address_t
*daddr
, xfrm_address_t
*saddr
, u8 proto
, unsigned short family
)
704 unsigned int h
= xfrm_src_hash(net
, daddr
, saddr
, family
);
705 struct xfrm_state
*x
;
706 struct hlist_node
*entry
;
708 hlist_for_each_entry(x
, entry
, net
->xfrm
.state_bysrc
+h
, bysrc
) {
709 if (x
->props
.family
!= family
||
710 x
->id
.proto
!= proto
)
715 if (x
->id
.daddr
.a4
!= daddr
->a4
||
716 x
->props
.saddr
.a4
!= saddr
->a4
)
720 if (!ipv6_addr_equal((struct in6_addr
*)daddr
,
723 !ipv6_addr_equal((struct in6_addr
*)saddr
,
737 static inline struct xfrm_state
*
738 __xfrm_state_locate(struct xfrm_state
*x
, int use_spi
, int family
)
740 struct net
*net
= xs_net(x
);
743 return __xfrm_state_lookup(net
, &x
->id
.daddr
, x
->id
.spi
,
744 x
->id
.proto
, family
);
746 return __xfrm_state_lookup_byaddr(net
, &x
->id
.daddr
,
748 x
->id
.proto
, family
);
751 static void xfrm_hash_grow_check(struct net
*net
, int have_hash_collision
)
753 if (have_hash_collision
&&
754 (net
->xfrm
.state_hmask
+ 1) < xfrm_state_hashmax
&&
755 net
->xfrm
.state_num
> net
->xfrm
.state_hmask
)
756 schedule_work(&net
->xfrm
.state_hash_work
);
760 xfrm_state_find(xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
761 struct flowi
*fl
, struct xfrm_tmpl
*tmpl
,
762 struct xfrm_policy
*pol
, int *err
,
763 unsigned short family
)
765 struct net
*net
= xp_net(pol
);
767 struct hlist_node
*entry
;
768 struct xfrm_state
*x
, *x0
, *to_put
;
769 int acquire_in_progress
= 0;
771 struct xfrm_state
*best
= NULL
;
775 spin_lock_bh(&xfrm_state_lock
);
776 h
= xfrm_dst_hash(net
, daddr
, saddr
, tmpl
->reqid
, family
);
777 hlist_for_each_entry(x
, entry
, net
->xfrm
.state_bydst
+h
, bydst
) {
778 if (x
->props
.family
== family
&&
779 x
->props
.reqid
== tmpl
->reqid
&&
780 !(x
->props
.flags
& XFRM_STATE_WILDRECV
) &&
781 xfrm_state_addr_check(x
, daddr
, saddr
, family
) &&
782 tmpl
->mode
== x
->props
.mode
&&
783 tmpl
->id
.proto
== x
->id
.proto
&&
784 (tmpl
->id
.spi
== x
->id
.spi
|| !tmpl
->id
.spi
)) {
786 1. There is a valid state with matching selector.
788 2. Valid state with inappropriate selector. Skip.
790 Entering area of "sysdeps".
792 3. If state is not valid, selector is temporary,
793 it selects only session which triggered
794 previous resolution. Key manager will do
795 something to install a state with proper
798 if (x
->km
.state
== XFRM_STATE_VALID
) {
799 if ((x
->sel
.family
&& !xfrm_selector_match(&x
->sel
, fl
, x
->sel
.family
)) ||
800 !security_xfrm_state_pol_flow_match(x
, pol
, fl
))
803 best
->km
.dying
> x
->km
.dying
||
804 (best
->km
.dying
== x
->km
.dying
&&
805 best
->curlft
.add_time
< x
->curlft
.add_time
))
807 } else if (x
->km
.state
== XFRM_STATE_ACQ
) {
808 acquire_in_progress
= 1;
809 } else if (x
->km
.state
== XFRM_STATE_ERROR
||
810 x
->km
.state
== XFRM_STATE_EXPIRED
) {
811 if (xfrm_selector_match(&x
->sel
, fl
, x
->sel
.family
) &&
812 security_xfrm_state_pol_flow_match(x
, pol
, fl
))
819 if (!x
&& !error
&& !acquire_in_progress
) {
821 (x0
= __xfrm_state_lookup(net
, daddr
, tmpl
->id
.spi
,
822 tmpl
->id
.proto
, family
)) != NULL
) {
827 x
= xfrm_state_alloc(net
);
832 /* Initialize temporary selector matching only
833 * to current session. */
834 xfrm_init_tempsel(x
, fl
, tmpl
, daddr
, saddr
, family
);
836 error
= security_xfrm_state_alloc_acquire(x
, pol
->security
, fl
->secid
);
838 x
->km
.state
= XFRM_STATE_DEAD
;
844 if (km_query(x
, tmpl
, pol
) == 0) {
845 x
->km
.state
= XFRM_STATE_ACQ
;
846 list_add(&x
->km
.all
, &net
->xfrm
.state_all
);
847 hlist_add_head(&x
->bydst
, net
->xfrm
.state_bydst
+h
);
848 h
= xfrm_src_hash(net
, daddr
, saddr
, family
);
849 hlist_add_head(&x
->bysrc
, net
->xfrm
.state_bysrc
+h
);
851 h
= xfrm_spi_hash(net
, &x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, family
);
852 hlist_add_head(&x
->byspi
, net
->xfrm
.state_byspi
+h
);
854 x
->lft
.hard_add_expires_seconds
= sysctl_xfrm_acq_expires
;
855 x
->timer
.expires
= jiffies
+ sysctl_xfrm_acq_expires
*HZ
;
856 add_timer(&x
->timer
);
857 net
->xfrm
.state_num
++;
858 xfrm_hash_grow_check(net
, x
->bydst
.next
!= NULL
);
860 x
->km
.state
= XFRM_STATE_DEAD
;
870 *err
= acquire_in_progress
? -EAGAIN
: error
;
871 spin_unlock_bh(&xfrm_state_lock
);
873 xfrm_state_put(to_put
);
878 xfrm_stateonly_find(struct net
*net
,
879 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
880 unsigned short family
, u8 mode
, u8 proto
, u32 reqid
)
883 struct xfrm_state
*rx
= NULL
, *x
= NULL
;
884 struct hlist_node
*entry
;
886 spin_lock(&xfrm_state_lock
);
887 h
= xfrm_dst_hash(net
, daddr
, saddr
, reqid
, family
);
888 hlist_for_each_entry(x
, entry
, net
->xfrm
.state_bydst
+h
, bydst
) {
889 if (x
->props
.family
== family
&&
890 x
->props
.reqid
== reqid
&&
891 !(x
->props
.flags
& XFRM_STATE_WILDRECV
) &&
892 xfrm_state_addr_check(x
, daddr
, saddr
, family
) &&
893 mode
== x
->props
.mode
&&
894 proto
== x
->id
.proto
&&
895 x
->km
.state
== XFRM_STATE_VALID
) {
903 spin_unlock(&xfrm_state_lock
);
908 EXPORT_SYMBOL(xfrm_stateonly_find
);
910 static void __xfrm_state_insert(struct xfrm_state
*x
)
912 struct net
*net
= xs_net(x
);
915 x
->genid
= ++xfrm_state_genid
;
917 list_add(&x
->km
.all
, &net
->xfrm
.state_all
);
919 h
= xfrm_dst_hash(net
, &x
->id
.daddr
, &x
->props
.saddr
,
920 x
->props
.reqid
, x
->props
.family
);
921 hlist_add_head(&x
->bydst
, net
->xfrm
.state_bydst
+h
);
923 h
= xfrm_src_hash(net
, &x
->id
.daddr
, &x
->props
.saddr
, x
->props
.family
);
924 hlist_add_head(&x
->bysrc
, net
->xfrm
.state_bysrc
+h
);
927 h
= xfrm_spi_hash(net
, &x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
,
930 hlist_add_head(&x
->byspi
, net
->xfrm
.state_byspi
+h
);
933 mod_timer(&x
->timer
, jiffies
+ HZ
);
934 if (x
->replay_maxage
)
935 mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
);
937 wake_up(&net
->xfrm
.km_waitq
);
939 net
->xfrm
.state_num
++;
941 xfrm_hash_grow_check(net
, x
->bydst
.next
!= NULL
);
944 /* xfrm_state_lock is held */
945 static void __xfrm_state_bump_genids(struct xfrm_state
*xnew
)
947 struct net
*net
= xs_net(xnew
);
948 unsigned short family
= xnew
->props
.family
;
949 u32 reqid
= xnew
->props
.reqid
;
950 struct xfrm_state
*x
;
951 struct hlist_node
*entry
;
954 h
= xfrm_dst_hash(net
, &xnew
->id
.daddr
, &xnew
->props
.saddr
, reqid
, family
);
955 hlist_for_each_entry(x
, entry
, net
->xfrm
.state_bydst
+h
, bydst
) {
956 if (x
->props
.family
== family
&&
957 x
->props
.reqid
== reqid
&&
958 !xfrm_addr_cmp(&x
->id
.daddr
, &xnew
->id
.daddr
, family
) &&
959 !xfrm_addr_cmp(&x
->props
.saddr
, &xnew
->props
.saddr
, family
))
960 x
->genid
= xfrm_state_genid
;
964 void xfrm_state_insert(struct xfrm_state
*x
)
966 spin_lock_bh(&xfrm_state_lock
);
967 __xfrm_state_bump_genids(x
);
968 __xfrm_state_insert(x
);
969 spin_unlock_bh(&xfrm_state_lock
);
971 EXPORT_SYMBOL(xfrm_state_insert
);
973 /* xfrm_state_lock is held */
974 static struct xfrm_state
*__find_acq_core(struct net
*net
, unsigned short family
, u8 mode
, u32 reqid
, u8 proto
, xfrm_address_t
*daddr
, xfrm_address_t
*saddr
, int create
)
976 unsigned int h
= xfrm_dst_hash(net
, daddr
, saddr
, reqid
, family
);
977 struct hlist_node
*entry
;
978 struct xfrm_state
*x
;
980 hlist_for_each_entry(x
, entry
, net
->xfrm
.state_bydst
+h
, bydst
) {
981 if (x
->props
.reqid
!= reqid
||
982 x
->props
.mode
!= mode
||
983 x
->props
.family
!= family
||
984 x
->km
.state
!= XFRM_STATE_ACQ
||
986 x
->id
.proto
!= proto
)
991 if (x
->id
.daddr
.a4
!= daddr
->a4
||
992 x
->props
.saddr
.a4
!= saddr
->a4
)
996 if (!ipv6_addr_equal((struct in6_addr
*)x
->id
.daddr
.a6
,
997 (struct in6_addr
*)daddr
) ||
998 !ipv6_addr_equal((struct in6_addr
*)
1000 (struct in6_addr
*)saddr
))
1012 x
= xfrm_state_alloc(net
);
1016 x
->sel
.daddr
.a4
= daddr
->a4
;
1017 x
->sel
.saddr
.a4
= saddr
->a4
;
1018 x
->sel
.prefixlen_d
= 32;
1019 x
->sel
.prefixlen_s
= 32;
1020 x
->props
.saddr
.a4
= saddr
->a4
;
1021 x
->id
.daddr
.a4
= daddr
->a4
;
1025 ipv6_addr_copy((struct in6_addr
*)x
->sel
.daddr
.a6
,
1026 (struct in6_addr
*)daddr
);
1027 ipv6_addr_copy((struct in6_addr
*)x
->sel
.saddr
.a6
,
1028 (struct in6_addr
*)saddr
);
1029 x
->sel
.prefixlen_d
= 128;
1030 x
->sel
.prefixlen_s
= 128;
1031 ipv6_addr_copy((struct in6_addr
*)x
->props
.saddr
.a6
,
1032 (struct in6_addr
*)saddr
);
1033 ipv6_addr_copy((struct in6_addr
*)x
->id
.daddr
.a6
,
1034 (struct in6_addr
*)daddr
);
1038 x
->km
.state
= XFRM_STATE_ACQ
;
1039 x
->id
.proto
= proto
;
1040 x
->props
.family
= family
;
1041 x
->props
.mode
= mode
;
1042 x
->props
.reqid
= reqid
;
1043 x
->lft
.hard_add_expires_seconds
= sysctl_xfrm_acq_expires
;
1045 x
->timer
.expires
= jiffies
+ sysctl_xfrm_acq_expires
*HZ
;
1046 add_timer(&x
->timer
);
1047 list_add(&x
->km
.all
, &net
->xfrm
.state_all
);
1048 hlist_add_head(&x
->bydst
, net
->xfrm
.state_bydst
+h
);
1049 h
= xfrm_src_hash(net
, daddr
, saddr
, family
);
1050 hlist_add_head(&x
->bysrc
, net
->xfrm
.state_bysrc
+h
);
1052 net
->xfrm
.state_num
++;
1054 xfrm_hash_grow_check(net
, x
->bydst
.next
!= NULL
);
1060 static struct xfrm_state
*__xfrm_find_acq_byseq(struct net
*net
, u32 seq
);
1062 int xfrm_state_add(struct xfrm_state
*x
)
1064 struct net
*net
= xs_net(x
);
1065 struct xfrm_state
*x1
, *to_put
;
1068 int use_spi
= xfrm_id_proto_match(x
->id
.proto
, IPSEC_PROTO_ANY
);
1070 family
= x
->props
.family
;
1074 spin_lock_bh(&xfrm_state_lock
);
1076 x1
= __xfrm_state_locate(x
, use_spi
, family
);
1084 if (use_spi
&& x
->km
.seq
) {
1085 x1
= __xfrm_find_acq_byseq(net
, x
->km
.seq
);
1086 if (x1
&& ((x1
->id
.proto
!= x
->id
.proto
) ||
1087 xfrm_addr_cmp(&x1
->id
.daddr
, &x
->id
.daddr
, family
))) {
1094 x1
= __find_acq_core(net
, family
, x
->props
.mode
, x
->props
.reqid
,
1096 &x
->id
.daddr
, &x
->props
.saddr
, 0);
1098 __xfrm_state_bump_genids(x
);
1099 __xfrm_state_insert(x
);
1103 spin_unlock_bh(&xfrm_state_lock
);
1106 xfrm_state_delete(x1
);
1111 xfrm_state_put(to_put
);
1115 EXPORT_SYMBOL(xfrm_state_add
);
1117 #ifdef CONFIG_XFRM_MIGRATE
1118 static struct xfrm_state
*xfrm_state_clone(struct xfrm_state
*orig
, int *errp
)
1120 struct net
*net
= xs_net(orig
);
1122 struct xfrm_state
*x
= xfrm_state_alloc(net
);
1126 memcpy(&x
->id
, &orig
->id
, sizeof(x
->id
));
1127 memcpy(&x
->sel
, &orig
->sel
, sizeof(x
->sel
));
1128 memcpy(&x
->lft
, &orig
->lft
, sizeof(x
->lft
));
1129 x
->props
.mode
= orig
->props
.mode
;
1130 x
->props
.replay_window
= orig
->props
.replay_window
;
1131 x
->props
.reqid
= orig
->props
.reqid
;
1132 x
->props
.family
= orig
->props
.family
;
1133 x
->props
.saddr
= orig
->props
.saddr
;
1136 x
->aalg
= xfrm_algo_clone(orig
->aalg
);
1140 x
->props
.aalgo
= orig
->props
.aalgo
;
1143 x
->ealg
= xfrm_algo_clone(orig
->ealg
);
1147 x
->props
.ealgo
= orig
->props
.ealgo
;
1150 x
->calg
= xfrm_algo_clone(orig
->calg
);
1154 x
->props
.calgo
= orig
->props
.calgo
;
1157 x
->encap
= kmemdup(orig
->encap
, sizeof(*x
->encap
), GFP_KERNEL
);
1163 x
->coaddr
= kmemdup(orig
->coaddr
, sizeof(*x
->coaddr
),
1169 err
= xfrm_init_state(x
);
1173 x
->props
.flags
= orig
->props
.flags
;
1175 x
->curlft
.add_time
= orig
->curlft
.add_time
;
1176 x
->km
.state
= orig
->km
.state
;
1177 x
->km
.seq
= orig
->km
.seq
;
1195 /* xfrm_state_lock is held */
1196 struct xfrm_state
* xfrm_migrate_state_find(struct xfrm_migrate
*m
)
1199 struct xfrm_state
*x
;
1200 struct hlist_node
*entry
;
1203 h
= xfrm_dst_hash(&init_net
, &m
->old_daddr
, &m
->old_saddr
,
1204 m
->reqid
, m
->old_family
);
1205 hlist_for_each_entry(x
, entry
, init_net
.xfrm
.state_bydst
+h
, bydst
) {
1206 if (x
->props
.mode
!= m
->mode
||
1207 x
->id
.proto
!= m
->proto
)
1209 if (m
->reqid
&& x
->props
.reqid
!= m
->reqid
)
1211 if (xfrm_addr_cmp(&x
->id
.daddr
, &m
->old_daddr
,
1213 xfrm_addr_cmp(&x
->props
.saddr
, &m
->old_saddr
,
1220 h
= xfrm_src_hash(&init_net
, &m
->old_daddr
, &m
->old_saddr
,
1222 hlist_for_each_entry(x
, entry
, init_net
.xfrm
.state_bysrc
+h
, bysrc
) {
1223 if (x
->props
.mode
!= m
->mode
||
1224 x
->id
.proto
!= m
->proto
)
1226 if (xfrm_addr_cmp(&x
->id
.daddr
, &m
->old_daddr
,
1228 xfrm_addr_cmp(&x
->props
.saddr
, &m
->old_saddr
,
1238 EXPORT_SYMBOL(xfrm_migrate_state_find
);
1240 struct xfrm_state
* xfrm_state_migrate(struct xfrm_state
*x
,
1241 struct xfrm_migrate
*m
)
1243 struct xfrm_state
*xc
;
1246 xc
= xfrm_state_clone(x
, &err
);
1250 memcpy(&xc
->id
.daddr
, &m
->new_daddr
, sizeof(xc
->id
.daddr
));
1251 memcpy(&xc
->props
.saddr
, &m
->new_saddr
, sizeof(xc
->props
.saddr
));
1254 if (!xfrm_addr_cmp(&x
->id
.daddr
, &m
->new_daddr
, m
->new_family
)) {
1255 /* a care is needed when the destination address of the
1256 state is to be updated as it is a part of triplet */
1257 xfrm_state_insert(xc
);
1259 if ((err
= xfrm_state_add(xc
)) < 0)
1268 EXPORT_SYMBOL(xfrm_state_migrate
);
1271 int xfrm_state_update(struct xfrm_state
*x
)
1273 struct xfrm_state
*x1
, *to_put
;
1275 int use_spi
= xfrm_id_proto_match(x
->id
.proto
, IPSEC_PROTO_ANY
);
1279 spin_lock_bh(&xfrm_state_lock
);
1280 x1
= __xfrm_state_locate(x
, use_spi
, x
->props
.family
);
1286 if (xfrm_state_kern(x1
)) {
1292 if (x1
->km
.state
== XFRM_STATE_ACQ
) {
1293 __xfrm_state_insert(x
);
1299 spin_unlock_bh(&xfrm_state_lock
);
1302 xfrm_state_put(to_put
);
1308 xfrm_state_delete(x1
);
1314 spin_lock_bh(&x1
->lock
);
1315 if (likely(x1
->km
.state
== XFRM_STATE_VALID
)) {
1316 if (x
->encap
&& x1
->encap
)
1317 memcpy(x1
->encap
, x
->encap
, sizeof(*x1
->encap
));
1318 if (x
->coaddr
&& x1
->coaddr
) {
1319 memcpy(x1
->coaddr
, x
->coaddr
, sizeof(*x1
->coaddr
));
1321 if (!use_spi
&& memcmp(&x1
->sel
, &x
->sel
, sizeof(x1
->sel
)))
1322 memcpy(&x1
->sel
, &x
->sel
, sizeof(x1
->sel
));
1323 memcpy(&x1
->lft
, &x
->lft
, sizeof(x1
->lft
));
1326 mod_timer(&x1
->timer
, jiffies
+ HZ
);
1327 if (x1
->curlft
.use_time
)
1328 xfrm_state_check_expire(x1
);
1332 spin_unlock_bh(&x1
->lock
);
1338 EXPORT_SYMBOL(xfrm_state_update
);
1340 int xfrm_state_check_expire(struct xfrm_state
*x
)
1342 if (!x
->curlft
.use_time
)
1343 x
->curlft
.use_time
= get_seconds();
1345 if (x
->km
.state
!= XFRM_STATE_VALID
)
1348 if (x
->curlft
.bytes
>= x
->lft
.hard_byte_limit
||
1349 x
->curlft
.packets
>= x
->lft
.hard_packet_limit
) {
1350 x
->km
.state
= XFRM_STATE_EXPIRED
;
1351 mod_timer(&x
->timer
, jiffies
);
1356 (x
->curlft
.bytes
>= x
->lft
.soft_byte_limit
||
1357 x
->curlft
.packets
>= x
->lft
.soft_packet_limit
)) {
1359 km_state_expired(x
, 0, 0);
1363 EXPORT_SYMBOL(xfrm_state_check_expire
);
1366 xfrm_state_lookup(struct net
*net
, xfrm_address_t
*daddr
, __be32 spi
, u8 proto
,
1367 unsigned short family
)
1369 struct xfrm_state
*x
;
1371 spin_lock_bh(&xfrm_state_lock
);
1372 x
= __xfrm_state_lookup(net
, daddr
, spi
, proto
, family
);
1373 spin_unlock_bh(&xfrm_state_lock
);
1376 EXPORT_SYMBOL(xfrm_state_lookup
);
1379 xfrm_state_lookup_byaddr(struct net
*net
,
1380 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
1381 u8 proto
, unsigned short family
)
1383 struct xfrm_state
*x
;
1385 spin_lock_bh(&xfrm_state_lock
);
1386 x
= __xfrm_state_lookup_byaddr(net
, daddr
, saddr
, proto
, family
);
1387 spin_unlock_bh(&xfrm_state_lock
);
1390 EXPORT_SYMBOL(xfrm_state_lookup_byaddr
);
1393 xfrm_find_acq(struct net
*net
, u8 mode
, u32 reqid
, u8 proto
,
1394 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
1395 int create
, unsigned short family
)
1397 struct xfrm_state
*x
;
1399 spin_lock_bh(&xfrm_state_lock
);
1400 x
= __find_acq_core(net
, family
, mode
, reqid
, proto
, daddr
, saddr
, create
);
1401 spin_unlock_bh(&xfrm_state_lock
);
1405 EXPORT_SYMBOL(xfrm_find_acq
);
1407 #ifdef CONFIG_XFRM_SUB_POLICY
1409 xfrm_tmpl_sort(struct xfrm_tmpl
**dst
, struct xfrm_tmpl
**src
, int n
,
1410 unsigned short family
)
1413 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
1415 return -EAFNOSUPPORT
;
1417 spin_lock_bh(&xfrm_state_lock
);
1418 if (afinfo
->tmpl_sort
)
1419 err
= afinfo
->tmpl_sort(dst
, src
, n
);
1420 spin_unlock_bh(&xfrm_state_lock
);
1421 xfrm_state_put_afinfo(afinfo
);
1424 EXPORT_SYMBOL(xfrm_tmpl_sort
);
1427 xfrm_state_sort(struct xfrm_state
**dst
, struct xfrm_state
**src
, int n
,
1428 unsigned short family
)
1431 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
1433 return -EAFNOSUPPORT
;
1435 spin_lock_bh(&xfrm_state_lock
);
1436 if (afinfo
->state_sort
)
1437 err
= afinfo
->state_sort(dst
, src
, n
);
1438 spin_unlock_bh(&xfrm_state_lock
);
1439 xfrm_state_put_afinfo(afinfo
);
1442 EXPORT_SYMBOL(xfrm_state_sort
);
1445 /* Silly enough, but I'm lazy to build resolution list */
1447 static struct xfrm_state
*__xfrm_find_acq_byseq(struct net
*net
, u32 seq
)
1451 for (i
= 0; i
<= net
->xfrm
.state_hmask
; i
++) {
1452 struct hlist_node
*entry
;
1453 struct xfrm_state
*x
;
1455 hlist_for_each_entry(x
, entry
, net
->xfrm
.state_bydst
+i
, bydst
) {
1456 if (x
->km
.seq
== seq
&&
1457 x
->km
.state
== XFRM_STATE_ACQ
) {
1466 struct xfrm_state
*xfrm_find_acq_byseq(struct net
*net
, u32 seq
)
1468 struct xfrm_state
*x
;
1470 spin_lock_bh(&xfrm_state_lock
);
1471 x
= __xfrm_find_acq_byseq(net
, seq
);
1472 spin_unlock_bh(&xfrm_state_lock
);
1475 EXPORT_SYMBOL(xfrm_find_acq_byseq
);
1477 u32
xfrm_get_acqseq(void)
1481 static DEFINE_SPINLOCK(acqseq_lock
);
1483 spin_lock_bh(&acqseq_lock
);
1484 res
= (++acqseq
? : ++acqseq
);
1485 spin_unlock_bh(&acqseq_lock
);
1488 EXPORT_SYMBOL(xfrm_get_acqseq
);
1490 int xfrm_alloc_spi(struct xfrm_state
*x
, u32 low
, u32 high
)
1492 struct net
*net
= xs_net(x
);
1494 struct xfrm_state
*x0
;
1496 __be32 minspi
= htonl(low
);
1497 __be32 maxspi
= htonl(high
);
1499 spin_lock_bh(&x
->lock
);
1500 if (x
->km
.state
== XFRM_STATE_DEAD
)
1509 if (minspi
== maxspi
) {
1510 x0
= xfrm_state_lookup(net
, &x
->id
.daddr
, minspi
, x
->id
.proto
, x
->props
.family
);
1518 for (h
=0; h
<high
-low
+1; h
++) {
1519 spi
= low
+ net_random()%(high
-low
+1);
1520 x0
= xfrm_state_lookup(net
, &x
->id
.daddr
, htonl(spi
), x
->id
.proto
, x
->props
.family
);
1522 x
->id
.spi
= htonl(spi
);
1529 spin_lock_bh(&xfrm_state_lock
);
1530 h
= xfrm_spi_hash(net
, &x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, x
->props
.family
);
1531 hlist_add_head(&x
->byspi
, net
->xfrm
.state_byspi
+h
);
1532 spin_unlock_bh(&xfrm_state_lock
);
1538 spin_unlock_bh(&x
->lock
);
1542 EXPORT_SYMBOL(xfrm_alloc_spi
);
1544 int xfrm_state_walk(struct net
*net
, struct xfrm_state_walk
*walk
,
1545 int (*func
)(struct xfrm_state
*, int, void*),
1548 struct xfrm_state
*state
;
1549 struct xfrm_state_walk
*x
;
1552 if (walk
->seq
!= 0 && list_empty(&walk
->all
))
1555 spin_lock_bh(&xfrm_state_lock
);
1556 if (list_empty(&walk
->all
))
1557 x
= list_first_entry(&net
->xfrm
.state_all
, struct xfrm_state_walk
, all
);
1559 x
= list_entry(&walk
->all
, struct xfrm_state_walk
, all
);
1560 list_for_each_entry_from(x
, &net
->xfrm
.state_all
, all
) {
1561 if (x
->state
== XFRM_STATE_DEAD
)
1563 state
= container_of(x
, struct xfrm_state
, km
);
1564 if (!xfrm_id_proto_match(state
->id
.proto
, walk
->proto
))
1566 err
= func(state
, walk
->seq
, data
);
1568 list_move_tail(&walk
->all
, &x
->all
);
1573 if (walk
->seq
== 0) {
1577 list_del_init(&walk
->all
);
1579 spin_unlock_bh(&xfrm_state_lock
);
1582 EXPORT_SYMBOL(xfrm_state_walk
);
1584 void xfrm_state_walk_init(struct xfrm_state_walk
*walk
, u8 proto
)
1586 INIT_LIST_HEAD(&walk
->all
);
1587 walk
->proto
= proto
;
1588 walk
->state
= XFRM_STATE_DEAD
;
1591 EXPORT_SYMBOL(xfrm_state_walk_init
);
1593 void xfrm_state_walk_done(struct xfrm_state_walk
*walk
)
1595 if (list_empty(&walk
->all
))
1598 spin_lock_bh(&xfrm_state_lock
);
1599 list_del(&walk
->all
);
1600 spin_lock_bh(&xfrm_state_lock
);
1602 EXPORT_SYMBOL(xfrm_state_walk_done
);
1605 void xfrm_replay_notify(struct xfrm_state
*x
, int event
)
1608 /* we send notify messages in case
1609 * 1. we updated on of the sequence numbers, and the seqno difference
1610 * is at least x->replay_maxdiff, in this case we also update the
1611 * timeout of our timer function
1612 * 2. if x->replay_maxage has elapsed since last update,
1613 * and there were changes
1615 * The state structure must be locked!
1619 case XFRM_REPLAY_UPDATE
:
1620 if (x
->replay_maxdiff
&&
1621 (x
->replay
.seq
- x
->preplay
.seq
< x
->replay_maxdiff
) &&
1622 (x
->replay
.oseq
- x
->preplay
.oseq
< x
->replay_maxdiff
)) {
1623 if (x
->xflags
& XFRM_TIME_DEFER
)
1624 event
= XFRM_REPLAY_TIMEOUT
;
1631 case XFRM_REPLAY_TIMEOUT
:
1632 if ((x
->replay
.seq
== x
->preplay
.seq
) &&
1633 (x
->replay
.bitmap
== x
->preplay
.bitmap
) &&
1634 (x
->replay
.oseq
== x
->preplay
.oseq
)) {
1635 x
->xflags
|= XFRM_TIME_DEFER
;
1642 memcpy(&x
->preplay
, &x
->replay
, sizeof(struct xfrm_replay_state
));
1643 c
.event
= XFRM_MSG_NEWAE
;
1644 c
.data
.aevent
= event
;
1645 km_state_notify(x
, &c
);
1647 if (x
->replay_maxage
&&
1648 !mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
))
1649 x
->xflags
&= ~XFRM_TIME_DEFER
;
1652 static void xfrm_replay_timer_handler(unsigned long data
)
1654 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
1656 spin_lock(&x
->lock
);
1658 if (x
->km
.state
== XFRM_STATE_VALID
) {
1659 if (xfrm_aevent_is_on(xs_net(x
)))
1660 xfrm_replay_notify(x
, XFRM_REPLAY_TIMEOUT
);
1662 x
->xflags
|= XFRM_TIME_DEFER
;
1665 spin_unlock(&x
->lock
);
1668 int xfrm_replay_check(struct xfrm_state
*x
,
1669 struct sk_buff
*skb
, __be32 net_seq
)
1672 u32 seq
= ntohl(net_seq
);
1674 if (unlikely(seq
== 0))
1677 if (likely(seq
> x
->replay
.seq
))
1680 diff
= x
->replay
.seq
- seq
;
1681 if (diff
>= min_t(unsigned int, x
->props
.replay_window
,
1682 sizeof(x
->replay
.bitmap
) * 8)) {
1683 x
->stats
.replay_window
++;
1687 if (x
->replay
.bitmap
& (1U << diff
)) {
1694 xfrm_audit_state_replay(x
, skb
, net_seq
);
1698 void xfrm_replay_advance(struct xfrm_state
*x
, __be32 net_seq
)
1701 u32 seq
= ntohl(net_seq
);
1703 if (seq
> x
->replay
.seq
) {
1704 diff
= seq
- x
->replay
.seq
;
1705 if (diff
< x
->props
.replay_window
)
1706 x
->replay
.bitmap
= ((x
->replay
.bitmap
) << diff
) | 1;
1708 x
->replay
.bitmap
= 1;
1709 x
->replay
.seq
= seq
;
1711 diff
= x
->replay
.seq
- seq
;
1712 x
->replay
.bitmap
|= (1U << diff
);
1715 if (xfrm_aevent_is_on(xs_net(x
)))
1716 xfrm_replay_notify(x
, XFRM_REPLAY_UPDATE
);
/* Registered key managers (e.g. PF_KEY, netlink xfrm_user), protected by
 * xfrm_km_lock: readers iterate under read_lock, (un)registration takes
 * the write lock. */
static LIST_HEAD(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
1722 void km_policy_notify(struct xfrm_policy
*xp
, int dir
, struct km_event
*c
)
1724 struct xfrm_mgr
*km
;
1726 read_lock(&xfrm_km_lock
);
1727 list_for_each_entry(km
, &xfrm_km_list
, list
)
1728 if (km
->notify_policy
)
1729 km
->notify_policy(xp
, dir
, c
);
1730 read_unlock(&xfrm_km_lock
);
1733 void km_state_notify(struct xfrm_state
*x
, struct km_event
*c
)
1735 struct xfrm_mgr
*km
;
1736 read_lock(&xfrm_km_lock
);
1737 list_for_each_entry(km
, &xfrm_km_list
, list
)
1740 read_unlock(&xfrm_km_lock
);
1743 EXPORT_SYMBOL(km_policy_notify
);
1744 EXPORT_SYMBOL(km_state_notify
);
1746 void km_state_expired(struct xfrm_state
*x
, int hard
, u32 pid
)
1748 struct net
*net
= xs_net(x
);
1753 c
.event
= XFRM_MSG_EXPIRE
;
1754 km_state_notify(x
, &c
);
1757 wake_up(&net
->xfrm
.km_waitq
);
1760 EXPORT_SYMBOL(km_state_expired
);
1762 * We send to all registered managers regardless of failure
1763 * We are happy with one success
1765 int km_query(struct xfrm_state
*x
, struct xfrm_tmpl
*t
, struct xfrm_policy
*pol
)
1767 int err
= -EINVAL
, acqret
;
1768 struct xfrm_mgr
*km
;
1770 read_lock(&xfrm_km_lock
);
1771 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1772 acqret
= km
->acquire(x
, t
, pol
, XFRM_POLICY_OUT
);
1776 read_unlock(&xfrm_km_lock
);
1779 EXPORT_SYMBOL(km_query
);
1781 int km_new_mapping(struct xfrm_state
*x
, xfrm_address_t
*ipaddr
, __be16 sport
)
1784 struct xfrm_mgr
*km
;
1786 read_lock(&xfrm_km_lock
);
1787 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1788 if (km
->new_mapping
)
1789 err
= km
->new_mapping(x
, ipaddr
, sport
);
1793 read_unlock(&xfrm_km_lock
);
1796 EXPORT_SYMBOL(km_new_mapping
);
1798 void km_policy_expired(struct xfrm_policy
*pol
, int dir
, int hard
, u32 pid
)
1800 struct net
*net
= xp_net(pol
);
1805 c
.event
= XFRM_MSG_POLEXPIRE
;
1806 km_policy_notify(pol
, dir
, &c
);
1809 wake_up(&net
->xfrm
.km_waitq
);
1811 EXPORT_SYMBOL(km_policy_expired
);
#ifdef CONFIG_XFRM_MIGRATE
/* Forward an SA migration request to every key manager implementing the
 * migrate callback; one success is enough. */
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate,
	       struct xfrm_kmaddress *k)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate, k);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif /* CONFIG_XFRM_MIGRATE */
1836 int km_report(u8 proto
, struct xfrm_selector
*sel
, xfrm_address_t
*addr
)
1840 struct xfrm_mgr
*km
;
1842 read_lock(&xfrm_km_lock
);
1843 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1845 ret
= km
->report(proto
, sel
, addr
);
1850 read_unlock(&xfrm_km_lock
);
1853 EXPORT_SYMBOL(km_report
);
1855 int xfrm_user_policy(struct sock
*sk
, int optname
, u8 __user
*optval
, int optlen
)
1859 struct xfrm_mgr
*km
;
1860 struct xfrm_policy
*pol
= NULL
;
1862 if (optlen
<= 0 || optlen
> PAGE_SIZE
)
1865 data
= kmalloc(optlen
, GFP_KERNEL
);
1870 if (copy_from_user(data
, optval
, optlen
))
1874 read_lock(&xfrm_km_lock
);
1875 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1876 pol
= km
->compile_policy(sk
, optname
, data
,
1881 read_unlock(&xfrm_km_lock
);
1884 xfrm_sk_policy_insert(sk
, err
, pol
);
1893 EXPORT_SYMBOL(xfrm_user_policy
);
1895 int xfrm_register_km(struct xfrm_mgr
*km
)
1897 write_lock_bh(&xfrm_km_lock
);
1898 list_add_tail(&km
->list
, &xfrm_km_list
);
1899 write_unlock_bh(&xfrm_km_lock
);
1902 EXPORT_SYMBOL(xfrm_register_km
);
1904 int xfrm_unregister_km(struct xfrm_mgr
*km
)
1906 write_lock_bh(&xfrm_km_lock
);
1907 list_del(&km
->list
);
1908 write_unlock_bh(&xfrm_km_lock
);
1911 EXPORT_SYMBOL(xfrm_unregister_km
);
1913 int xfrm_state_register_afinfo(struct xfrm_state_afinfo
*afinfo
)
1916 if (unlikely(afinfo
== NULL
))
1918 if (unlikely(afinfo
->family
>= NPROTO
))
1919 return -EAFNOSUPPORT
;
1920 write_lock_bh(&xfrm_state_afinfo_lock
);
1921 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != NULL
))
1924 xfrm_state_afinfo
[afinfo
->family
] = afinfo
;
1925 write_unlock_bh(&xfrm_state_afinfo_lock
);
1928 EXPORT_SYMBOL(xfrm_state_register_afinfo
);
1930 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo
*afinfo
)
1933 if (unlikely(afinfo
== NULL
))
1935 if (unlikely(afinfo
->family
>= NPROTO
))
1936 return -EAFNOSUPPORT
;
1937 write_lock_bh(&xfrm_state_afinfo_lock
);
1938 if (likely(xfrm_state_afinfo
[afinfo
->family
] != NULL
)) {
1939 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != afinfo
))
1942 xfrm_state_afinfo
[afinfo
->family
] = NULL
;
1944 write_unlock_bh(&xfrm_state_afinfo_lock
);
1947 EXPORT_SYMBOL(xfrm_state_unregister_afinfo
);
1949 static struct xfrm_state_afinfo
*xfrm_state_get_afinfo(unsigned int family
)
1951 struct xfrm_state_afinfo
*afinfo
;
1952 if (unlikely(family
>= NPROTO
))
1954 read_lock(&xfrm_state_afinfo_lock
);
1955 afinfo
= xfrm_state_afinfo
[family
];
1956 if (unlikely(!afinfo
))
1957 read_unlock(&xfrm_state_afinfo_lock
);
1961 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo
*afinfo
)
1962 __releases(xfrm_state_afinfo_lock
)
1964 read_unlock(&xfrm_state_afinfo_lock
);
1967 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1968 void xfrm_state_delete_tunnel(struct xfrm_state
*x
)
1971 struct xfrm_state
*t
= x
->tunnel
;
1973 if (atomic_read(&t
->tunnel_users
) == 2)
1974 xfrm_state_delete(t
);
1975 atomic_dec(&t
->tunnel_users
);
1980 EXPORT_SYMBOL(xfrm_state_delete_tunnel
);
1982 int xfrm_state_mtu(struct xfrm_state
*x
, int mtu
)
1986 spin_lock_bh(&x
->lock
);
1987 if (x
->km
.state
== XFRM_STATE_VALID
&&
1988 x
->type
&& x
->type
->get_mtu
)
1989 res
= x
->type
->get_mtu(x
, mtu
);
1991 res
= mtu
- x
->props
.header_len
;
1992 spin_unlock_bh(&x
->lock
);
1996 int xfrm_init_state(struct xfrm_state
*x
)
1998 struct xfrm_state_afinfo
*afinfo
;
1999 struct xfrm_mode
*inner_mode
;
2000 int family
= x
->props
.family
;
2003 err
= -EAFNOSUPPORT
;
2004 afinfo
= xfrm_state_get_afinfo(family
);
2009 if (afinfo
->init_flags
)
2010 err
= afinfo
->init_flags(x
);
2012 xfrm_state_put_afinfo(afinfo
);
2017 err
= -EPROTONOSUPPORT
;
2019 if (x
->sel
.family
!= AF_UNSPEC
) {
2020 inner_mode
= xfrm_get_mode(x
->props
.mode
, x
->sel
.family
);
2021 if (inner_mode
== NULL
)
2024 if (!(inner_mode
->flags
& XFRM_MODE_FLAG_TUNNEL
) &&
2025 family
!= x
->sel
.family
) {
2026 xfrm_put_mode(inner_mode
);
2030 x
->inner_mode
= inner_mode
;
2032 struct xfrm_mode
*inner_mode_iaf
;
2034 inner_mode
= xfrm_get_mode(x
->props
.mode
, AF_INET
);
2035 if (inner_mode
== NULL
)
2038 if (!(inner_mode
->flags
& XFRM_MODE_FLAG_TUNNEL
)) {
2039 xfrm_put_mode(inner_mode
);
2043 inner_mode_iaf
= xfrm_get_mode(x
->props
.mode
, AF_INET6
);
2044 if (inner_mode_iaf
== NULL
)
2047 if (!(inner_mode_iaf
->flags
& XFRM_MODE_FLAG_TUNNEL
)) {
2048 xfrm_put_mode(inner_mode_iaf
);
2052 if (x
->props
.family
== AF_INET
) {
2053 x
->inner_mode
= inner_mode
;
2054 x
->inner_mode_iaf
= inner_mode_iaf
;
2056 x
->inner_mode
= inner_mode_iaf
;
2057 x
->inner_mode_iaf
= inner_mode
;
2061 x
->type
= xfrm_get_type(x
->id
.proto
, family
);
2062 if (x
->type
== NULL
)
2065 err
= x
->type
->init_state(x
);
2069 x
->outer_mode
= xfrm_get_mode(x
->props
.mode
, family
);
2070 if (x
->outer_mode
== NULL
)
2073 x
->km
.state
= XFRM_STATE_VALID
;
2079 EXPORT_SYMBOL(xfrm_init_state
);
2081 int __net_init
xfrm_state_init(struct net
*net
)
2085 INIT_LIST_HEAD(&net
->xfrm
.state_all
);
2087 sz
= sizeof(struct hlist_head
) * 8;
2089 net
->xfrm
.state_bydst
= xfrm_hash_alloc(sz
);
2090 if (!net
->xfrm
.state_bydst
)
2092 net
->xfrm
.state_bysrc
= xfrm_hash_alloc(sz
);
2093 if (!net
->xfrm
.state_bysrc
)
2095 net
->xfrm
.state_byspi
= xfrm_hash_alloc(sz
);
2096 if (!net
->xfrm
.state_byspi
)
2098 net
->xfrm
.state_hmask
= ((sz
/ sizeof(struct hlist_head
)) - 1);
2100 net
->xfrm
.state_num
= 0;
2101 INIT_WORK(&net
->xfrm
.state_hash_work
, xfrm_hash_resize
);
2102 INIT_HLIST_HEAD(&net
->xfrm
.state_gc_list
);
2103 INIT_WORK(&net
->xfrm
.state_gc_work
, xfrm_state_gc_task
);
2104 init_waitqueue_head(&net
->xfrm
.km_waitq
);
2108 xfrm_hash_free(net
->xfrm
.state_bysrc
, sz
);
2110 xfrm_hash_free(net
->xfrm
.state_bydst
, sz
);
2115 void xfrm_state_fini(struct net
*net
)
2119 WARN_ON(!list_empty(&net
->xfrm
.state_all
));
2121 sz
= (net
->xfrm
.state_hmask
+ 1) * sizeof(struct hlist_head
);
2122 WARN_ON(!hlist_empty(net
->xfrm
.state_byspi
));
2123 xfrm_hash_free(net
->xfrm
.state_byspi
, sz
);
2124 WARN_ON(!hlist_empty(net
->xfrm
.state_bysrc
));
2125 xfrm_hash_free(net
->xfrm
.state_bysrc
, sz
);
2126 WARN_ON(!hlist_empty(net
->xfrm
.state_bydst
));
2127 xfrm_hash_free(net
->xfrm
.state_bydst
, sz
);
2130 #ifdef CONFIG_AUDITSYSCALL
2131 static void xfrm_audit_helper_sainfo(struct xfrm_state
*x
,
2132 struct audit_buffer
*audit_buf
)
2134 struct xfrm_sec_ctx
*ctx
= x
->security
;
2135 u32 spi
= ntohl(x
->id
.spi
);
2138 audit_log_format(audit_buf
, " sec_alg=%u sec_doi=%u sec_obj=%s",
2139 ctx
->ctx_alg
, ctx
->ctx_doi
, ctx
->ctx_str
);
2141 switch(x
->props
.family
) {
2143 audit_log_format(audit_buf
, " src=%pI4 dst=%pI4",
2144 &x
->props
.saddr
.a4
, &x
->id
.daddr
.a4
);
2147 audit_log_format(audit_buf
, " src=%pI6 dst=%pI6",
2148 x
->props
.saddr
.a6
, x
->id
.daddr
.a6
);
2152 audit_log_format(audit_buf
, " spi=%u(0x%x)", spi
, spi
);
2155 static void xfrm_audit_helper_pktinfo(struct sk_buff
*skb
, u16 family
,
2156 struct audit_buffer
*audit_buf
)
2159 struct ipv6hdr
*iph6
;
2164 audit_log_format(audit_buf
, " src=%pI4 dst=%pI4",
2165 &iph4
->saddr
, &iph4
->daddr
);
2168 iph6
= ipv6_hdr(skb
);
2169 audit_log_format(audit_buf
,
2170 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2171 &iph6
->saddr
,&iph6
->daddr
,
2172 iph6
->flow_lbl
[0] & 0x0f,
2179 void xfrm_audit_state_add(struct xfrm_state
*x
, int result
,
2180 uid_t auid
, u32 sessionid
, u32 secid
)
2182 struct audit_buffer
*audit_buf
;
2184 audit_buf
= xfrm_audit_start("SAD-add");
2185 if (audit_buf
== NULL
)
2187 xfrm_audit_helper_usrinfo(auid
, sessionid
, secid
, audit_buf
);
2188 xfrm_audit_helper_sainfo(x
, audit_buf
);
2189 audit_log_format(audit_buf
, " res=%u", result
);
2190 audit_log_end(audit_buf
);
2192 EXPORT_SYMBOL_GPL(xfrm_audit_state_add
);
2194 void xfrm_audit_state_delete(struct xfrm_state
*x
, int result
,
2195 uid_t auid
, u32 sessionid
, u32 secid
)
2197 struct audit_buffer
*audit_buf
;
2199 audit_buf
= xfrm_audit_start("SAD-delete");
2200 if (audit_buf
== NULL
)
2202 xfrm_audit_helper_usrinfo(auid
, sessionid
, secid
, audit_buf
);
2203 xfrm_audit_helper_sainfo(x
, audit_buf
);
2204 audit_log_format(audit_buf
, " res=%u", result
);
2205 audit_log_end(audit_buf
);
2207 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete
);
2209 void xfrm_audit_state_replay_overflow(struct xfrm_state
*x
,
2210 struct sk_buff
*skb
)
2212 struct audit_buffer
*audit_buf
;
2215 audit_buf
= xfrm_audit_start("SA-replay-overflow");
2216 if (audit_buf
== NULL
)
2218 xfrm_audit_helper_pktinfo(skb
, x
->props
.family
, audit_buf
);
2219 /* don't record the sequence number because it's inherent in this kind
2220 * of audit message */
2221 spi
= ntohl(x
->id
.spi
);
2222 audit_log_format(audit_buf
, " spi=%u(0x%x)", spi
, spi
);
2223 audit_log_end(audit_buf
);
2225 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow
);
2227 static void xfrm_audit_state_replay(struct xfrm_state
*x
,
2228 struct sk_buff
*skb
, __be32 net_seq
)
2230 struct audit_buffer
*audit_buf
;
2233 audit_buf
= xfrm_audit_start("SA-replayed-pkt");
2234 if (audit_buf
== NULL
)
2236 xfrm_audit_helper_pktinfo(skb
, x
->props
.family
, audit_buf
);
2237 spi
= ntohl(x
->id
.spi
);
2238 audit_log_format(audit_buf
, " spi=%u(0x%x) seqno=%u",
2239 spi
, spi
, ntohl(net_seq
));
2240 audit_log_end(audit_buf
);
2243 void xfrm_audit_state_notfound_simple(struct sk_buff
*skb
, u16 family
)
2245 struct audit_buffer
*audit_buf
;
2247 audit_buf
= xfrm_audit_start("SA-notfound");
2248 if (audit_buf
== NULL
)
2250 xfrm_audit_helper_pktinfo(skb
, family
, audit_buf
);
2251 audit_log_end(audit_buf
);
2253 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple
);
2255 void xfrm_audit_state_notfound(struct sk_buff
*skb
, u16 family
,
2256 __be32 net_spi
, __be32 net_seq
)
2258 struct audit_buffer
*audit_buf
;
2261 audit_buf
= xfrm_audit_start("SA-notfound");
2262 if (audit_buf
== NULL
)
2264 xfrm_audit_helper_pktinfo(skb
, family
, audit_buf
);
2265 spi
= ntohl(net_spi
);
2266 audit_log_format(audit_buf
, " spi=%u(0x%x) seqno=%u",
2267 spi
, spi
, ntohl(net_seq
));
2268 audit_log_end(audit_buf
);
2270 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound
);
2272 void xfrm_audit_state_icvfail(struct xfrm_state
*x
,
2273 struct sk_buff
*skb
, u8 proto
)
2275 struct audit_buffer
*audit_buf
;
2279 audit_buf
= xfrm_audit_start("SA-icv-failure");
2280 if (audit_buf
== NULL
)
2282 xfrm_audit_helper_pktinfo(skb
, x
->props
.family
, audit_buf
);
2283 if (xfrm_parse_spi(skb
, proto
, &net_spi
, &net_seq
) == 0) {
2284 u32 spi
= ntohl(net_spi
);
2285 audit_log_format(audit_buf
, " spi=%u(0x%x) seqno=%u",
2286 spi
, spi
, ntohl(net_seq
));
2288 audit_log_end(audit_buf
);
2290 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail
);
2291 #endif /* CONFIG_AUDITSYSCALL */