6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
25 #include "xfrm_hash.h"
28 EXPORT_SYMBOL(xfrm_nl
);
30 u32 sysctl_xfrm_aevent_etime __read_mostly
= XFRM_AE_ETIME
;
31 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime
);
33 u32 sysctl_xfrm_aevent_rseqth __read_mostly
= XFRM_AE_SEQT_SIZE
;
34 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth
);
36 u32 sysctl_xfrm_acq_expires __read_mostly
= 30;
38 /* Each xfrm_state may be linked to two tables:
40 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
41 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
42 destination/tunnel endpoint. (output)
45 static DEFINE_SPINLOCK(xfrm_state_lock
);
47 /* Hash table to find appropriate SA towards given target (endpoint
48 * of tunnel or destination of transport mode) allowed by selector.
50 * Main use is finding SA after policy selected tunnel or transport mode.
51 * Also, it can be used by ah/esp icmp error handler to find offending SA.
53 static struct hlist_head
*xfrm_state_bydst __read_mostly
;
54 static struct hlist_head
*xfrm_state_bysrc __read_mostly
;
55 static struct hlist_head
*xfrm_state_byspi __read_mostly
;
56 static unsigned int xfrm_state_hmask __read_mostly
;
57 static unsigned int xfrm_state_hashmax __read_mostly
= 1 * 1024 * 1024;
58 static unsigned int xfrm_state_num
;
59 static unsigned int xfrm_state_genid
;
61 static struct xfrm_state_afinfo
*xfrm_state_get_afinfo(unsigned int family
);
62 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo
*afinfo
);
64 static inline unsigned int xfrm_dst_hash(xfrm_address_t
*daddr
,
65 xfrm_address_t
*saddr
,
67 unsigned short family
)
69 return __xfrm_dst_hash(daddr
, saddr
, reqid
, family
, xfrm_state_hmask
);
72 static inline unsigned int xfrm_src_hash(xfrm_address_t
*daddr
,
73 xfrm_address_t
*saddr
,
74 unsigned short family
)
76 return __xfrm_src_hash(daddr
, saddr
, family
, xfrm_state_hmask
);
79 static inline unsigned int
80 xfrm_spi_hash(xfrm_address_t
*daddr
, __be32 spi
, u8 proto
, unsigned short family
)
82 return __xfrm_spi_hash(daddr
, spi
, proto
, family
, xfrm_state_hmask
);
85 static void xfrm_hash_transfer(struct hlist_head
*list
,
86 struct hlist_head
*ndsttable
,
87 struct hlist_head
*nsrctable
,
88 struct hlist_head
*nspitable
,
89 unsigned int nhashmask
)
91 struct hlist_node
*entry
, *tmp
;
94 hlist_for_each_entry_safe(x
, entry
, tmp
, list
, bydst
) {
97 h
= __xfrm_dst_hash(&x
->id
.daddr
, &x
->props
.saddr
,
98 x
->props
.reqid
, x
->props
.family
,
100 hlist_add_head(&x
->bydst
, ndsttable
+h
);
102 h
= __xfrm_src_hash(&x
->id
.daddr
, &x
->props
.saddr
,
105 hlist_add_head(&x
->bysrc
, nsrctable
+h
);
108 h
= __xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
,
109 x
->id
.proto
, x
->props
.family
,
111 hlist_add_head(&x
->byspi
, nspitable
+h
);
116 static unsigned long xfrm_hash_new_size(void)
118 return ((xfrm_state_hmask
+ 1) << 1) *
119 sizeof(struct hlist_head
);
122 static DEFINE_MUTEX(hash_resize_mutex
);
124 static void xfrm_hash_resize(struct work_struct
*__unused
)
126 struct hlist_head
*ndst
, *nsrc
, *nspi
, *odst
, *osrc
, *ospi
;
127 unsigned long nsize
, osize
;
128 unsigned int nhashmask
, ohashmask
;
131 mutex_lock(&hash_resize_mutex
);
133 nsize
= xfrm_hash_new_size();
134 ndst
= xfrm_hash_alloc(nsize
);
137 nsrc
= xfrm_hash_alloc(nsize
);
139 xfrm_hash_free(ndst
, nsize
);
142 nspi
= xfrm_hash_alloc(nsize
);
144 xfrm_hash_free(ndst
, nsize
);
145 xfrm_hash_free(nsrc
, nsize
);
149 spin_lock_bh(&xfrm_state_lock
);
151 nhashmask
= (nsize
/ sizeof(struct hlist_head
)) - 1U;
152 for (i
= xfrm_state_hmask
; i
>= 0; i
--)
153 xfrm_hash_transfer(xfrm_state_bydst
+i
, ndst
, nsrc
, nspi
,
156 odst
= xfrm_state_bydst
;
157 osrc
= xfrm_state_bysrc
;
158 ospi
= xfrm_state_byspi
;
159 ohashmask
= xfrm_state_hmask
;
161 xfrm_state_bydst
= ndst
;
162 xfrm_state_bysrc
= nsrc
;
163 xfrm_state_byspi
= nspi
;
164 xfrm_state_hmask
= nhashmask
;
166 spin_unlock_bh(&xfrm_state_lock
);
168 osize
= (ohashmask
+ 1) * sizeof(struct hlist_head
);
169 xfrm_hash_free(odst
, osize
);
170 xfrm_hash_free(osrc
, osize
);
171 xfrm_hash_free(ospi
, osize
);
174 mutex_unlock(&hash_resize_mutex
);
177 static DECLARE_WORK(xfrm_hash_work
, xfrm_hash_resize
);
179 DECLARE_WAIT_QUEUE_HEAD(km_waitq
);
180 EXPORT_SYMBOL(km_waitq
);
182 static DEFINE_RWLOCK(xfrm_state_afinfo_lock
);
183 static struct xfrm_state_afinfo
*xfrm_state_afinfo
[NPROTO
];
185 static struct work_struct xfrm_state_gc_work
;
186 static HLIST_HEAD(xfrm_state_gc_list
);
187 static DEFINE_SPINLOCK(xfrm_state_gc_lock
);
189 int __xfrm_state_delete(struct xfrm_state
*x
);
191 int km_query(struct xfrm_state
*x
, struct xfrm_tmpl
*t
, struct xfrm_policy
*pol
);
192 void km_state_expired(struct xfrm_state
*x
, int hard
, u32 pid
);
194 static struct xfrm_state_afinfo
*xfrm_state_lock_afinfo(unsigned int family
)
196 struct xfrm_state_afinfo
*afinfo
;
197 if (unlikely(family
>= NPROTO
))
199 write_lock_bh(&xfrm_state_afinfo_lock
);
200 afinfo
= xfrm_state_afinfo
[family
];
201 if (unlikely(!afinfo
))
202 write_unlock_bh(&xfrm_state_afinfo_lock
);
206 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo
*afinfo
)
208 write_unlock_bh(&xfrm_state_afinfo_lock
);
211 int xfrm_register_type(struct xfrm_type
*type
, unsigned short family
)
213 struct xfrm_state_afinfo
*afinfo
= xfrm_state_lock_afinfo(family
);
214 struct xfrm_type
**typemap
;
217 if (unlikely(afinfo
== NULL
))
218 return -EAFNOSUPPORT
;
219 typemap
= afinfo
->type_map
;
221 if (likely(typemap
[type
->proto
] == NULL
))
222 typemap
[type
->proto
] = type
;
225 xfrm_state_unlock_afinfo(afinfo
);
228 EXPORT_SYMBOL(xfrm_register_type
);
230 int xfrm_unregister_type(struct xfrm_type
*type
, unsigned short family
)
232 struct xfrm_state_afinfo
*afinfo
= xfrm_state_lock_afinfo(family
);
233 struct xfrm_type
**typemap
;
236 if (unlikely(afinfo
== NULL
))
237 return -EAFNOSUPPORT
;
238 typemap
= afinfo
->type_map
;
240 if (unlikely(typemap
[type
->proto
] != type
))
243 typemap
[type
->proto
] = NULL
;
244 xfrm_state_unlock_afinfo(afinfo
);
247 EXPORT_SYMBOL(xfrm_unregister_type
);
249 static struct xfrm_type
*xfrm_get_type(u8 proto
, unsigned short family
)
251 struct xfrm_state_afinfo
*afinfo
;
252 struct xfrm_type
**typemap
;
253 struct xfrm_type
*type
;
254 int modload_attempted
= 0;
257 afinfo
= xfrm_state_get_afinfo(family
);
258 if (unlikely(afinfo
== NULL
))
260 typemap
= afinfo
->type_map
;
262 type
= typemap
[proto
];
263 if (unlikely(type
&& !try_module_get(type
->owner
)))
265 if (!type
&& !modload_attempted
) {
266 xfrm_state_put_afinfo(afinfo
);
267 request_module("xfrm-type-%d-%d", family
, proto
);
268 modload_attempted
= 1;
272 xfrm_state_put_afinfo(afinfo
);
276 static void xfrm_put_type(struct xfrm_type
*type
)
278 module_put(type
->owner
);
281 int xfrm_register_mode(struct xfrm_mode
*mode
, int family
)
283 struct xfrm_state_afinfo
*afinfo
;
284 struct xfrm_mode
**modemap
;
287 if (unlikely(mode
->encap
>= XFRM_MODE_MAX
))
290 afinfo
= xfrm_state_lock_afinfo(family
);
291 if (unlikely(afinfo
== NULL
))
292 return -EAFNOSUPPORT
;
295 modemap
= afinfo
->mode_map
;
296 if (modemap
[mode
->encap
])
300 if (!try_module_get(afinfo
->owner
))
303 mode
->afinfo
= afinfo
;
304 modemap
[mode
->encap
] = mode
;
308 xfrm_state_unlock_afinfo(afinfo
);
311 EXPORT_SYMBOL(xfrm_register_mode
);
313 int xfrm_unregister_mode(struct xfrm_mode
*mode
, int family
)
315 struct xfrm_state_afinfo
*afinfo
;
316 struct xfrm_mode
**modemap
;
319 if (unlikely(mode
->encap
>= XFRM_MODE_MAX
))
322 afinfo
= xfrm_state_lock_afinfo(family
);
323 if (unlikely(afinfo
== NULL
))
324 return -EAFNOSUPPORT
;
327 modemap
= afinfo
->mode_map
;
328 if (likely(modemap
[mode
->encap
] == mode
)) {
329 modemap
[mode
->encap
] = NULL
;
330 module_put(mode
->afinfo
->owner
);
334 xfrm_state_unlock_afinfo(afinfo
);
337 EXPORT_SYMBOL(xfrm_unregister_mode
);
339 static struct xfrm_mode
*xfrm_get_mode(unsigned int encap
, int family
)
341 struct xfrm_state_afinfo
*afinfo
;
342 struct xfrm_mode
*mode
;
343 int modload_attempted
= 0;
345 if (unlikely(encap
>= XFRM_MODE_MAX
))
349 afinfo
= xfrm_state_get_afinfo(family
);
350 if (unlikely(afinfo
== NULL
))
353 mode
= afinfo
->mode_map
[encap
];
354 if (unlikely(mode
&& !try_module_get(mode
->owner
)))
356 if (!mode
&& !modload_attempted
) {
357 xfrm_state_put_afinfo(afinfo
);
358 request_module("xfrm-mode-%d-%d", family
, encap
);
359 modload_attempted
= 1;
363 xfrm_state_put_afinfo(afinfo
);
367 static void xfrm_put_mode(struct xfrm_mode
*mode
)
369 module_put(mode
->owner
);
372 static void xfrm_state_gc_destroy(struct xfrm_state
*x
)
374 del_timer_sync(&x
->timer
);
375 del_timer_sync(&x
->rtimer
);
382 xfrm_put_mode(x
->inner_mode
);
384 xfrm_put_mode(x
->outer_mode
);
386 x
->type
->destructor(x
);
387 xfrm_put_type(x
->type
);
389 security_xfrm_state_free(x
);
393 static void xfrm_state_gc_task(struct work_struct
*data
)
395 struct xfrm_state
*x
;
396 struct hlist_node
*entry
, *tmp
;
397 struct hlist_head gc_list
;
399 spin_lock_bh(&xfrm_state_gc_lock
);
400 gc_list
.first
= xfrm_state_gc_list
.first
;
401 INIT_HLIST_HEAD(&xfrm_state_gc_list
);
402 spin_unlock_bh(&xfrm_state_gc_lock
);
404 hlist_for_each_entry_safe(x
, entry
, tmp
, &gc_list
, bydst
)
405 xfrm_state_gc_destroy(x
);
410 static inline unsigned long make_jiffies(long secs
)
412 if (secs
>= (MAX_SCHEDULE_TIMEOUT
-1)/HZ
)
413 return MAX_SCHEDULE_TIMEOUT
-1;
418 static void xfrm_timer_handler(unsigned long data
)
420 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
421 unsigned long now
= get_seconds();
422 long next
= LONG_MAX
;
427 if (x
->km
.state
== XFRM_STATE_DEAD
)
429 if (x
->km
.state
== XFRM_STATE_EXPIRED
)
431 if (x
->lft
.hard_add_expires_seconds
) {
432 long tmo
= x
->lft
.hard_add_expires_seconds
+
433 x
->curlft
.add_time
- now
;
439 if (x
->lft
.hard_use_expires_seconds
) {
440 long tmo
= x
->lft
.hard_use_expires_seconds
+
441 (x
->curlft
.use_time
? : now
) - now
;
449 if (x
->lft
.soft_add_expires_seconds
) {
450 long tmo
= x
->lft
.soft_add_expires_seconds
+
451 x
->curlft
.add_time
- now
;
457 if (x
->lft
.soft_use_expires_seconds
) {
458 long tmo
= x
->lft
.soft_use_expires_seconds
+
459 (x
->curlft
.use_time
? : now
) - now
;
468 km_state_expired(x
, 0, 0);
470 if (next
!= LONG_MAX
)
471 mod_timer(&x
->timer
, jiffies
+ make_jiffies(next
));
476 if (x
->km
.state
== XFRM_STATE_ACQ
&& x
->id
.spi
== 0) {
477 x
->km
.state
= XFRM_STATE_EXPIRED
;
483 err
= __xfrm_state_delete(x
);
484 if (!err
&& x
->id
.spi
)
485 km_state_expired(x
, 1, 0);
487 xfrm_audit_state_delete(x
, err
? 0 : 1,
488 audit_get_loginuid(current
->audit_context
), 0);
491 spin_unlock(&x
->lock
);
494 static void xfrm_replay_timer_handler(unsigned long data
);
496 struct xfrm_state
*xfrm_state_alloc(void)
498 struct xfrm_state
*x
;
500 x
= kzalloc(sizeof(struct xfrm_state
), GFP_ATOMIC
);
503 atomic_set(&x
->refcnt
, 1);
504 atomic_set(&x
->tunnel_users
, 0);
505 INIT_HLIST_NODE(&x
->bydst
);
506 INIT_HLIST_NODE(&x
->bysrc
);
507 INIT_HLIST_NODE(&x
->byspi
);
508 setup_timer(&x
->timer
, xfrm_timer_handler
, (unsigned long)x
);
509 setup_timer(&x
->rtimer
, xfrm_replay_timer_handler
,
511 x
->curlft
.add_time
= get_seconds();
512 x
->lft
.soft_byte_limit
= XFRM_INF
;
513 x
->lft
.soft_packet_limit
= XFRM_INF
;
514 x
->lft
.hard_byte_limit
= XFRM_INF
;
515 x
->lft
.hard_packet_limit
= XFRM_INF
;
516 x
->replay_maxage
= 0;
517 x
->replay_maxdiff
= 0;
518 spin_lock_init(&x
->lock
);
522 EXPORT_SYMBOL(xfrm_state_alloc
);
524 void __xfrm_state_destroy(struct xfrm_state
*x
)
526 BUG_TRAP(x
->km
.state
== XFRM_STATE_DEAD
);
528 spin_lock_bh(&xfrm_state_gc_lock
);
529 hlist_add_head(&x
->bydst
, &xfrm_state_gc_list
);
530 spin_unlock_bh(&xfrm_state_gc_lock
);
531 schedule_work(&xfrm_state_gc_work
);
533 EXPORT_SYMBOL(__xfrm_state_destroy
);
535 int __xfrm_state_delete(struct xfrm_state
*x
)
539 if (x
->km
.state
!= XFRM_STATE_DEAD
) {
540 x
->km
.state
= XFRM_STATE_DEAD
;
541 spin_lock(&xfrm_state_lock
);
542 hlist_del(&x
->bydst
);
543 hlist_del(&x
->bysrc
);
545 hlist_del(&x
->byspi
);
547 spin_unlock(&xfrm_state_lock
);
549 /* All xfrm_state objects are created by xfrm_state_alloc.
550 * The xfrm_state_alloc call gives a reference, and that
551 * is what we are dropping here.
559 EXPORT_SYMBOL(__xfrm_state_delete
);
561 int xfrm_state_delete(struct xfrm_state
*x
)
565 spin_lock_bh(&x
->lock
);
566 err
= __xfrm_state_delete(x
);
567 spin_unlock_bh(&x
->lock
);
571 EXPORT_SYMBOL(xfrm_state_delete
);
573 #ifdef CONFIG_SECURITY_NETWORK_XFRM
575 xfrm_state_flush_secctx_check(u8 proto
, struct xfrm_audit
*audit_info
)
579 for (i
= 0; i
<= xfrm_state_hmask
; i
++) {
580 struct hlist_node
*entry
;
581 struct xfrm_state
*x
;
583 hlist_for_each_entry(x
, entry
, xfrm_state_bydst
+i
, bydst
) {
584 if (xfrm_id_proto_match(x
->id
.proto
, proto
) &&
585 (err
= security_xfrm_state_delete(x
)) != 0) {
586 xfrm_audit_state_delete(x
, 0,
587 audit_info
->loginuid
,
598 xfrm_state_flush_secctx_check(u8 proto
, struct xfrm_audit
*audit_info
)
604 int xfrm_state_flush(u8 proto
, struct xfrm_audit
*audit_info
)
608 spin_lock_bh(&xfrm_state_lock
);
609 err
= xfrm_state_flush_secctx_check(proto
, audit_info
);
613 for (i
= 0; i
<= xfrm_state_hmask
; i
++) {
614 struct hlist_node
*entry
;
615 struct xfrm_state
*x
;
617 hlist_for_each_entry(x
, entry
, xfrm_state_bydst
+i
, bydst
) {
618 if (!xfrm_state_kern(x
) &&
619 xfrm_id_proto_match(x
->id
.proto
, proto
)) {
621 spin_unlock_bh(&xfrm_state_lock
);
623 err
= xfrm_state_delete(x
);
624 xfrm_audit_state_delete(x
, err
? 0 : 1,
625 audit_info
->loginuid
,
629 spin_lock_bh(&xfrm_state_lock
);
637 spin_unlock_bh(&xfrm_state_lock
);
641 EXPORT_SYMBOL(xfrm_state_flush
);
643 void xfrm_sad_getinfo(struct xfrmk_sadinfo
*si
)
645 spin_lock_bh(&xfrm_state_lock
);
646 si
->sadcnt
= xfrm_state_num
;
647 si
->sadhcnt
= xfrm_state_hmask
;
648 si
->sadhmcnt
= xfrm_state_hashmax
;
649 spin_unlock_bh(&xfrm_state_lock
);
651 EXPORT_SYMBOL(xfrm_sad_getinfo
);
654 xfrm_init_tempsel(struct xfrm_state
*x
, struct flowi
*fl
,
655 struct xfrm_tmpl
*tmpl
,
656 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
657 unsigned short family
)
659 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
662 afinfo
->init_tempsel(x
, fl
, tmpl
, daddr
, saddr
);
663 xfrm_state_put_afinfo(afinfo
);
667 static struct xfrm_state
*__xfrm_state_lookup(xfrm_address_t
*daddr
, __be32 spi
, u8 proto
, unsigned short family
)
669 unsigned int h
= xfrm_spi_hash(daddr
, spi
, proto
, family
);
670 struct xfrm_state
*x
;
671 struct hlist_node
*entry
;
673 hlist_for_each_entry(x
, entry
, xfrm_state_byspi
+h
, byspi
) {
674 if (x
->props
.family
!= family
||
676 x
->id
.proto
!= proto
)
681 if (x
->id
.daddr
.a4
!= daddr
->a4
)
685 if (!ipv6_addr_equal((struct in6_addr
*)daddr
,
699 static struct xfrm_state
*__xfrm_state_lookup_byaddr(xfrm_address_t
*daddr
, xfrm_address_t
*saddr
, u8 proto
, unsigned short family
)
701 unsigned int h
= xfrm_src_hash(daddr
, saddr
, family
);
702 struct xfrm_state
*x
;
703 struct hlist_node
*entry
;
705 hlist_for_each_entry(x
, entry
, xfrm_state_bysrc
+h
, bysrc
) {
706 if (x
->props
.family
!= family
||
707 x
->id
.proto
!= proto
)
712 if (x
->id
.daddr
.a4
!= daddr
->a4
||
713 x
->props
.saddr
.a4
!= saddr
->a4
)
717 if (!ipv6_addr_equal((struct in6_addr
*)daddr
,
720 !ipv6_addr_equal((struct in6_addr
*)saddr
,
734 static inline struct xfrm_state
*
735 __xfrm_state_locate(struct xfrm_state
*x
, int use_spi
, int family
)
738 return __xfrm_state_lookup(&x
->id
.daddr
, x
->id
.spi
,
739 x
->id
.proto
, family
);
741 return __xfrm_state_lookup_byaddr(&x
->id
.daddr
,
743 x
->id
.proto
, family
);
746 static void xfrm_hash_grow_check(int have_hash_collision
)
748 if (have_hash_collision
&&
749 (xfrm_state_hmask
+ 1) < xfrm_state_hashmax
&&
750 xfrm_state_num
> xfrm_state_hmask
)
751 schedule_work(&xfrm_hash_work
);
755 xfrm_state_find(xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
756 struct flowi
*fl
, struct xfrm_tmpl
*tmpl
,
757 struct xfrm_policy
*pol
, int *err
,
758 unsigned short family
)
761 struct hlist_node
*entry
;
762 struct xfrm_state
*x
, *x0
;
763 int acquire_in_progress
= 0;
765 struct xfrm_state
*best
= NULL
;
767 spin_lock_bh(&xfrm_state_lock
);
768 h
= xfrm_dst_hash(daddr
, saddr
, tmpl
->reqid
, family
);
769 hlist_for_each_entry(x
, entry
, xfrm_state_bydst
+h
, bydst
) {
770 if (x
->props
.family
== family
&&
771 x
->props
.reqid
== tmpl
->reqid
&&
772 !(x
->props
.flags
& XFRM_STATE_WILDRECV
) &&
773 xfrm_state_addr_check(x
, daddr
, saddr
, family
) &&
774 tmpl
->mode
== x
->props
.mode
&&
775 tmpl
->id
.proto
== x
->id
.proto
&&
776 (tmpl
->id
.spi
== x
->id
.spi
|| !tmpl
->id
.spi
)) {
778 1. There is a valid state with matching selector.
780 2. Valid state with inappropriate selector. Skip.
782 Entering area of "sysdeps".
784 3. If state is not valid, selector is temporary,
785 it selects only session which triggered
786 previous resolution. Key manager will do
787 something to install a state with proper
790 if (x
->km
.state
== XFRM_STATE_VALID
) {
791 if (!xfrm_selector_match(&x
->sel
, fl
, x
->sel
.family
) ||
792 !security_xfrm_state_pol_flow_match(x
, pol
, fl
))
795 best
->km
.dying
> x
->km
.dying
||
796 (best
->km
.dying
== x
->km
.dying
&&
797 best
->curlft
.add_time
< x
->curlft
.add_time
))
799 } else if (x
->km
.state
== XFRM_STATE_ACQ
) {
800 acquire_in_progress
= 1;
801 } else if (x
->km
.state
== XFRM_STATE_ERROR
||
802 x
->km
.state
== XFRM_STATE_EXPIRED
) {
803 if (xfrm_selector_match(&x
->sel
, fl
, x
->sel
.family
) &&
804 security_xfrm_state_pol_flow_match(x
, pol
, fl
))
811 if (!x
&& !error
&& !acquire_in_progress
) {
813 (x0
= __xfrm_state_lookup(daddr
, tmpl
->id
.spi
,
814 tmpl
->id
.proto
, family
)) != NULL
) {
819 x
= xfrm_state_alloc();
824 /* Initialize temporary selector matching only
825 * to current session. */
826 xfrm_init_tempsel(x
, fl
, tmpl
, daddr
, saddr
, family
);
828 error
= security_xfrm_state_alloc_acquire(x
, pol
->security
, fl
->secid
);
830 x
->km
.state
= XFRM_STATE_DEAD
;
836 if (km_query(x
, tmpl
, pol
) == 0) {
837 x
->km
.state
= XFRM_STATE_ACQ
;
838 hlist_add_head(&x
->bydst
, xfrm_state_bydst
+h
);
839 h
= xfrm_src_hash(daddr
, saddr
, family
);
840 hlist_add_head(&x
->bysrc
, xfrm_state_bysrc
+h
);
842 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, family
);
843 hlist_add_head(&x
->byspi
, xfrm_state_byspi
+h
);
845 x
->lft
.hard_add_expires_seconds
= sysctl_xfrm_acq_expires
;
846 x
->timer
.expires
= jiffies
+ sysctl_xfrm_acq_expires
*HZ
;
847 add_timer(&x
->timer
);
849 xfrm_hash_grow_check(x
->bydst
.next
!= NULL
);
851 x
->km
.state
= XFRM_STATE_DEAD
;
861 *err
= acquire_in_progress
? -EAGAIN
: error
;
862 spin_unlock_bh(&xfrm_state_lock
);
867 xfrm_stateonly_find(xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
868 unsigned short family
, u8 mode
, u8 proto
, u32 reqid
)
871 struct xfrm_state
*rx
= NULL
, *x
= NULL
;
872 struct hlist_node
*entry
;
874 spin_lock(&xfrm_state_lock
);
875 h
= xfrm_dst_hash(daddr
, saddr
, reqid
, family
);
876 hlist_for_each_entry(x
, entry
, xfrm_state_bydst
+h
, bydst
) {
877 if (x
->props
.family
== family
&&
878 x
->props
.reqid
== reqid
&&
879 !(x
->props
.flags
& XFRM_STATE_WILDRECV
) &&
880 xfrm_state_addr_check(x
, daddr
, saddr
, family
) &&
881 mode
== x
->props
.mode
&&
882 proto
== x
->id
.proto
&&
883 x
->km
.state
== XFRM_STATE_VALID
) {
891 spin_unlock(&xfrm_state_lock
);
896 EXPORT_SYMBOL(xfrm_stateonly_find
);
898 static void __xfrm_state_insert(struct xfrm_state
*x
)
902 x
->genid
= ++xfrm_state_genid
;
904 h
= xfrm_dst_hash(&x
->id
.daddr
, &x
->props
.saddr
,
905 x
->props
.reqid
, x
->props
.family
);
906 hlist_add_head(&x
->bydst
, xfrm_state_bydst
+h
);
908 h
= xfrm_src_hash(&x
->id
.daddr
, &x
->props
.saddr
, x
->props
.family
);
909 hlist_add_head(&x
->bysrc
, xfrm_state_bysrc
+h
);
912 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
,
915 hlist_add_head(&x
->byspi
, xfrm_state_byspi
+h
);
918 mod_timer(&x
->timer
, jiffies
+ HZ
);
919 if (x
->replay_maxage
)
920 mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
);
926 xfrm_hash_grow_check(x
->bydst
.next
!= NULL
);
929 /* xfrm_state_lock is held */
930 static void __xfrm_state_bump_genids(struct xfrm_state
*xnew
)
932 unsigned short family
= xnew
->props
.family
;
933 u32 reqid
= xnew
->props
.reqid
;
934 struct xfrm_state
*x
;
935 struct hlist_node
*entry
;
938 h
= xfrm_dst_hash(&xnew
->id
.daddr
, &xnew
->props
.saddr
, reqid
, family
);
939 hlist_for_each_entry(x
, entry
, xfrm_state_bydst
+h
, bydst
) {
940 if (x
->props
.family
== family
&&
941 x
->props
.reqid
== reqid
&&
942 !xfrm_addr_cmp(&x
->id
.daddr
, &xnew
->id
.daddr
, family
) &&
943 !xfrm_addr_cmp(&x
->props
.saddr
, &xnew
->props
.saddr
, family
))
944 x
->genid
= xfrm_state_genid
;
948 void xfrm_state_insert(struct xfrm_state
*x
)
950 spin_lock_bh(&xfrm_state_lock
);
951 __xfrm_state_bump_genids(x
);
952 __xfrm_state_insert(x
);
953 spin_unlock_bh(&xfrm_state_lock
);
955 EXPORT_SYMBOL(xfrm_state_insert
);
957 /* xfrm_state_lock is held */
958 static struct xfrm_state
*__find_acq_core(unsigned short family
, u8 mode
, u32 reqid
, u8 proto
, xfrm_address_t
*daddr
, xfrm_address_t
*saddr
, int create
)
960 unsigned int h
= xfrm_dst_hash(daddr
, saddr
, reqid
, family
);
961 struct hlist_node
*entry
;
962 struct xfrm_state
*x
;
964 hlist_for_each_entry(x
, entry
, xfrm_state_bydst
+h
, bydst
) {
965 if (x
->props
.reqid
!= reqid
||
966 x
->props
.mode
!= mode
||
967 x
->props
.family
!= family
||
968 x
->km
.state
!= XFRM_STATE_ACQ
||
970 x
->id
.proto
!= proto
)
975 if (x
->id
.daddr
.a4
!= daddr
->a4
||
976 x
->props
.saddr
.a4
!= saddr
->a4
)
980 if (!ipv6_addr_equal((struct in6_addr
*)x
->id
.daddr
.a6
,
981 (struct in6_addr
*)daddr
) ||
982 !ipv6_addr_equal((struct in6_addr
*)
984 (struct in6_addr
*)saddr
))
996 x
= xfrm_state_alloc();
1000 x
->sel
.daddr
.a4
= daddr
->a4
;
1001 x
->sel
.saddr
.a4
= saddr
->a4
;
1002 x
->sel
.prefixlen_d
= 32;
1003 x
->sel
.prefixlen_s
= 32;
1004 x
->props
.saddr
.a4
= saddr
->a4
;
1005 x
->id
.daddr
.a4
= daddr
->a4
;
1009 ipv6_addr_copy((struct in6_addr
*)x
->sel
.daddr
.a6
,
1010 (struct in6_addr
*)daddr
);
1011 ipv6_addr_copy((struct in6_addr
*)x
->sel
.saddr
.a6
,
1012 (struct in6_addr
*)saddr
);
1013 x
->sel
.prefixlen_d
= 128;
1014 x
->sel
.prefixlen_s
= 128;
1015 ipv6_addr_copy((struct in6_addr
*)x
->props
.saddr
.a6
,
1016 (struct in6_addr
*)saddr
);
1017 ipv6_addr_copy((struct in6_addr
*)x
->id
.daddr
.a6
,
1018 (struct in6_addr
*)daddr
);
1022 x
->km
.state
= XFRM_STATE_ACQ
;
1023 x
->id
.proto
= proto
;
1024 x
->props
.family
= family
;
1025 x
->props
.mode
= mode
;
1026 x
->props
.reqid
= reqid
;
1027 x
->lft
.hard_add_expires_seconds
= sysctl_xfrm_acq_expires
;
1029 x
->timer
.expires
= jiffies
+ sysctl_xfrm_acq_expires
*HZ
;
1030 add_timer(&x
->timer
);
1031 hlist_add_head(&x
->bydst
, xfrm_state_bydst
+h
);
1032 h
= xfrm_src_hash(daddr
, saddr
, family
);
1033 hlist_add_head(&x
->bysrc
, xfrm_state_bysrc
+h
);
1037 xfrm_hash_grow_check(x
->bydst
.next
!= NULL
);
1043 static struct xfrm_state
*__xfrm_find_acq_byseq(u32 seq
);
1045 int xfrm_state_add(struct xfrm_state
*x
)
1047 struct xfrm_state
*x1
;
1050 int use_spi
= xfrm_id_proto_match(x
->id
.proto
, IPSEC_PROTO_ANY
);
1052 family
= x
->props
.family
;
1054 spin_lock_bh(&xfrm_state_lock
);
1056 x1
= __xfrm_state_locate(x
, use_spi
, family
);
1064 if (use_spi
&& x
->km
.seq
) {
1065 x1
= __xfrm_find_acq_byseq(x
->km
.seq
);
1066 if (x1
&& ((x1
->id
.proto
!= x
->id
.proto
) ||
1067 xfrm_addr_cmp(&x1
->id
.daddr
, &x
->id
.daddr
, family
))) {
1074 x1
= __find_acq_core(family
, x
->props
.mode
, x
->props
.reqid
,
1076 &x
->id
.daddr
, &x
->props
.saddr
, 0);
1078 __xfrm_state_bump_genids(x
);
1079 __xfrm_state_insert(x
);
1083 spin_unlock_bh(&xfrm_state_lock
);
1086 xfrm_state_delete(x1
);
1092 EXPORT_SYMBOL(xfrm_state_add
);
1094 #ifdef CONFIG_XFRM_MIGRATE
1095 struct xfrm_state
*xfrm_state_clone(struct xfrm_state
*orig
, int *errp
)
1098 struct xfrm_state
*x
= xfrm_state_alloc();
1102 memcpy(&x
->id
, &orig
->id
, sizeof(x
->id
));
1103 memcpy(&x
->sel
, &orig
->sel
, sizeof(x
->sel
));
1104 memcpy(&x
->lft
, &orig
->lft
, sizeof(x
->lft
));
1105 x
->props
.mode
= orig
->props
.mode
;
1106 x
->props
.replay_window
= orig
->props
.replay_window
;
1107 x
->props
.reqid
= orig
->props
.reqid
;
1108 x
->props
.family
= orig
->props
.family
;
1109 x
->props
.saddr
= orig
->props
.saddr
;
1112 x
->aalg
= xfrm_algo_clone(orig
->aalg
);
1116 x
->props
.aalgo
= orig
->props
.aalgo
;
1119 x
->ealg
= xfrm_algo_clone(orig
->ealg
);
1123 x
->props
.ealgo
= orig
->props
.ealgo
;
1126 x
->calg
= xfrm_algo_clone(orig
->calg
);
1130 x
->props
.calgo
= orig
->props
.calgo
;
1133 x
->encap
= kmemdup(orig
->encap
, sizeof(*x
->encap
), GFP_KERNEL
);
1139 x
->coaddr
= kmemdup(orig
->coaddr
, sizeof(*x
->coaddr
),
1145 err
= xfrm_init_state(x
);
1149 x
->props
.flags
= orig
->props
.flags
;
1151 x
->curlft
.add_time
= orig
->curlft
.add_time
;
1152 x
->km
.state
= orig
->km
.state
;
1153 x
->km
.seq
= orig
->km
.seq
;
1170 EXPORT_SYMBOL(xfrm_state_clone
);
1172 /* xfrm_state_lock is held */
1173 struct xfrm_state
* xfrm_migrate_state_find(struct xfrm_migrate
*m
)
1176 struct xfrm_state
*x
;
1177 struct hlist_node
*entry
;
1180 h
= xfrm_dst_hash(&m
->old_daddr
, &m
->old_saddr
,
1181 m
->reqid
, m
->old_family
);
1182 hlist_for_each_entry(x
, entry
, xfrm_state_bydst
+h
, bydst
) {
1183 if (x
->props
.mode
!= m
->mode
||
1184 x
->id
.proto
!= m
->proto
)
1186 if (m
->reqid
&& x
->props
.reqid
!= m
->reqid
)
1188 if (xfrm_addr_cmp(&x
->id
.daddr
, &m
->old_daddr
,
1190 xfrm_addr_cmp(&x
->props
.saddr
, &m
->old_saddr
,
1197 h
= xfrm_src_hash(&m
->old_daddr
, &m
->old_saddr
,
1199 hlist_for_each_entry(x
, entry
, xfrm_state_bysrc
+h
, bysrc
) {
1200 if (x
->props
.mode
!= m
->mode
||
1201 x
->id
.proto
!= m
->proto
)
1203 if (xfrm_addr_cmp(&x
->id
.daddr
, &m
->old_daddr
,
1205 xfrm_addr_cmp(&x
->props
.saddr
, &m
->old_saddr
,
1215 EXPORT_SYMBOL(xfrm_migrate_state_find
);
1217 struct xfrm_state
* xfrm_state_migrate(struct xfrm_state
*x
,
1218 struct xfrm_migrate
*m
)
1220 struct xfrm_state
*xc
;
1223 xc
= xfrm_state_clone(x
, &err
);
1227 memcpy(&xc
->id
.daddr
, &m
->new_daddr
, sizeof(xc
->id
.daddr
));
1228 memcpy(&xc
->props
.saddr
, &m
->new_saddr
, sizeof(xc
->props
.saddr
));
1231 if (!xfrm_addr_cmp(&x
->id
.daddr
, &m
->new_daddr
, m
->new_family
)) {
1232 /* a care is needed when the destination address of the
1233 state is to be updated as it is a part of triplet */
1234 xfrm_state_insert(xc
);
1236 if ((err
= xfrm_state_add(xc
)) < 0)
1245 EXPORT_SYMBOL(xfrm_state_migrate
);
1248 int xfrm_state_update(struct xfrm_state
*x
)
1250 struct xfrm_state
*x1
;
1252 int use_spi
= xfrm_id_proto_match(x
->id
.proto
, IPSEC_PROTO_ANY
);
1254 spin_lock_bh(&xfrm_state_lock
);
1255 x1
= __xfrm_state_locate(x
, use_spi
, x
->props
.family
);
1261 if (xfrm_state_kern(x1
)) {
1267 if (x1
->km
.state
== XFRM_STATE_ACQ
) {
1268 __xfrm_state_insert(x
);
1274 spin_unlock_bh(&xfrm_state_lock
);
1280 xfrm_state_delete(x1
);
1286 spin_lock_bh(&x1
->lock
);
1287 if (likely(x1
->km
.state
== XFRM_STATE_VALID
)) {
1288 if (x
->encap
&& x1
->encap
)
1289 memcpy(x1
->encap
, x
->encap
, sizeof(*x1
->encap
));
1290 if (x
->coaddr
&& x1
->coaddr
) {
1291 memcpy(x1
->coaddr
, x
->coaddr
, sizeof(*x1
->coaddr
));
1293 if (!use_spi
&& memcmp(&x1
->sel
, &x
->sel
, sizeof(x1
->sel
)))
1294 memcpy(&x1
->sel
, &x
->sel
, sizeof(x1
->sel
));
1295 memcpy(&x1
->lft
, &x
->lft
, sizeof(x1
->lft
));
1298 mod_timer(&x1
->timer
, jiffies
+ HZ
);
1299 if (x1
->curlft
.use_time
)
1300 xfrm_state_check_expire(x1
);
1304 spin_unlock_bh(&x1
->lock
);
1310 EXPORT_SYMBOL(xfrm_state_update
);
1312 int xfrm_state_check_expire(struct xfrm_state
*x
)
1314 if (!x
->curlft
.use_time
)
1315 x
->curlft
.use_time
= get_seconds();
1317 if (x
->km
.state
!= XFRM_STATE_VALID
)
1320 if (x
->curlft
.bytes
>= x
->lft
.hard_byte_limit
||
1321 x
->curlft
.packets
>= x
->lft
.hard_packet_limit
) {
1322 x
->km
.state
= XFRM_STATE_EXPIRED
;
1323 mod_timer(&x
->timer
, jiffies
);
1328 (x
->curlft
.bytes
>= x
->lft
.soft_byte_limit
||
1329 x
->curlft
.packets
>= x
->lft
.soft_packet_limit
)) {
1331 km_state_expired(x
, 0, 0);
1335 EXPORT_SYMBOL(xfrm_state_check_expire
);
1338 xfrm_state_lookup(xfrm_address_t
*daddr
, __be32 spi
, u8 proto
,
1339 unsigned short family
)
1341 struct xfrm_state
*x
;
1343 spin_lock_bh(&xfrm_state_lock
);
1344 x
= __xfrm_state_lookup(daddr
, spi
, proto
, family
);
1345 spin_unlock_bh(&xfrm_state_lock
);
1348 EXPORT_SYMBOL(xfrm_state_lookup
);
1351 xfrm_state_lookup_byaddr(xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
1352 u8 proto
, unsigned short family
)
1354 struct xfrm_state
*x
;
1356 spin_lock_bh(&xfrm_state_lock
);
1357 x
= __xfrm_state_lookup_byaddr(daddr
, saddr
, proto
, family
);
1358 spin_unlock_bh(&xfrm_state_lock
);
1361 EXPORT_SYMBOL(xfrm_state_lookup_byaddr
);
1364 xfrm_find_acq(u8 mode
, u32 reqid
, u8 proto
,
1365 xfrm_address_t
*daddr
, xfrm_address_t
*saddr
,
1366 int create
, unsigned short family
)
1368 struct xfrm_state
*x
;
1370 spin_lock_bh(&xfrm_state_lock
);
1371 x
= __find_acq_core(family
, mode
, reqid
, proto
, daddr
, saddr
, create
);
1372 spin_unlock_bh(&xfrm_state_lock
);
1376 EXPORT_SYMBOL(xfrm_find_acq
);
1378 #ifdef CONFIG_XFRM_SUB_POLICY
1380 xfrm_tmpl_sort(struct xfrm_tmpl
**dst
, struct xfrm_tmpl
**src
, int n
,
1381 unsigned short family
)
1384 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
1386 return -EAFNOSUPPORT
;
1388 spin_lock_bh(&xfrm_state_lock
);
1389 if (afinfo
->tmpl_sort
)
1390 err
= afinfo
->tmpl_sort(dst
, src
, n
);
1391 spin_unlock_bh(&xfrm_state_lock
);
1392 xfrm_state_put_afinfo(afinfo
);
1395 EXPORT_SYMBOL(xfrm_tmpl_sort
);
1398 xfrm_state_sort(struct xfrm_state
**dst
, struct xfrm_state
**src
, int n
,
1399 unsigned short family
)
1402 struct xfrm_state_afinfo
*afinfo
= xfrm_state_get_afinfo(family
);
1404 return -EAFNOSUPPORT
;
1406 spin_lock_bh(&xfrm_state_lock
);
1407 if (afinfo
->state_sort
)
1408 err
= afinfo
->state_sort(dst
, src
, n
);
1409 spin_unlock_bh(&xfrm_state_lock
);
1410 xfrm_state_put_afinfo(afinfo
);
1413 EXPORT_SYMBOL(xfrm_state_sort
);
1416 /* Silly enough, but I'm lazy to build resolution list */
1418 static struct xfrm_state
*__xfrm_find_acq_byseq(u32 seq
)
1422 for (i
= 0; i
<= xfrm_state_hmask
; i
++) {
1423 struct hlist_node
*entry
;
1424 struct xfrm_state
*x
;
1426 hlist_for_each_entry(x
, entry
, xfrm_state_bydst
+i
, bydst
) {
1427 if (x
->km
.seq
== seq
&&
1428 x
->km
.state
== XFRM_STATE_ACQ
) {
1437 struct xfrm_state
*xfrm_find_acq_byseq(u32 seq
)
1439 struct xfrm_state
*x
;
1441 spin_lock_bh(&xfrm_state_lock
);
1442 x
= __xfrm_find_acq_byseq(seq
);
1443 spin_unlock_bh(&xfrm_state_lock
);
1446 EXPORT_SYMBOL(xfrm_find_acq_byseq
);
1448 u32
xfrm_get_acqseq(void)
1452 static DEFINE_SPINLOCK(acqseq_lock
);
1454 spin_lock_bh(&acqseq_lock
);
1455 res
= (++acqseq
? : ++acqseq
);
1456 spin_unlock_bh(&acqseq_lock
);
1459 EXPORT_SYMBOL(xfrm_get_acqseq
);
1461 int xfrm_alloc_spi(struct xfrm_state
*x
, u32 low
, u32 high
)
1464 struct xfrm_state
*x0
;
1466 __be32 minspi
= htonl(low
);
1467 __be32 maxspi
= htonl(high
);
1469 spin_lock_bh(&x
->lock
);
1470 if (x
->km
.state
== XFRM_STATE_DEAD
)
1479 if (minspi
== maxspi
) {
1480 x0
= xfrm_state_lookup(&x
->id
.daddr
, minspi
, x
->id
.proto
, x
->props
.family
);
1488 for (h
=0; h
<high
-low
+1; h
++) {
1489 spi
= low
+ net_random()%(high
-low
+1);
1490 x0
= xfrm_state_lookup(&x
->id
.daddr
, htonl(spi
), x
->id
.proto
, x
->props
.family
);
1492 x
->id
.spi
= htonl(spi
);
1499 spin_lock_bh(&xfrm_state_lock
);
1500 h
= xfrm_spi_hash(&x
->id
.daddr
, x
->id
.spi
, x
->id
.proto
, x
->props
.family
);
1501 hlist_add_head(&x
->byspi
, xfrm_state_byspi
+h
);
1502 spin_unlock_bh(&xfrm_state_lock
);
1508 spin_unlock_bh(&x
->lock
);
1512 EXPORT_SYMBOL(xfrm_alloc_spi
);
1514 int xfrm_state_walk(u8 proto
, int (*func
)(struct xfrm_state
*, int, void*),
1518 struct xfrm_state
*x
, *last
= NULL
;
1519 struct hlist_node
*entry
;
1523 spin_lock_bh(&xfrm_state_lock
);
1524 for (i
= 0; i
<= xfrm_state_hmask
; i
++) {
1525 hlist_for_each_entry(x
, entry
, xfrm_state_bydst
+i
, bydst
) {
1526 if (!xfrm_id_proto_match(x
->id
.proto
, proto
))
1529 err
= func(last
, count
, data
);
1541 err
= func(last
, 0, data
);
1543 spin_unlock_bh(&xfrm_state_lock
);
1546 EXPORT_SYMBOL(xfrm_state_walk
);
1549 void xfrm_replay_notify(struct xfrm_state
*x
, int event
)
1552 /* we send notify messages in case
1553 * 1. we updated on of the sequence numbers, and the seqno difference
1554 * is at least x->replay_maxdiff, in this case we also update the
1555 * timeout of our timer function
1556 * 2. if x->replay_maxage has elapsed since last update,
1557 * and there were changes
1559 * The state structure must be locked!
1563 case XFRM_REPLAY_UPDATE
:
1564 if (x
->replay_maxdiff
&&
1565 (x
->replay
.seq
- x
->preplay
.seq
< x
->replay_maxdiff
) &&
1566 (x
->replay
.oseq
- x
->preplay
.oseq
< x
->replay_maxdiff
)) {
1567 if (x
->xflags
& XFRM_TIME_DEFER
)
1568 event
= XFRM_REPLAY_TIMEOUT
;
1575 case XFRM_REPLAY_TIMEOUT
:
1576 if ((x
->replay
.seq
== x
->preplay
.seq
) &&
1577 (x
->replay
.bitmap
== x
->preplay
.bitmap
) &&
1578 (x
->replay
.oseq
== x
->preplay
.oseq
)) {
1579 x
->xflags
|= XFRM_TIME_DEFER
;
1586 memcpy(&x
->preplay
, &x
->replay
, sizeof(struct xfrm_replay_state
));
1587 c
.event
= XFRM_MSG_NEWAE
;
1588 c
.data
.aevent
= event
;
1589 km_state_notify(x
, &c
);
1591 if (x
->replay_maxage
&&
1592 !mod_timer(&x
->rtimer
, jiffies
+ x
->replay_maxage
))
1593 x
->xflags
&= ~XFRM_TIME_DEFER
;
1596 static void xfrm_replay_timer_handler(unsigned long data
)
1598 struct xfrm_state
*x
= (struct xfrm_state
*)data
;
1600 spin_lock(&x
->lock
);
1602 if (x
->km
.state
== XFRM_STATE_VALID
) {
1603 if (xfrm_aevent_is_on())
1604 xfrm_replay_notify(x
, XFRM_REPLAY_TIMEOUT
);
1606 x
->xflags
|= XFRM_TIME_DEFER
;
1609 spin_unlock(&x
->lock
);
1612 int xfrm_replay_check(struct xfrm_state
*x
, __be32 net_seq
)
1615 u32 seq
= ntohl(net_seq
);
1617 if (unlikely(seq
== 0))
1620 if (likely(seq
> x
->replay
.seq
))
1623 diff
= x
->replay
.seq
- seq
;
1624 if (diff
>= min_t(unsigned int, x
->props
.replay_window
,
1625 sizeof(x
->replay
.bitmap
) * 8)) {
1626 x
->stats
.replay_window
++;
1630 if (x
->replay
.bitmap
& (1U << diff
)) {
1636 EXPORT_SYMBOL(xfrm_replay_check
);
1638 void xfrm_replay_advance(struct xfrm_state
*x
, __be32 net_seq
)
1641 u32 seq
= ntohl(net_seq
);
1643 if (seq
> x
->replay
.seq
) {
1644 diff
= seq
- x
->replay
.seq
;
1645 if (diff
< x
->props
.replay_window
)
1646 x
->replay
.bitmap
= ((x
->replay
.bitmap
) << diff
) | 1;
1648 x
->replay
.bitmap
= 1;
1649 x
->replay
.seq
= seq
;
1651 diff
= x
->replay
.seq
- seq
;
1652 x
->replay
.bitmap
|= (1U << diff
);
1655 if (xfrm_aevent_is_on())
1656 xfrm_replay_notify(x
, XFRM_REPLAY_UPDATE
);
1658 EXPORT_SYMBOL(xfrm_replay_advance
);
1660 static LIST_HEAD(xfrm_km_list
);
1661 static DEFINE_RWLOCK(xfrm_km_lock
);
1663 void km_policy_notify(struct xfrm_policy
*xp
, int dir
, struct km_event
*c
)
1665 struct xfrm_mgr
*km
;
1667 read_lock(&xfrm_km_lock
);
1668 list_for_each_entry(km
, &xfrm_km_list
, list
)
1669 if (km
->notify_policy
)
1670 km
->notify_policy(xp
, dir
, c
);
1671 read_unlock(&xfrm_km_lock
);
1674 void km_state_notify(struct xfrm_state
*x
, struct km_event
*c
)
1676 struct xfrm_mgr
*km
;
1677 read_lock(&xfrm_km_lock
);
1678 list_for_each_entry(km
, &xfrm_km_list
, list
)
1681 read_unlock(&xfrm_km_lock
);
1684 EXPORT_SYMBOL(km_policy_notify
);
1685 EXPORT_SYMBOL(km_state_notify
);
1687 void km_state_expired(struct xfrm_state
*x
, int hard
, u32 pid
)
1693 c
.event
= XFRM_MSG_EXPIRE
;
1694 km_state_notify(x
, &c
);
1700 EXPORT_SYMBOL(km_state_expired
);
1702 * We send to all registered managers regardless of failure
1703 * We are happy with one success
1705 int km_query(struct xfrm_state
*x
, struct xfrm_tmpl
*t
, struct xfrm_policy
*pol
)
1707 int err
= -EINVAL
, acqret
;
1708 struct xfrm_mgr
*km
;
1710 read_lock(&xfrm_km_lock
);
1711 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1712 acqret
= km
->acquire(x
, t
, pol
, XFRM_POLICY_OUT
);
1716 read_unlock(&xfrm_km_lock
);
1719 EXPORT_SYMBOL(km_query
);
1721 int km_new_mapping(struct xfrm_state
*x
, xfrm_address_t
*ipaddr
, __be16 sport
)
1724 struct xfrm_mgr
*km
;
1726 read_lock(&xfrm_km_lock
);
1727 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1728 if (km
->new_mapping
)
1729 err
= km
->new_mapping(x
, ipaddr
, sport
);
1733 read_unlock(&xfrm_km_lock
);
1736 EXPORT_SYMBOL(km_new_mapping
);
1738 void km_policy_expired(struct xfrm_policy
*pol
, int dir
, int hard
, u32 pid
)
1744 c
.event
= XFRM_MSG_POLEXPIRE
;
1745 km_policy_notify(pol
, dir
, &c
);
1750 EXPORT_SYMBOL(km_policy_expired
);
#ifdef CONFIG_XFRM_MIGRATE
/*
 * Ask every key manager that implements migrate to handle a MIGRATE
 * request; succeeds if any manager does.
 */
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif
1774 int km_report(u8 proto
, struct xfrm_selector
*sel
, xfrm_address_t
*addr
)
1778 struct xfrm_mgr
*km
;
1780 read_lock(&xfrm_km_lock
);
1781 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1783 ret
= km
->report(proto
, sel
, addr
);
1788 read_unlock(&xfrm_km_lock
);
1791 EXPORT_SYMBOL(km_report
);
1793 int xfrm_user_policy(struct sock
*sk
, int optname
, u8 __user
*optval
, int optlen
)
1797 struct xfrm_mgr
*km
;
1798 struct xfrm_policy
*pol
= NULL
;
1800 if (optlen
<= 0 || optlen
> PAGE_SIZE
)
1803 data
= kmalloc(optlen
, GFP_KERNEL
);
1808 if (copy_from_user(data
, optval
, optlen
))
1812 read_lock(&xfrm_km_lock
);
1813 list_for_each_entry(km
, &xfrm_km_list
, list
) {
1814 pol
= km
->compile_policy(sk
, optname
, data
,
1819 read_unlock(&xfrm_km_lock
);
1822 xfrm_sk_policy_insert(sk
, err
, pol
);
1831 EXPORT_SYMBOL(xfrm_user_policy
);
1833 int xfrm_register_km(struct xfrm_mgr
*km
)
1835 write_lock_bh(&xfrm_km_lock
);
1836 list_add_tail(&km
->list
, &xfrm_km_list
);
1837 write_unlock_bh(&xfrm_km_lock
);
1840 EXPORT_SYMBOL(xfrm_register_km
);
1842 int xfrm_unregister_km(struct xfrm_mgr
*km
)
1844 write_lock_bh(&xfrm_km_lock
);
1845 list_del(&km
->list
);
1846 write_unlock_bh(&xfrm_km_lock
);
1849 EXPORT_SYMBOL(xfrm_unregister_km
);
1851 int xfrm_state_register_afinfo(struct xfrm_state_afinfo
*afinfo
)
1854 if (unlikely(afinfo
== NULL
))
1856 if (unlikely(afinfo
->family
>= NPROTO
))
1857 return -EAFNOSUPPORT
;
1858 write_lock_bh(&xfrm_state_afinfo_lock
);
1859 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != NULL
))
1862 xfrm_state_afinfo
[afinfo
->family
] = afinfo
;
1863 write_unlock_bh(&xfrm_state_afinfo_lock
);
1866 EXPORT_SYMBOL(xfrm_state_register_afinfo
);
1868 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo
*afinfo
)
1871 if (unlikely(afinfo
== NULL
))
1873 if (unlikely(afinfo
->family
>= NPROTO
))
1874 return -EAFNOSUPPORT
;
1875 write_lock_bh(&xfrm_state_afinfo_lock
);
1876 if (likely(xfrm_state_afinfo
[afinfo
->family
] != NULL
)) {
1877 if (unlikely(xfrm_state_afinfo
[afinfo
->family
] != afinfo
))
1880 xfrm_state_afinfo
[afinfo
->family
] = NULL
;
1882 write_unlock_bh(&xfrm_state_afinfo_lock
);
1885 EXPORT_SYMBOL(xfrm_state_unregister_afinfo
);
1887 static struct xfrm_state_afinfo
*xfrm_state_get_afinfo(unsigned int family
)
1889 struct xfrm_state_afinfo
*afinfo
;
1890 if (unlikely(family
>= NPROTO
))
1892 read_lock(&xfrm_state_afinfo_lock
);
1893 afinfo
= xfrm_state_afinfo
[family
];
1894 if (unlikely(!afinfo
))
1895 read_unlock(&xfrm_state_afinfo_lock
);
1899 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo
*afinfo
)
1901 read_unlock(&xfrm_state_afinfo_lock
);
1904 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1905 void xfrm_state_delete_tunnel(struct xfrm_state
*x
)
1908 struct xfrm_state
*t
= x
->tunnel
;
1910 if (atomic_read(&t
->tunnel_users
) == 2)
1911 xfrm_state_delete(t
);
1912 atomic_dec(&t
->tunnel_users
);
1917 EXPORT_SYMBOL(xfrm_state_delete_tunnel
);
1919 int xfrm_state_mtu(struct xfrm_state
*x
, int mtu
)
1923 spin_lock_bh(&x
->lock
);
1924 if (x
->km
.state
== XFRM_STATE_VALID
&&
1925 x
->type
&& x
->type
->get_mtu
)
1926 res
= x
->type
->get_mtu(x
, mtu
);
1928 res
= mtu
- x
->props
.header_len
;
1929 spin_unlock_bh(&x
->lock
);
1933 int xfrm_init_state(struct xfrm_state
*x
)
1935 struct xfrm_state_afinfo
*afinfo
;
1936 int family
= x
->props
.family
;
1939 err
= -EAFNOSUPPORT
;
1940 afinfo
= xfrm_state_get_afinfo(family
);
1945 if (afinfo
->init_flags
)
1946 err
= afinfo
->init_flags(x
);
1948 xfrm_state_put_afinfo(afinfo
);
1953 err
= -EPROTONOSUPPORT
;
1954 x
->inner_mode
= xfrm_get_mode(x
->props
.mode
, x
->sel
.family
);
1955 if (x
->inner_mode
== NULL
)
1958 if (!(x
->inner_mode
->flags
& XFRM_MODE_FLAG_TUNNEL
) &&
1959 family
!= x
->sel
.family
)
1962 x
->type
= xfrm_get_type(x
->id
.proto
, family
);
1963 if (x
->type
== NULL
)
1966 err
= x
->type
->init_state(x
);
1970 x
->outer_mode
= xfrm_get_mode(x
->props
.mode
, family
);
1971 if (x
->outer_mode
== NULL
)
1974 x
->km
.state
= XFRM_STATE_VALID
;
1980 EXPORT_SYMBOL(xfrm_init_state
);
1982 void __init
xfrm_state_init(void)
1986 sz
= sizeof(struct hlist_head
) * 8;
1988 xfrm_state_bydst
= xfrm_hash_alloc(sz
);
1989 xfrm_state_bysrc
= xfrm_hash_alloc(sz
);
1990 xfrm_state_byspi
= xfrm_hash_alloc(sz
);
1991 if (!xfrm_state_bydst
|| !xfrm_state_bysrc
|| !xfrm_state_byspi
)
1992 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
1993 xfrm_state_hmask
= ((sz
/ sizeof(struct hlist_head
)) - 1);
1995 INIT_WORK(&xfrm_state_gc_work
, xfrm_state_gc_task
);
1998 #ifdef CONFIG_AUDITSYSCALL
1999 static inline void xfrm_audit_common_stateinfo(struct xfrm_state
*x
,
2000 struct audit_buffer
*audit_buf
)
2002 struct xfrm_sec_ctx
*ctx
= x
->security
;
2003 u32 spi
= ntohl(x
->id
.spi
);
2006 audit_log_format(audit_buf
, " sec_alg=%u sec_doi=%u sec_obj=%s",
2007 ctx
->ctx_alg
, ctx
->ctx_doi
, ctx
->ctx_str
);
2009 switch(x
->props
.family
) {
2011 audit_log_format(audit_buf
,
2012 " src=" NIPQUAD_FMT
" dst=" NIPQUAD_FMT
,
2013 NIPQUAD(x
->props
.saddr
.a4
),
2014 NIPQUAD(x
->id
.daddr
.a4
));
2017 audit_log_format(audit_buf
,
2018 " src=" NIP6_FMT
" dst=" NIP6_FMT
,
2019 NIP6(*(struct in6_addr
*)x
->props
.saddr
.a6
),
2020 NIP6(*(struct in6_addr
*)x
->id
.daddr
.a6
));
2024 audit_log_format(audit_buf
, " spi=%u(0x%x)", spi
, spi
);
2027 void xfrm_audit_state_add(struct xfrm_state
*x
, int result
,
2028 u32 auid
, u32 secid
)
2030 struct audit_buffer
*audit_buf
;
2032 if (audit_enabled
== 0)
2034 audit_buf
= xfrm_audit_start(auid
, secid
);
2035 if (audit_buf
== NULL
)
2037 audit_log_format(audit_buf
, " op=SAD-add res=%u", result
);
2038 xfrm_audit_common_stateinfo(x
, audit_buf
);
2039 audit_log_end(audit_buf
);
2041 EXPORT_SYMBOL_GPL(xfrm_audit_state_add
);
2043 void xfrm_audit_state_delete(struct xfrm_state
*x
, int result
,
2044 u32 auid
, u32 secid
)
2046 struct audit_buffer
*audit_buf
;
2048 if (audit_enabled
== 0)
2050 audit_buf
= xfrm_audit_start(auid
, secid
);
2051 if (audit_buf
== NULL
)
2053 audit_log_format(audit_buf
, " op=SAD-delete res=%u", result
);
2054 xfrm_audit_common_stateinfo(x
, audit_buf
);
2055 audit_log_end(audit_buf
);
2057 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete
);
2058 #endif /* CONFIG_AUDITSYSCALL */