/*
 * xfrm_state.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 */
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/cache.h>
#include <asm/uaccess.h>
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);
u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */
static DEFINE_SPINLOCK(xfrm_state_lock);
/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;
static unsigned int xfrm_state_genid;
static inline unsigned int __xfrm4_addr_hash(xfrm_address_t *addr)
{
	return ntohl(addr->a4);
}
static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr)
{
	return ntohl(addr->a6[2] ^ addr->a6[3]);
}
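
/* For illustration only: the IPv6 variant folds just the low 64 bits of
 * the address (32-bit words a6[2] and a6[3]).  E.g. for 2001:db8::dead:beef
 * the two XORed words are 0x0000dead and 0x0000beef; addresses differing
 * only in their upper 64 bits collide, which is acceptable since the
 * interface identifier carries most of the entropy.  The example address
 * is invented, not from the original source.
 */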
static inline unsigned int __xfrm_dst_hash(xfrm_address_t *addr,
					   u32 reqid, unsigned short family,
					   unsigned int hmask)
{
	unsigned int h = family ^ reqid;
	switch (family) {
	case AF_INET:
		h ^= __xfrm4_addr_hash(addr);
		break;
	case AF_INET6:
		h ^= __xfrm6_addr_hash(addr);
		break;
	}
	return (h ^ (h >> 16)) & hmask;
}
static inline unsigned int xfrm_dst_hash(xfrm_address_t *addr, u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(addr, reqid, family, xfrm_state_hmask);
}
static inline unsigned __xfrm_src_hash(xfrm_address_t *addr,
				       unsigned short family,
				       unsigned int hmask)
{
	unsigned int h = family;
	switch (family) {
	case AF_INET:
		h ^= __xfrm4_addr_hash(addr);
		break;
	case AF_INET6:
		h ^= __xfrm6_addr_hash(addr);
		break;
	}
	return (h ^ (h >> 16)) & hmask;
}
static inline unsigned xfrm_src_hash(xfrm_address_t *addr, unsigned short family)
{
	return __xfrm_src_hash(addr, family, xfrm_state_hmask);
}
static inline unsigned int
__xfrm_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto, unsigned short family,
		unsigned int hmask)
{
	unsigned int h = spi ^ proto;
	switch (family) {
	case AF_INET:
		h ^= __xfrm4_addr_hash(addr);
		break;
	case AF_INET6:
		h ^= __xfrm6_addr_hash(addr);
		break;
	}
	return (h ^ (h >> 10) ^ (h >> 20)) & hmask;
}
static inline unsigned int
xfrm_spi_hash(xfrm_address_t *addr, u32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(addr, spi, proto, family, xfrm_state_hmask);
}
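
/* A rough sketch of how these wrappers are used: an inbound lookup by
 * (daddr, spi, proto) computes
 *
 *	h = xfrm_spi_hash(daddr, spi, proto, family);
 *	... then scans the xfrm_state_byspi[h] chain ...
 *
 * The extra (h >> 10) ^ (h >> 20) folding in the SPI variant mixes the
 * high SPI bits down into the masked bucket range.  The shift amounts
 * are the ones used above; everything else here is illustrative.
 */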
static struct hlist_head *xfrm_state_hash_alloc(unsigned int sz)
{
	struct hlist_head *n;

	if (sz <= PAGE_SIZE)
		n = kmalloc(sz, GFP_KERNEL);
	else if (hashdist)
		n = __vmalloc(sz, GFP_KERNEL, PAGE_KERNEL);
	else
		n = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(sz));

	if (n)
		memset(n, 0, sz);

	return n;
}
static void xfrm_state_hash_free(struct hlist_head *n, unsigned int sz)
{
	if (sz <= PAGE_SIZE)
		kfree(n);
	else if (hashdist)
		vfree(n);
	else
		free_pages((unsigned long)n, get_order(sz));
}
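
/* Allocation strategy, for illustration: a table of up to PAGE_SIZE
 * (512 buckets with 4K pages and 8-byte pointers) comes from kmalloc;
 * anything larger comes from vmalloc when hash distribution across
 * nodes is requested, otherwise from the page allocator.  The free
 * path must mirror the same size test so each table is returned to the
 * allocator it came from.  The bucket count above is only an example.
 */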
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, x->props.reqid,
				    x->props.family, nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->props.saddr, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		h = __xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				    x->props.family, nhashmask);
		hlist_add_head(&x->byspi, nspitable+h);
	}
}
static unsigned long xfrm_hash_new_size(void)
{
	return ((xfrm_state_hmask + 1) << 1) *
		sizeof(struct hlist_head);
}
static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(void *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_state_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_state_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_state_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_state_hash_alloc(nsize);
	if (!nspi) {
		xfrm_state_hash_free(ndst, nsize);
		xfrm_state_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_state_hash_free(odst, osize);
	xfrm_state_hash_free(osrc, osize);
	xfrm_state_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
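
/* Resize policy, roughly: each pass doubles the tables
 * (xfrm_hash_new_size() returns twice the current bucket count in
 * bytes), so they grow 8 -> 16 -> 32 ... buckets up to
 * xfrm_state_hashmax.  The allocations happen before xfrm_state_lock
 * is taken, so concurrent lookups only stall for the rehash loop
 * itself.  Bucket counts are illustrative; the trigger condition lives
 * in __xfrm_state_insert() below.
 */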
static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

static int xfrm_state_gc_flush_bundles;
int __xfrm_state_delete(struct xfrm_state *x);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	if (del_timer(&x->timer))
		BUG();
	if (del_timer(&x->rtimer))
		BUG();
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->mode)
		xfrm_put_mode(x->mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
static void xfrm_state_gc_task(void *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	if (xfrm_state_gc_flush_bundles) {
		xfrm_state_gc_flush_bundles = 0;
		xfrm_flush_bundles();
	}

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
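
/* Worked example: with HZ == 250, a 10 second timeout becomes 2500
 * jiffies, while anything at or beyond (MAX_SCHEDULE_TIMEOUT-1)/HZ
 * seconds is clamped so that secs*HZ cannot overflow.  The HZ value is
 * only an example.
 */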
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX &&
	    !mod_timer(&x->timer, jiffies + make_jiffies(next)))
		xfrm_state_hold(x);
	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}
	if (!__xfrm_state_delete(x) && x->id.spi)
		km_state_expired(x, 1, 0);

out:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
}
static void xfrm_replay_timer_handler(unsigned long data);
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data	  = (unsigned long)x;
		init_timer(&x->rtimer);
		x->rtimer.function = xfrm_replay_timer_handler;
		x->rtimer.data     = (unsigned long)x;
		x->curlft.add_time = (unsigned long)xtime.tv_sec;
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
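
/* A minimal usage sketch (not from this file): a key manager would
 * allocate a state, fill in at least id.daddr, id.proto, id.spi and
 * props.family, then hand it to xfrm_state_add() below.  On any error
 * path before insertion, the reference taken by xfrm_state_alloc()
 * must be dropped with xfrm_state_put().
 */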
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);
		if (del_timer(&x->timer))
			__xfrm_state_put(x);
		if (del_timer(&x->rtimer))
			__xfrm_state_put(x);

		/* The number two in this test is the reference
		 * mentioned in the comment below plus the reference
		 * our caller holds. A larger value means that
		 * there are DSTs attached to this xfrm_state.
		 */
		if (atomic_read(&x->refcnt) > 2) {
			xfrm_state_gc_flush_bundles = 1;
			schedule_work(&xfrm_state_gc_work);
		}

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		__xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
void xfrm_state_flush(u8 proto)
{
	int i;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				xfrm_state_delete(x);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);
static void
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
}
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi       != spi ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto     != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	if (use_spi)
		return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
					   x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h = xfrm_dst_hash(daddr, tmpl->reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
			xfrm_state_hold(x);
			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
			add_timer(&x->timer);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	if (xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY)) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	if (!mod_timer(&x->timer, jiffies + HZ))
		xfrm_state_hold(x);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		xfrm_state_hold(x);

	wake_up(&km_waitq);

	xfrm_state_num++;

	if (x->bydst.next != NULL &&
	    (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
	    xfrm_state_num > xfrm_state_hmask)
		schedule_work(&xfrm_hash_work);
}
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);

	xfrm_flush_all_bundles();
}
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
		xfrm_state_hold(x);
		x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
	}

	return x;
}
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (!err)
		xfrm_flush_all_bundles();

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		if (!mod_timer(&x1->timer, jiffies + HZ))
			xfrm_state_hold(x1);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		if (!mod_timer(&x->timer, jiffies))
			xfrm_state_hold(x);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
		- skb_headroom(skb);

	if (nhead > 0)
		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

	/* Check tail too... */
	return 0;
}
int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);
	if (err)
		goto err;
	err = xfrm_state_check_space(x, skb);
err:
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);
struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
		  unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup(daddr, spi, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);
#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);
int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif
/* Silly enough, but I'm lazy to build resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}
struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
u32
xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
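
/* The (++acqseq ? : ++acqseq) idiom above, spelled out: when the counter
 * wraps from 0xffffffff to 0, the first pre-increment yields 0 (false),
 * so the expression increments again and returns 1.  Zero is therefore
 * never handed out as an acquire sequence number.
 */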
void
xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
{
	unsigned int h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		minspi = ntohl(minspi);
		maxspi = ntohl(maxspi);
		for (h=0; h<maxspi-minspi+1; h++) {
			spi = minspi + net_random()%(maxspi-minspi+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);
		wake_up(&km_waitq);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);
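
/* Sketch of the probing behaviour above: with minspi != maxspi the loop
 * makes at most (maxspi - minspi + 1) random draws from the range and
 * keeps the first SPI with no existing (daddr, spi, proto) state, e.g.
 * a range of 256 SPIs means at most 256 lookups.  Randomized probing
 * can retry the same value, so a densely used range may come up empty
 * even though free SPIs remain.  The numbers are only an example.
 */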
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto))
				count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}

	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			err = func(x, --count, data);
			if (err)
				goto out;
		}
	}
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated one of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) {
		xfrm_state_hold(x);
		x->xflags &= ~XFRM_TIME_DEFER;
	}
}
EXPORT_SYMBOL(xfrm_replay_notify);
static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on())
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
	xfrm_state_put(x);
}
int xfrm_replay_check(struct xfrm_state *x, u32 seq)
{
	u32 diff;

	seq = ntohl(seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= x->props.replay_window) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);
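
/* Worked example, window size 32: after seq 1000 has been accepted,
 * seq 990 gives diff = 10, so bit 10 of the bitmap decides replay vs.
 * late-but-new; seq 968 or older gives diff >= 32 and is dropped as
 * outside the window.  The values are illustrative.
 */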
void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
{
	u32 diff;

	seq = ntohl(seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);
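
/* Companion example to the check above: accepting seq 1003 after 1000
 * shifts the bitmap left by diff = 3 and sets bit 0, so bits 3..31 now
 * describe seq 1000 and older; accepting the out-of-order seq 1001
 * afterwards just sets bit (1003 - 1001) = 2.  Illustrative values only.
 */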
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}
void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure
 * We are happy with one success
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_query);
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);
int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_report);
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else
		xfrm_state_afinfo[afinfo->family] = afinfo;
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			xfrm_state_afinfo[afinfo->family] = NULL;
	}
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
/*
 * This function is NOT optimal. For example, with ESP it will give an
 * MTU that's usually two bytes short of being optimal. However, it will
 * usually give an answer that's a multiple of 4 provided the input is
 * also a multiple of 4.
 */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res = mtu;

	res -= x->props.header_len;

	for (;;) {
		int m = res;

		if (m < 68)
			return 68;

		spin_lock_bh(&x->lock);
		if (x->km.state == XFRM_STATE_VALID &&
		    x->type && x->type->get_max_size)
			m = x->type->get_max_size(x, m);
		else
			m += x->props.header_len;
		spin_unlock_bh(&x->lock);

		if (m <= mtu)
			break;
		res -= (m - mtu);
	}

	return res;
}
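
/* Rough numeric example: with mtu = 1500 and header_len = 36, the first
 * pass tries res = 1464; if get_max_size() reports a padded frame of,
 * say, 1508 bytes, res shrinks by 8 and the loop retries until the
 * padded size fits.  All figures here are invented for illustration.
 */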
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->mode = xfrm_get_mode(x->props.mode, family);
	if (x->mode == NULL) {
		err = -EPROTONOSUPPORT;
		goto error;
	}

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}
EXPORT_SYMBOL(xfrm_init_state);
void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_state_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_state_hash_alloc(sz);
	xfrm_state_byspi = xfrm_state_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
}
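
/* For reference: sz above is 8 buckets' worth of list heads, so each
 * table starts with 8 buckets and xfrm_state_hmask starts at 7;
 * xfrm_hash_resize() then doubles the tables whenever the state count
 * outgrows the bucket count (see __xfrm_state_insert).
 */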