[XFRM]: Assorted IPsec fixups
[deliverable/linux.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
24
25 #include "xfrm_hash.h"
26
/* Netlink socket handle exported for the xfrm key-manager interfaces. */
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

/* Async-event tunables (exported for sysctl use elsewhere). */
u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Lifetime, in seconds, of a larval (ACQUIRE) state before it expires. */
u32 sysctl_xfrm_acq_expires __read_mostly = 30;

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

/* Protects the three hash tables below, xfrm_state_num and
 * xfrm_state_genid.
 */
static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;	/* bucket count - 1 */
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;	/* total installed states */
static unsigned int xfrm_state_genid;	/* bumped on every insert */

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
63
64 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
65 xfrm_address_t *saddr,
66 u32 reqid,
67 unsigned short family)
68 {
69 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
70 }
71
72 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
73 xfrm_address_t *saddr,
74 unsigned short family)
75 {
76 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
77 }
78
79 static inline unsigned int
80 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
81 {
82 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
83 }
84
/* Re-link every state found on the bydst chain @list into the three new
 * tables whose size is implied by @nhashmask.  Used by the resize work;
 * caller holds xfrm_state_lock.
 */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		/* Only states that have an SPI live in the SPI table. */
		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
115
116 static unsigned long xfrm_hash_new_size(void)
117 {
118 return ((xfrm_state_hmask + 1) << 1) *
119 sizeof(struct hlist_head);
120 }
121
/* Serializes table resizes; only one can run at a time. */
static DEFINE_MUTEX(hash_resize_mutex);

/* Work handler that doubles the three state hash tables.  Allocation and
 * freeing happen outside xfrm_state_lock; only the re-link of existing
 * entries and the pointer/mask swap run under the lock.
 */
static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	/* Walking all bydst chains visits each state once;
	 * xfrm_hash_transfer() re-links all three of its list nodes.
	 */
	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	/* The old tables are now empty; free them outside the lock. */
	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
176
static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

/* Woken whenever SAD contents change (insert/flush/GC). */
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

/* Protects the per-family afinfo registry below. */
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

/* Deferred destruction: dead states are queued on gc_list (under
 * gc_lock) and torn down by the work task in process context.
 */
static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
193
/* Fetch the afinfo for @family with xfrm_state_afinfo_lock held for
 * writing.  On failure (bad family, or nothing registered) NULL is
 * returned and the lock is NOT held; a non-NULL result must be paired
 * with xfrm_state_unlock_afinfo().
 */
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_state_afinfo_lock);
	return afinfo;
}
205
/* Drop the write lock taken by xfrm_state_lock_afinfo(); @afinfo is kept
 * for call-site symmetry only and is not dereferenced.
 */
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
{
	write_unlock_bh(&xfrm_state_afinfo_lock);
}
210
211 int xfrm_register_type(struct xfrm_type *type, unsigned short family)
212 {
213 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
214 struct xfrm_type **typemap;
215 int err = 0;
216
217 if (unlikely(afinfo == NULL))
218 return -EAFNOSUPPORT;
219 typemap = afinfo->type_map;
220
221 if (likely(typemap[type->proto] == NULL))
222 typemap[type->proto] = type;
223 else
224 err = -EEXIST;
225 xfrm_state_unlock_afinfo(afinfo);
226 return err;
227 }
228 EXPORT_SYMBOL(xfrm_register_type);
229
230 int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
231 {
232 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
233 struct xfrm_type **typemap;
234 int err = 0;
235
236 if (unlikely(afinfo == NULL))
237 return -EAFNOSUPPORT;
238 typemap = afinfo->type_map;
239
240 if (unlikely(typemap[type->proto] != type))
241 err = -ENOENT;
242 else
243 typemap[type->proto] = NULL;
244 xfrm_state_unlock_afinfo(afinfo);
245 return err;
246 }
247 EXPORT_SYMBOL(xfrm_unregister_type);
248
/* Resolve the xfrm_type registered for (proto, family), taking a module
 * reference on it.  If nothing is registered yet, try once to load the
 * module via its "xfrm-type-<family>-<proto>" alias and look again.
 * Returns NULL on failure; a non-NULL result must be released with
 * xfrm_put_type().
 */
static struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_type **typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		/* Drop afinfo across request_module(), then retry once. */
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}
275
/* Drop the module reference taken by xfrm_get_type(). */
static void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}
280
/* Register encapsulation mode @mode (transport/tunnel/...) for @family.
 * Returns 0 on success, -EINVAL for an out-of-range encap value,
 * -EAFNOSUPPORT for an unknown family, -EEXIST if the slot is taken, or
 * -ENOENT when the family module reference cannot be obtained.
 */
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (modemap[mode->encap])
		goto out;

	/* Pin the family module for as long as the mode is registered. */
	err = -ENOENT;
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
311 EXPORT_SYMBOL(xfrm_register_mode);
312
313 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
314 {
315 struct xfrm_state_afinfo *afinfo;
316 struct xfrm_mode **modemap;
317 int err;
318
319 if (unlikely(mode->encap >= XFRM_MODE_MAX))
320 return -EINVAL;
321
322 afinfo = xfrm_state_lock_afinfo(family);
323 if (unlikely(afinfo == NULL))
324 return -EAFNOSUPPORT;
325
326 err = -ENOENT;
327 modemap = afinfo->mode_map;
328 if (likely(modemap[mode->encap] == mode)) {
329 modemap[mode->encap] = NULL;
330 module_put(mode->afinfo->owner);
331 err = 0;
332 }
333
334 xfrm_state_unlock_afinfo(afinfo);
335 return err;
336 }
337 EXPORT_SYMBOL(xfrm_unregister_mode);
338
/* Resolve the mode object for (encap, family), taking a module reference
 * on it.  On a miss, try once to load "xfrm-mode-<family>-<encap>" and
 * retry.  Returns NULL on failure; a non-NULL result must be released
 * with xfrm_put_mode().
 */
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		/* Drop afinfo across request_module(), then retry once. */
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}
366
/* Drop the module reference taken by xfrm_get_mode(). */
static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}
371
/* Final teardown of a dead state: stop its timers, free all attached
 * allocations, drop mode/type references and the security context.
 * Runs from the GC work task, i.e. in process context, which
 * del_timer_sync() requires.
 */
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
392
/* GC work handler: steal the whole pending list in one shot, then
 * destroy each state without holding the list lock.
 */
static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	/* Dead states are chained through their (now unhashed) bydst node;
	 * see __xfrm_state_destroy().
	 */
	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}
409
410 static inline unsigned long make_jiffies(long secs)
411 {
412 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
413 return MAX_SCHEDULE_TIMEOUT-1;
414 else
415 return secs*HZ;
416 }
417
/* Per-state lifetime timer.  Computes the nearest soft/hard expiry in
 * seconds, notifies key managers once about soft expiry (tracked via
 * km.dying), deletes the state on hard expiry and otherwise re-arms
 * itself for the next event.
 */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;	/* seconds until the nearest future event */
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* A never-used state counts as first used right now. */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	/* Soft expiry was already announced; just keep rescheduling. */
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	/* A larval (ACQ, no SPI) state is parked in EXPIRED and revisited
	 * every 2 seconds instead of being deleted outright; km_waitq
	 * waiters are woken so they can observe the expiry.
	 */
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current->audit_context), 0);

out:
	spin_unlock(&x->lock);
}
493
494 static void xfrm_replay_timer_handler(unsigned long data);
495
/* Allocate a zeroed state with one reference, initialized list nodes,
 * timers and infinite default byte/packet limits.  Uses GFP_ATOMIC, so
 * it is callable from atomic context.  Returns NULL on allocation
 * failure.
 */
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
		setup_timer(&x->rtimer, xfrm_replay_timer_handler,
			    (unsigned long)x);
		x->curlft.add_time = get_seconds();
		/* No byte/packet limits by default. */
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
523
/* Called when the last reference is dropped: queue @x for deferred
 * destruction by the GC work task.  The state must already be DEAD (and
 * therefore unhashed), which is what allows reusing its bydst node as
 * the GC list link.
 */
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
534
/* Mark @x DEAD and unhash it from all three tables.  Caller holds
 * x->lock.  Returns 0 on success, or -ESRCH if the state was already
 * dead.
 */
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
560
/* Public deletion entry point: like __xfrm_state_delete() but takes the
 * per-state lock itself.
 */
int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
572
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Pre-flight check for xfrm_state_flush(): ask the LSM whether every
 * state matching @proto may be deleted.  Called under xfrm_state_lock.
 * Returns the first security error found (after auditing the refusal),
 * or 0 when the flush may proceed.
 */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
/* No LSM support built in: flushing is always permitted. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
603
/* Delete every state matching @proto that is not kernel-held.  The table
 * lock is dropped around each deletion (xfrm_state_delete() takes
 * x->lock), so the bucket scan restarts from its head after every
 * removal.  Returns 0 unless the security pre-check refused the flush.
 */
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				/* Hold @x so it survives the unlock. */
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
642
/* Report SAD statistics (entry count, hash mask, hash maximum) under
 * xfrm_state_lock so the three values are mutually consistent.
 */
void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&xfrm_state_lock);
	si->sadcnt = xfrm_state_num;
	si->sadhcnt = xfrm_state_hmask;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
652
653 static int
654 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
655 struct xfrm_tmpl *tmpl,
656 xfrm_address_t *daddr, xfrm_address_t *saddr,
657 unsigned short family)
658 {
659 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
660 if (!afinfo)
661 return -1;
662 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
663 xfrm_state_put_afinfo(afinfo);
664 return 0;
665 }
666
/* Look up a state by (daddr, spi, proto) in the by-SPI table; caller
 * holds xfrm_state_lock.  A reference is taken on the match; returns
 * NULL if nothing matches.
 */
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto)
			continue;

		/* Hash compared only daddr's first word; verify fully. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
698
/* Look up a state by (daddr, saddr, proto) in the by-source table;
 * caller holds xfrm_state_lock.  A reference is taken on the match;
 * returns NULL if nothing matches.
 */
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
733
734 static inline struct xfrm_state *
735 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
736 {
737 if (use_spi)
738 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
739 x->id.proto, family);
740 else
741 return __xfrm_state_lookup_byaddr(&x->id.daddr,
742 &x->props.saddr,
743 x->id.proto, family);
744 }
745
746 static void xfrm_hash_grow_check(int have_hash_collision)
747 {
748 if (have_hash_collision &&
749 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
750 xfrm_state_num > xfrm_state_hmask)
751 schedule_work(&xfrm_hash_work);
752 }
753
/* Output-path SA resolver: find a state usable for template @tmpl of
 * policy @pol on flow @fl, or start an acquire by installing a larval
 * state and querying the key managers.  Returns a referenced state on
 * success; otherwise NULL with the reason in *err (-EAGAIN while an
 * acquire is pending).
 */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h;
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				/* Prefer a non-dying candidate; among equals
				 * the one with the latest add_time wins.
				 */
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* A fixed-SPI template that already matches an installed
		 * state means a conflicting SA exists: do not acquire.
		 */
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			/* Key managers notified: install the larval state
			 * with a timeout so abandoned acquires expire.
			 */
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
865
/* Find a VALID state matching (daddr, saddr, mode, proto, reqid) exactly,
 * with no acquire/larval handling.  Takes a reference on the result;
 * returns NULL if nothing matches.
 *
 * NOTE(review): takes xfrm_state_lock without the _bh variant, unlike
 * most other paths in this file - confirm callers run with BHs disabled.
 */
struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;
	struct hlist_node *entry;

	spin_lock(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock(&xfrm_state_lock);

	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
897
/* Link @x into the bydst/bysrc (and, when it has an SPI, byspi) tables
 * and start its timers.  Caller holds xfrm_state_lock.
 */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	/* Stamp with a freshly incremented global generation id. */
	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	/* Fire the lifetime timer soon to begin expiry accounting. */
	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
928
/* xfrm_state_lock is held */
/* Copy the current global genid onto every installed state that shares
 * (family, reqid, daddr, saddr) with @xnew.
 */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
947
/* Insert @x into the SAD, first refreshing the genid of any existing
 * states for the same (saddr, daddr, reqid) key.
 */
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
956
/* xfrm_state_lock is held */
/* Find a larval (ACQ, SPI == 0) state for the given key.  If none exists
 * and @create is set, allocate one: its selector is filled with the full
 * host addresses, it is hashed into bydst/bysrc and its expiry timer is
 * armed.  The returned state carries a reference for the caller.
 */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid != reqid ||
		    x->props.mode != mode ||
		    x->props.family != family ||
		    x->km.state != XFRM_STATE_ACQ ||
		    x->id.spi != 0 ||
		    x->id.proto != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		/* The selector matches exactly this host pair. */
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		/* Extra reference for the caller on top of the one from
		 * xfrm_state_alloc().
		 */
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
1042
1043 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
1044
/* Install a fully keyed state.  Fails with -EEXIST if an equivalent
 * state is already installed.  Any matching larval (acquire) state -
 * located by the state's km.seq or by its address/reqid key - is deleted
 * after the insert, outside xfrm_state_lock.
 */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		/* The seq match must also agree on proto and daddr. */
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (x1) {
		/* Retire the larval state that the new SA replaces. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
1093
1094 #ifdef CONFIG_XFRM_MIGRATE
1095 struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1096 {
1097 int err = -ENOMEM;
1098 struct xfrm_state *x = xfrm_state_alloc();
1099 if (!x)
1100 goto error;
1101
1102 memcpy(&x->id, &orig->id, sizeof(x->id));
1103 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1104 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1105 x->props.mode = orig->props.mode;
1106 x->props.replay_window = orig->props.replay_window;
1107 x->props.reqid = orig->props.reqid;
1108 x->props.family = orig->props.family;
1109 x->props.saddr = orig->props.saddr;
1110
1111 if (orig->aalg) {
1112 x->aalg = xfrm_algo_clone(orig->aalg);
1113 if (!x->aalg)
1114 goto error;
1115 }
1116 x->props.aalgo = orig->props.aalgo;
1117
1118 if (orig->ealg) {
1119 x->ealg = xfrm_algo_clone(orig->ealg);
1120 if (!x->ealg)
1121 goto error;
1122 }
1123 x->props.ealgo = orig->props.ealgo;
1124
1125 if (orig->calg) {
1126 x->calg = xfrm_algo_clone(orig->calg);
1127 if (!x->calg)
1128 goto error;
1129 }
1130 x->props.calgo = orig->props.calgo;
1131
1132 if (orig->encap) {
1133 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1134 if (!x->encap)
1135 goto error;
1136 }
1137
1138 if (orig->coaddr) {
1139 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1140 GFP_KERNEL);
1141 if (!x->coaddr)
1142 goto error;
1143 }
1144
1145 err = xfrm_init_state(x);
1146 if (err)
1147 goto error;
1148
1149 x->props.flags = orig->props.flags;
1150
1151 x->curlft.add_time = orig->curlft.add_time;
1152 x->km.state = orig->km.state;
1153 x->km.seq = orig->km.seq;
1154
1155 return x;
1156
1157 error:
1158 if (errp)
1159 *errp = err;
1160 if (x) {
1161 kfree(x->aalg);
1162 kfree(x->ealg);
1163 kfree(x->calg);
1164 kfree(x->encap);
1165 kfree(x->coaddr);
1166 }
1167 kfree(x);
1168 return NULL;
1169 }
1170 EXPORT_SYMBOL(xfrm_state_clone);
1171
/* xfrm_state_lock is held */
/* Locate the installed state described by migrate entry @m (its old
 * address triplet).  With a reqid the by-destination table is searched,
 * otherwise the by-source table.  A reference is taken on the match;
 * returns NULL if nothing matches.
 */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
1216
1217 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1218 struct xfrm_migrate *m)
1219 {
1220 struct xfrm_state *xc;
1221 int err;
1222
1223 xc = xfrm_state_clone(x, &err);
1224 if (!xc)
1225 return NULL;
1226
1227 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1228 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1229
1230 /* add state */
1231 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1232 /* a care is needed when the destination address of the
1233 state is to be updated as it is a part of triplet */
1234 xfrm_state_insert(xc);
1235 } else {
1236 if ((err = xfrm_state_add(xc)) < 0)
1237 goto error;
1238 }
1239
1240 return xc;
1241 error:
1242 kfree(xc);
1243 return NULL;
1244 }
1245 EXPORT_SYMBOL(xfrm_state_migrate);
1246 #endif
1247
/* Replace the mutable parameters of an existing SA with those of @x.
 * Returns 0 on success, -ESRCH if no matching SA exists, -EEXIST if the
 * match is a kernel-internal state, -EINVAL if it is no longer valid.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* The match is only a larval (ACQUIRE) state: insert @x
		 * as the real SA; x1 is disposed of below. */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		/* @x replaced the larval entry above; delete the old one. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		/* Copy the updatable parts of @x into the live SA. */
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-arm the lifetime timer and re-check the counters
		 * against the possibly-changed limits. */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1311
1312 int xfrm_state_check_expire(struct xfrm_state *x)
1313 {
1314 if (!x->curlft.use_time)
1315 x->curlft.use_time = get_seconds();
1316
1317 if (x->km.state != XFRM_STATE_VALID)
1318 return -EINVAL;
1319
1320 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1321 x->curlft.packets >= x->lft.hard_packet_limit) {
1322 x->km.state = XFRM_STATE_EXPIRED;
1323 mod_timer(&x->timer, jiffies);
1324 return -EINVAL;
1325 }
1326
1327 if (!x->km.dying &&
1328 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1329 x->curlft.packets >= x->lft.soft_packet_limit)) {
1330 x->km.dying = 1;
1331 km_state_expired(x, 0, 0);
1332 }
1333 return 0;
1334 }
1335 EXPORT_SYMBOL(xfrm_state_check_expire);
1336
1337 struct xfrm_state *
1338 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1339 unsigned short family)
1340 {
1341 struct xfrm_state *x;
1342
1343 spin_lock_bh(&xfrm_state_lock);
1344 x = __xfrm_state_lookup(daddr, spi, proto, family);
1345 spin_unlock_bh(&xfrm_state_lock);
1346 return x;
1347 }
1348 EXPORT_SYMBOL(xfrm_state_lookup);
1349
1350 struct xfrm_state *
1351 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1352 u8 proto, unsigned short family)
1353 {
1354 struct xfrm_state *x;
1355
1356 spin_lock_bh(&xfrm_state_lock);
1357 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1358 spin_unlock_bh(&xfrm_state_lock);
1359 return x;
1360 }
1361 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1362
1363 struct xfrm_state *
1364 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1365 xfrm_address_t *daddr, xfrm_address_t *saddr,
1366 int create, unsigned short family)
1367 {
1368 struct xfrm_state *x;
1369
1370 spin_lock_bh(&xfrm_state_lock);
1371 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1372 spin_unlock_bh(&xfrm_state_lock);
1373
1374 return x;
1375 }
1376 EXPORT_SYMBOL(xfrm_find_acq);
1377
1378 #ifdef CONFIG_XFRM_SUB_POLICY
1379 int
1380 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1381 unsigned short family)
1382 {
1383 int err = 0;
1384 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1385 if (!afinfo)
1386 return -EAFNOSUPPORT;
1387
1388 spin_lock_bh(&xfrm_state_lock);
1389 if (afinfo->tmpl_sort)
1390 err = afinfo->tmpl_sort(dst, src, n);
1391 spin_unlock_bh(&xfrm_state_lock);
1392 xfrm_state_put_afinfo(afinfo);
1393 return err;
1394 }
1395 EXPORT_SYMBOL(xfrm_tmpl_sort);
1396
1397 int
1398 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1399 unsigned short family)
1400 {
1401 int err = 0;
1402 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1403 if (!afinfo)
1404 return -EAFNOSUPPORT;
1405
1406 spin_lock_bh(&xfrm_state_lock);
1407 if (afinfo->state_sort)
1408 err = afinfo->state_sort(dst, src, n);
1409 spin_unlock_bh(&xfrm_state_lock);
1410 xfrm_state_put_afinfo(afinfo);
1411 return err;
1412 }
1413 EXPORT_SYMBOL(xfrm_state_sort);
1414 #endif
1415
1416 /* Silly enough, but I'm lazy to build resolution list */
1417
1418 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1419 {
1420 int i;
1421
1422 for (i = 0; i <= xfrm_state_hmask; i++) {
1423 struct hlist_node *entry;
1424 struct xfrm_state *x;
1425
1426 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1427 if (x->km.seq == seq &&
1428 x->km.state == XFRM_STATE_ACQ) {
1429 xfrm_state_hold(x);
1430 return x;
1431 }
1432 }
1433 }
1434 return NULL;
1435 }
1436
1437 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1438 {
1439 struct xfrm_state *x;
1440
1441 spin_lock_bh(&xfrm_state_lock);
1442 x = __xfrm_find_acq_byseq(seq);
1443 spin_unlock_bh(&xfrm_state_lock);
1444 return x;
1445 }
1446 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1447
1448 u32 xfrm_get_acqseq(void)
1449 {
1450 u32 res;
1451 static u32 acqseq;
1452 static DEFINE_SPINLOCK(acqseq_lock);
1453
1454 spin_lock_bh(&acqseq_lock);
1455 res = (++acqseq ? : ++acqseq);
1456 spin_unlock_bh(&acqseq_lock);
1457 return res;
1458 }
1459 EXPORT_SYMBOL(xfrm_get_acqseq);
1460
/* Assign an SPI to @x from [low, high] (host byte order) and link the
 * state into the byspi hash.  Returns 0 on success (or if an SPI is
 * already set), -ENOENT if no free SPI was found or the state is dead.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	/* Already has an SPI: nothing to do. */
	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		/* Caller requested one specific SPI; take it only if free. */
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		/* Probe random SPIs in the range until a free one is hit.
		 * NOTE(review): up to high-low+1 probes may run, and
		 * high-low+1 wraps to 0 for the full 32-bit range (loop
		 * would not execute) — presumably callers bound the range;
		 * confirm.  The modulo also introduces slight bias. */
		u32 spi = 0;
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* Make the state findable by SPI. */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1513
/* Invoke @func on every state whose protocol matches @proto, under
 * xfrm_state_lock.  All but the last matching state are delivered with
 * their 1-based position as the count argument; the final state gets
 * count == 0 as an end-of-walk marker.  Returns -ENOENT when nothing
 * matched, otherwise the first non-zero callback result (or 0).
 */
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			/* Deliver the previous match now; the current one
			 * is held back so the final call can pass 0. */
			if (last) {
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	/* Last match: count == 0 signals end of walk. */
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1547
1548
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 * 1. we updated one of the sequence numbers, and the seqno
	 * difference is at least x->replay_maxdiff, in this case we also
	 * update the timeout of our timer function
	 * 2. if x->replay_maxage has elapsed since last update,
	 * and there were changes
	 *
	 * The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Suppress the notification while both directions are
		 * still within replay_maxdiff of the last advertised
		 * counters — unless a timer event was deferred earlier,
		 * in which case demote to a TIMEOUT notification. */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Nothing changed since the last notification: defer
		 * until the next counter update. */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* Remember what we advertised, then send the aevent. */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	/* Re-arm the aging timer; clear the defer flag only if the timer
	 * was not already pending. */
	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1595
1596 static void xfrm_replay_timer_handler(unsigned long data)
1597 {
1598 struct xfrm_state *x = (struct xfrm_state*)data;
1599
1600 spin_lock(&x->lock);
1601
1602 if (x->km.state == XFRM_STATE_VALID) {
1603 if (xfrm_aevent_is_on())
1604 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1605 else
1606 x->xflags |= XFRM_TIME_DEFER;
1607 }
1608
1609 spin_unlock(&x->lock);
1610 }
1611
1612 int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
1613 {
1614 u32 diff;
1615 u32 seq = ntohl(net_seq);
1616
1617 if (unlikely(seq == 0))
1618 return -EINVAL;
1619
1620 if (likely(seq > x->replay.seq))
1621 return 0;
1622
1623 diff = x->replay.seq - seq;
1624 if (diff >= min_t(unsigned int, x->props.replay_window,
1625 sizeof(x->replay.bitmap) * 8)) {
1626 x->stats.replay_window++;
1627 return -EINVAL;
1628 }
1629
1630 if (x->replay.bitmap & (1U << diff)) {
1631 x->stats.replay++;
1632 return -EINVAL;
1633 }
1634 return 0;
1635 }
1636 EXPORT_SYMBOL(xfrm_replay_check);
1637
1638 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1639 {
1640 u32 diff;
1641 u32 seq = ntohl(net_seq);
1642
1643 if (seq > x->replay.seq) {
1644 diff = seq - x->replay.seq;
1645 if (diff < x->props.replay_window)
1646 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1647 else
1648 x->replay.bitmap = 1;
1649 x->replay.seq = seq;
1650 } else {
1651 diff = x->replay.seq - seq;
1652 x->replay.bitmap |= (1U << diff);
1653 }
1654
1655 if (xfrm_aevent_is_on())
1656 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1657 }
1658 EXPORT_SYMBOL(xfrm_replay_advance);
1659
1660 static LIST_HEAD(xfrm_km_list);
1661 static DEFINE_RWLOCK(xfrm_km_lock);
1662
1663 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1664 {
1665 struct xfrm_mgr *km;
1666
1667 read_lock(&xfrm_km_lock);
1668 list_for_each_entry(km, &xfrm_km_list, list)
1669 if (km->notify_policy)
1670 km->notify_policy(xp, dir, c);
1671 read_unlock(&xfrm_km_lock);
1672 }
1673
1674 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1675 {
1676 struct xfrm_mgr *km;
1677 read_lock(&xfrm_km_lock);
1678 list_for_each_entry(km, &xfrm_km_list, list)
1679 if (km->notify)
1680 km->notify(x, c);
1681 read_unlock(&xfrm_km_lock);
1682 }
1683
1684 EXPORT_SYMBOL(km_policy_notify);
1685 EXPORT_SYMBOL(km_state_notify);
1686
1687 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1688 {
1689 struct km_event c;
1690
1691 c.data.hard = hard;
1692 c.pid = pid;
1693 c.event = XFRM_MSG_EXPIRE;
1694 km_state_notify(x, &c);
1695
1696 if (hard)
1697 wake_up(&km_waitq);
1698 }
1699
1700 EXPORT_SYMBOL(km_state_expired);
1701 /*
1702 * We send to all registered managers regardless of failure
1703 * We are happy with one success
1704 */
1705 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1706 {
1707 int err = -EINVAL, acqret;
1708 struct xfrm_mgr *km;
1709
1710 read_lock(&xfrm_km_lock);
1711 list_for_each_entry(km, &xfrm_km_list, list) {
1712 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1713 if (!acqret)
1714 err = acqret;
1715 }
1716 read_unlock(&xfrm_km_lock);
1717 return err;
1718 }
1719 EXPORT_SYMBOL(km_query);
1720
/* Ask the key managers to record a new NAT-T address/port mapping for
 * @x.  Stops at the first manager that reports success (returns 0);
 * otherwise returns the last manager's error, or -EINVAL if none
 * provides the hook.
 */
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		/* If this manager lacks the hook, err keeps the previous
		 * (necessarily non-zero) value and the check is harmless. */
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);
1737
1738 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1739 {
1740 struct km_event c;
1741
1742 c.data.hard = hard;
1743 c.pid = pid;
1744 c.event = XFRM_MSG_POLEXPIRE;
1745 km_policy_notify(pol, dir, &c);
1746
1747 if (hard)
1748 wake_up(&km_waitq);
1749 }
1750 EXPORT_SYMBOL(km_policy_expired);
1751
1752 #ifdef CONFIG_XFRM_MIGRATE
1753 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1754 struct xfrm_migrate *m, int num_migrate)
1755 {
1756 int err = -EINVAL;
1757 int ret;
1758 struct xfrm_mgr *km;
1759
1760 read_lock(&xfrm_km_lock);
1761 list_for_each_entry(km, &xfrm_km_list, list) {
1762 if (km->migrate) {
1763 ret = km->migrate(sel, dir, type, m, num_migrate);
1764 if (!ret)
1765 err = ret;
1766 }
1767 }
1768 read_unlock(&xfrm_km_lock);
1769 return err;
1770 }
1771 EXPORT_SYMBOL(km_migrate);
1772 #endif
1773
1774 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1775 {
1776 int err = -EINVAL;
1777 int ret;
1778 struct xfrm_mgr *km;
1779
1780 read_lock(&xfrm_km_lock);
1781 list_for_each_entry(km, &xfrm_km_list, list) {
1782 if (km->report) {
1783 ret = km->report(proto, sel, addr);
1784 if (!ret)
1785 err = ret;
1786 }
1787 }
1788 read_unlock(&xfrm_km_lock);
1789 return err;
1790 }
1791 EXPORT_SYMBOL(km_report);
1792
/* Handle a per-socket XFRM policy setsockopt: copy the user-supplied
 * blob, let a key manager compile it into an xfrm_policy, and attach
 * it to @sk.  Returns 0 or a negative errno.
 */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		/* First manager that understands the blob wins; on
		 * success err apparently carries the policy direction
		 * (it is passed as dir below) — see compile_policy. */
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1832
/* Register a key manager for XFRM event callbacks.  Always returns 0. */
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
1841
/* Remove a previously registered key manager.  Always returns 0. */
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
1850
1851 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1852 {
1853 int err = 0;
1854 if (unlikely(afinfo == NULL))
1855 return -EINVAL;
1856 if (unlikely(afinfo->family >= NPROTO))
1857 return -EAFNOSUPPORT;
1858 write_lock_bh(&xfrm_state_afinfo_lock);
1859 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1860 err = -ENOBUFS;
1861 else
1862 xfrm_state_afinfo[afinfo->family] = afinfo;
1863 write_unlock_bh(&xfrm_state_afinfo_lock);
1864 return err;
1865 }
1866 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1867
1868 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1869 {
1870 int err = 0;
1871 if (unlikely(afinfo == NULL))
1872 return -EINVAL;
1873 if (unlikely(afinfo->family >= NPROTO))
1874 return -EAFNOSUPPORT;
1875 write_lock_bh(&xfrm_state_afinfo_lock);
1876 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1877 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1878 err = -EINVAL;
1879 else
1880 xfrm_state_afinfo[afinfo->family] = NULL;
1881 }
1882 write_unlock_bh(&xfrm_state_afinfo_lock);
1883 return err;
1884 }
1885 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1886
/* Look up the per-family state ops.  On success the afinfo read lock
 * is LEFT HELD; the caller must release it via xfrm_state_put_afinfo().
 * Returns NULL (with the lock released) for unknown or unregistered
 * families.
 */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		/* Only the failure path drops the lock here. */
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1898
/* Release the read lock taken by a successful xfrm_state_get_afinfo();
 * @afinfo itself is unused.
 */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1903
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Detach @x from its inner tunnel state, deleting the tunnel state when
 * we are its last real user.
 */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		/* tunnel_users == 2 means only the creator's reference
		 * remains besides ours.  NOTE(review): the read and the
		 * dec below are not atomic as a pair — presumably
		 * serialized by the caller; confirm. */
		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1918
1919 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1920 {
1921 int res;
1922
1923 spin_lock_bh(&x->lock);
1924 if (x->km.state == XFRM_STATE_VALID &&
1925 x->type && x->type->get_mtu)
1926 res = x->type->get_mtu(x, mtu);
1927 else
1928 res = mtu - x->props.header_len;
1929 spin_unlock_bh(&x->lock);
1930 return res;
1931 }
1932
1933 int xfrm_init_state(struct xfrm_state *x)
1934 {
1935 struct xfrm_state_afinfo *afinfo;
1936 int family = x->props.family;
1937 int err;
1938
1939 err = -EAFNOSUPPORT;
1940 afinfo = xfrm_state_get_afinfo(family);
1941 if (!afinfo)
1942 goto error;
1943
1944 err = 0;
1945 if (afinfo->init_flags)
1946 err = afinfo->init_flags(x);
1947
1948 xfrm_state_put_afinfo(afinfo);
1949
1950 if (err)
1951 goto error;
1952
1953 err = -EPROTONOSUPPORT;
1954 x->inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
1955 if (x->inner_mode == NULL)
1956 goto error;
1957
1958 if (!(x->inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
1959 family != x->sel.family)
1960 goto error;
1961
1962 x->type = xfrm_get_type(x->id.proto, family);
1963 if (x->type == NULL)
1964 goto error;
1965
1966 err = x->type->init_state(x);
1967 if (err)
1968 goto error;
1969
1970 x->outer_mode = xfrm_get_mode(x->props.mode, family);
1971 if (x->outer_mode == NULL)
1972 goto error;
1973
1974 x->km.state = XFRM_STATE_VALID;
1975
1976 error:
1977 return err;
1978 }
1979
1980 EXPORT_SYMBOL(xfrm_init_state);
1981
/* Boot-time initialization: allocate the three state hash tables at
 * their minimal size and set up the GC work item.  (Tables are
 * presumably resized elsewhere up to xfrm_state_hashmax — the resize
 * code is not in this chunk.)
 */
void __init xfrm_state_init(void)
{
	unsigned int sz;

	/* Start with 8 buckets per table. */
	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}
1997
1998 #ifdef CONFIG_AUDITSYSCALL
/* Append the fields common to all SAD audit records: the optional
 * security context, source/destination addresses (per family) and the
 * SPI in both decimal and hex.
 */
static inline void xfrm_audit_common_stateinfo(struct xfrm_state *x,
					       struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = x->security;
	u32 spi = ntohl(x->id.spi);

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch(x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf,
				 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
				 NIPQUAD(x->props.saddr.a4),
				 NIPQUAD(x->id.daddr.a4));
		break;
	case AF_INET6:
		audit_log_format(audit_buf,
				 " src=" NIP6_FMT " dst=" NIP6_FMT,
				 NIP6(*(struct in6_addr *)x->props.saddr.a6),
				 NIP6(*(struct in6_addr *)x->id.daddr.a6));
		break;
	}

	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}
2026
2027 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2028 u32 auid, u32 secid)
2029 {
2030 struct audit_buffer *audit_buf;
2031
2032 if (audit_enabled == 0)
2033 return;
2034 audit_buf = xfrm_audit_start(auid, secid);
2035 if (audit_buf == NULL)
2036 return;
2037 audit_log_format(audit_buf, " op=SAD-add res=%u", result);
2038 xfrm_audit_common_stateinfo(x, audit_buf);
2039 audit_log_end(audit_buf);
2040 }
2041 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2042
2043 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2044 u32 auid, u32 secid)
2045 {
2046 struct audit_buffer *audit_buf;
2047
2048 if (audit_enabled == 0)
2049 return;
2050 audit_buf = xfrm_audit_start(auid, secid);
2051 if (audit_buf == NULL)
2052 return;
2053 audit_log_format(audit_buf, " op=SAD-delete res=%u", result);
2054 xfrm_audit_common_stateinfo(x, audit_buf);
2055 audit_log_end(audit_buf);
2056 }
2057 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2058 #endif /* CONFIG_AUDITSYSCALL */
This page took 0.107687 seconds and 5 git commands to generate.