Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[deliverable/linux.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <asm/uaccess.h>
23
24 #include "xfrm_hash.h"
25
/* Netlink socket used by the xfrm userspace interface. */
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

/* Replay-event aging interval, sysctl-tunable (default XFRM_AE_ETIME). */
u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

/* Replay sequence-number threshold, sysctl-tunable (default XFRM_AE_SEQT_SIZE). */
u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Lifetime of larval ACQUIRE states, in seconds (used as
 * hard_add_expires_seconds and to arm their expiry timer). */
u32 sysctl_xfrm_acq_expires __read_mostly = 30;
37 /* Each xfrm_state may be linked to two tables:
38
39 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
40 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
41 destination/tunnel endpoint. (output)
42 */
43
/* Protects the three hash tables below and the state counters. */
static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;	/* keyed by (daddr, saddr) */
static struct hlist_head *xfrm_state_byspi __read_mostly;	/* keyed by (daddr, spi, proto) */
static unsigned int xfrm_state_hmask __read_mostly;	/* bucket count - 1, shared by all three tables */
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;	/* resize ceiling, in buckets */
static unsigned int xfrm_state_num;	/* number of states currently hashed */
static unsigned int xfrm_state_genid;	/* bumped on every insert; copied into x->genid */

/* Per-family helpers, defined further down. */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
63 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
64 xfrm_address_t *saddr,
65 u32 reqid,
66 unsigned short family)
67 {
68 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
69 }
70
71 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
72 xfrm_address_t *saddr,
73 unsigned short family)
74 {
75 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
76 }
77
78 static inline unsigned int
79 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
80 {
81 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
82 }
83
/* Re-bucket every state on @list into the new dst/src/spi tables sized
 * by @nhashmask.  Runs under xfrm_state_lock during a resize.  The
 * hlist_add_head() calls re-link the very nodes being walked, hence
 * the _safe iterator.
 */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		/* Only states with an SPI assigned live in the byspi table. */
		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
114
115 static unsigned long xfrm_hash_new_size(void)
116 {
117 return ((xfrm_state_hmask + 1) << 1) *
118 sizeof(struct hlist_head);
119 }
120
/* Serializes concurrent resize attempts. */
static DEFINE_MUTEX(hash_resize_mutex);

/* Worker that doubles the three state hash tables.  The allocations
 * happen unlocked; the entry transfer and the pointer/mask swap run
 * under xfrm_state_lock; the old tables are freed after the lock is
 * dropped.  On any allocation failure the tables are left untouched.
 */
static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	/* nsize is (bucket count) * sizeof(head); derive the new mask. */
	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
175
static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

/* Waitqueue woken whenever the SA database changes. */
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

/* Per-address-family state operations, registered at module load. */
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

/* Deferred destruction of dead states (see __xfrm_state_destroy). */
static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
/* Return the afinfo for @family with xfrm_state_afinfo_lock held for
 * writing, or NULL (lock already released) if the family is out of
 * range or nothing is registered.  Pair with
 * xfrm_state_unlock_afinfo().
 */
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		/* Nothing registered: drop the lock before returning NULL. */
		write_unlock_bh(&xfrm_state_afinfo_lock);
	return afinfo;
}

/* Release the lock taken by a successful xfrm_state_lock_afinfo(). */
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
{
	write_unlock_bh(&xfrm_state_afinfo_lock);
}
209
210 int xfrm_register_type(struct xfrm_type *type, unsigned short family)
211 {
212 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
213 struct xfrm_type **typemap;
214 int err = 0;
215
216 if (unlikely(afinfo == NULL))
217 return -EAFNOSUPPORT;
218 typemap = afinfo->type_map;
219
220 if (likely(typemap[type->proto] == NULL))
221 typemap[type->proto] = type;
222 else
223 err = -EEXIST;
224 xfrm_state_unlock_afinfo(afinfo);
225 return err;
226 }
227 EXPORT_SYMBOL(xfrm_register_type);
228
229 int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
230 {
231 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
232 struct xfrm_type **typemap;
233 int err = 0;
234
235 if (unlikely(afinfo == NULL))
236 return -EAFNOSUPPORT;
237 typemap = afinfo->type_map;
238
239 if (unlikely(typemap[type->proto] != type))
240 err = -ENOENT;
241 else
242 typemap[type->proto] = NULL;
243 xfrm_state_unlock_afinfo(afinfo);
244 return err;
245 }
246 EXPORT_SYMBOL(xfrm_unregister_type);
247
/* Look up the xfrm_type registered for (@proto, @family) and take a
 * module reference on it.  On a miss, try once to load the handler via
 * the "xfrm-type-<family>-<proto>" module alias and retry.  Returns
 * NULL if no type is registered or its module is going away.
 */
static struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_type **typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	/* try_module_get() fails while the owning module is unloading. */
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}

/* Release the module reference taken by xfrm_get_type(). */
static void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}
279
/* Register an encapsulation mode handler for @family, pinning the
 * family's afinfo module while the mode stays registered.  Returns 0,
 * -EINVAL for a bad encap id, -EAFNOSUPPORT for an unknown family,
 * -EEXIST if the slot is taken, or -ENOENT if the afinfo module
 * reference cannot be taken.
 */
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (modemap[mode->encap])
		goto out;

	err = -ENOENT;
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
311
/* Remove a mode handler and drop the afinfo module reference taken at
 * registration.  Returns 0, -EINVAL for a bad encap id, -EAFNOSUPPORT
 * for an unknown family, or -ENOENT if @mode is not the registered
 * handler for its slot.
 */
int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		module_put(mode->afinfo->owner);
		err = 0;
	}

	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);
337
/* Look up the mode handler for (@encap, @family) and take a module
 * reference on it.  On a miss, try once to load the handler via the
 * "xfrm-mode-<family>-<encap>" module alias and retry.  Returns NULL
 * if no handler exists or its module is going away.
 */
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	/* try_module_get() fails while the owning module is unloading. */
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}

/* Release the module reference taken by xfrm_get_mode(). */
static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}
370
/* Final teardown of a state, run from the GC workqueue (process
 * context) so del_timer_sync() may sleep.  Order matters: timers are
 * stopped first, the type destructor runs before its module reference
 * is dropped, and the security context is released just before the
 * state itself is freed.
 */
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
391
/* GC worker: atomically steal the pending list under the gc lock, then
 * destroy each state outside of it.  Wakes km_waitq when done.
 */
static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	/* Dead states are chained onto the GC list via their bydst node. */
	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}
408
409 static inline unsigned long make_jiffies(long secs)
410 {
411 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
412 return MAX_SCHEDULE_TIMEOUT-1;
413 else
414 return secs*HZ;
415 }
416
/* Per-state lifetime timer.  Computes the nearest soft/hard expiry
 * deadline, sends km notifications, and re-arms itself.  Runs in timer
 * (softirq) context under x->lock.
 */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;	/* seconds until the nearest deadline */
	int warn = 0;		/* set when a soft limit has been hit */
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* use_time == 0 means "never used yet"; count from now. */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	/* Soft limits were already signalled once; don't warn again. */
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);	/* soft-expire notification */
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	/* SPI-less larval (ACQ) states are only flipped to EXPIRED and
	 * rechecked in two seconds instead of being deleted here. */
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);	/* hard-expire notification */

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current->audit_context), 0);

out:
	spin_unlock(&x->lock);
}
492
static void xfrm_replay_timer_handler(unsigned long data);

/* Allocate a zeroed xfrm_state with one reference held by the caller,
 * both timers initialised (but not armed) and all byte/packet limits
 * set to XFRM_INF.  Uses GFP_ATOMIC, so callable from atomic context.
 * Returns NULL on allocation failure.
 */
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data = (unsigned long)x;
		init_timer(&x->rtimer);
		x->rtimer.function = xfrm_replay_timer_handler;
		x->rtimer.data = (unsigned long)x;
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
525
/* Hand a DEAD state to the GC worker for deferred destruction.  The
 * bydst node is reused as the GC-list linkage; the state was already
 * unhashed by __xfrm_state_delete().
 */
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
536
/* Mark @x DEAD, unhash it from all three tables, and drop the table
 * counter and the allocation reference.  Returns 0, or -ESRCH if the
 * state was already dead.  Called with x->lock held; the plain
 * spin_lock() on xfrm_state_lock relies on BHs already being disabled
 * (see xfrm_state_delete() and the timer handler).
 */
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		__xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
562
563 int xfrm_state_delete(struct xfrm_state *x)
564 {
565 int err;
566
567 spin_lock_bh(&x->lock);
568 err = __xfrm_state_delete(x);
569 spin_unlock_bh(&x->lock);
570
571 return err;
572 }
573 EXPORT_SYMBOL(xfrm_state_delete);
574
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Flush pre-check: verify the LSM allows the caller to delete every
 * state matching @proto.  The first refusal is audited and its error
 * aborts the flush.  Called with xfrm_state_lock held.
 */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
/* Without the LSM hooks a flush is always permitted. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
605
/* Delete every non-kernel state whose protocol matches @proto,
 * auditing each deletion.  xfrm_state_lock is dropped around
 * xfrm_state_delete(): __xfrm_state_delete() takes xfrm_state_lock
 * inside x->lock, so deleting while holding the table lock would
 * deadlock.  After re-acquiring the lock the bucket walk restarts
 * from its head.
 */
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				/* Hold @x so it survives the unlocked region. */
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
644
645 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
646 {
647 spin_lock_bh(&xfrm_state_lock);
648 si->sadcnt = xfrm_state_num;
649 si->sadhcnt = xfrm_state_hmask;
650 si->sadhmcnt = xfrm_state_hashmax;
651 spin_unlock_bh(&xfrm_state_lock);
652 }
653 EXPORT_SYMBOL(xfrm_sad_getinfo);
654
655 static int
656 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
657 struct xfrm_tmpl *tmpl,
658 xfrm_address_t *daddr, xfrm_address_t *saddr,
659 unsigned short family)
660 {
661 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
662 if (!afinfo)
663 return -1;
664 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
665 xfrm_state_put_afinfo(afinfo);
666 return 0;
667 }
668
/* Look up a state by (daddr, spi, proto) in @family.  Returns a
 * referenced state or NULL.  Caller holds xfrm_state_lock.
 */
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto)
			continue;

		/* Compare the address with family-appropriate width. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
700
/* Look up a state by (daddr, saddr, proto) in @family using the bysrc
 * table; used for SPI-less protocols.  Returns a referenced state or
 * NULL.  Caller holds xfrm_state_lock.
 */
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto)
			continue;

		/* Compare both addresses with family-appropriate width. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
735
736 static inline struct xfrm_state *
737 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
738 {
739 if (use_spi)
740 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
741 x->id.proto, family);
742 else
743 return __xfrm_state_lookup_byaddr(&x->id.daddr,
744 &x->props.saddr,
745 x->id.proto, family);
746 }
747
748 static void xfrm_hash_grow_check(int have_hash_collision)
749 {
750 if (have_hash_collision &&
751 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
752 xfrm_state_num > xfrm_state_hmask)
753 schedule_work(&xfrm_hash_work);
754 }
755
/* Output-path SA resolution: find a state matching template @tmpl and
 * flow @fl for policy @pol, or create a larval ACQ state and ask the
 * key managers to negotiate one.  Returns a referenced state, or NULL
 * with *err set: -EAGAIN while an acquire is outstanding, -EEXIST on
 * an SPI clash, -ENOMEM, or -ESRCH when resolution failed.
 */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				/* Prefer a non-dying state; among equals,
				 * the most recently added one. */
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* A state with the template's exact SPI but a different
		 * key already exists: refuse rather than clash. */
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			/* A key manager accepted the query: hash the
			 * larval state and arm its acquire-expiry timer. */
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
866
867 struct xfrm_state *
868 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
869 unsigned short family, u8 mode, u8 proto, u32 reqid)
870 {
871 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
872 struct xfrm_state *rx = NULL, *x = NULL;
873 struct hlist_node *entry;
874
875 spin_lock(&xfrm_state_lock);
876 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
877 if (x->props.family == family &&
878 x->props.reqid == reqid &&
879 !(x->props.flags & XFRM_STATE_WILDRECV) &&
880 xfrm_state_addr_check(x, daddr, saddr, family) &&
881 mode == x->props.mode &&
882 proto == x->id.proto &&
883 x->km.state == XFRM_STATE_VALID) {
884 rx = x;
885 break;
886 }
887 }
888
889 if (rx)
890 xfrm_state_hold(rx);
891 spin_unlock(&xfrm_state_lock);
892
893
894 return rx;
895 }
896 EXPORT_SYMBOL(xfrm_stateonly_find);
897
/* Link @x into all applicable hash tables, stamp it with a fresh
 * genid, and start its timers.  Caller holds xfrm_state_lock.
 */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	/* Only states with an SPI live in the byspi table. */
	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	/* Run the lifetime timer soon so accounting starts right away. */
	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
928
/* xfrm_state_lock is held */
/* Refresh the genid of every existing state sharing @xnew's
 * (family, reqid, daddr, saddr) key, so holders of an older x->genid
 * can detect that a newer state has been inserted.
 */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
947
948 void xfrm_state_insert(struct xfrm_state *x)
949 {
950 spin_lock_bh(&xfrm_state_lock);
951 __xfrm_state_bump_genids(x);
952 __xfrm_state_insert(x);
953 spin_unlock_bh(&xfrm_state_lock);
954 }
955 EXPORT_SYMBOL(xfrm_state_insert);
956
/* xfrm_state_lock is held */
/* Find the larval (ACQ, SPI-less) state matching the given key, or,
 * when @create is set, allocate and hash a new one.  A created state
 * gets a host-width temporary selector, the acquire expiry timer, and
 * two references: one kept by the hash tables, one returned to the
 * caller.  Returns a referenced state, or NULL.
 */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid != reqid ||
		    x->props.mode != mode ||
		    x->props.family != family ||
		    x->km.state != XFRM_STATE_ACQ ||
		    x->id.spi != 0 ||
		    x->id.proto != proto)
			continue;

		/* Compare addresses with family-appropriate width. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			/* Temporary selector pinned to the exact host pair. */
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		/* Second reference: the tables keep one, the caller gets one. */
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
1042
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

/* Insert a fully-keyed state supplied by a key manager.  Returns 0, or
 * -EEXIST if an equivalent state is already hashed.  Any matching
 * larval ACQ state — located by the km sequence number or, failing
 * that, by the (mode, reqid, proto, addresses) key — is deleted once
 * the new state is in place.
 */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		/* A seq match must also agree on proto and daddr. */
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (x1) {
		/* Retire the larval state that @x replaces. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
1093
1094 #ifdef CONFIG_XFRM_MIGRATE
/* Deep-copy @orig for IPsec migration: identity, selector, lifetime
 * limits, algorithms, encapsulation template and care-of address are
 * duplicated; the copy is re-initialised through xfrm_init_state()
 * and inherits the original's add_time, km state and sequence number.
 * Returns the clone, or NULL with the error stored in *errp (when
 * non-NULL).
 */
struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc();
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

error:
	if (errp)
		*errp = err;
	if (x) {
		/* kfree(NULL) is a no-op, so never-set fields are harmless. */
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	kfree(x);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_clone);
1171
/* xfrm_state_lock is held */
/* Locate the existing state that migration entry @m refers to: via the
 * by-destination hash when a reqid is given, otherwise via the
 * by-source hash.  Returns a referenced state or NULL.
 */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
1216
1217 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1218 struct xfrm_migrate *m)
1219 {
1220 struct xfrm_state *xc;
1221 int err;
1222
1223 xc = xfrm_state_clone(x, &err);
1224 if (!xc)
1225 return NULL;
1226
1227 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1228 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1229
1230 /* add state */
1231 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1232 /* a care is needed when the destination address of the
1233 state is to be updated as it is a part of triplet */
1234 xfrm_state_insert(xc);
1235 } else {
1236 if ((err = xfrm_state_add(xc)) < 0)
1237 goto error;
1238 }
1239
1240 return xc;
1241 error:
1242 kfree(xc);
1243 return NULL;
1244 }
1245 EXPORT_SYMBOL(xfrm_state_migrate);
1246 #endif
1247
/* Update an existing SA with the parameters carried in @x.
 *
 * Phase 1 (under xfrm_state_lock): locate the matching state x1.  If it
 * is only an ACQ placeholder, insert @x as the real state and later
 * delete the placeholder.  Kernel-owned states may not be replaced.
 *
 * Phase 2 (under x1->lock): copy the updatable attributes (encap,
 * coaddr, selector, lifetimes) into x1 and re-arm its timer.
 *
 * Returns 0 on success, -ESRCH if no matching state, -EEXIST for a
 * kernel-owned state, -EINVAL if x1 is no longer VALID.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		/* states owned by the kernel (tunnels) cannot be replaced */
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* placeholder from an acquire: insert the real state;
		 * x = NULL flags that x1 must be deleted below */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		/* @x replaced the ACQ placeholder x1; drop the placeholder */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		/* copy only attributes that are present on both states */
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* re-arm the lifetime timer and re-check expiry under
		 * the possibly-changed limits */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1311
1312 int xfrm_state_check_expire(struct xfrm_state *x)
1313 {
1314 if (!x->curlft.use_time)
1315 x->curlft.use_time = get_seconds();
1316
1317 if (x->km.state != XFRM_STATE_VALID)
1318 return -EINVAL;
1319
1320 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1321 x->curlft.packets >= x->lft.hard_packet_limit) {
1322 x->km.state = XFRM_STATE_EXPIRED;
1323 mod_timer(&x->timer, jiffies);
1324 return -EINVAL;
1325 }
1326
1327 if (!x->km.dying &&
1328 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1329 x->curlft.packets >= x->lft.soft_packet_limit)) {
1330 x->km.dying = 1;
1331 km_state_expired(x, 0, 0);
1332 }
1333 return 0;
1334 }
1335 EXPORT_SYMBOL(xfrm_state_check_expire);
1336
1337 struct xfrm_state *
1338 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1339 unsigned short family)
1340 {
1341 struct xfrm_state *x;
1342
1343 spin_lock_bh(&xfrm_state_lock);
1344 x = __xfrm_state_lookup(daddr, spi, proto, family);
1345 spin_unlock_bh(&xfrm_state_lock);
1346 return x;
1347 }
1348 EXPORT_SYMBOL(xfrm_state_lookup);
1349
1350 struct xfrm_state *
1351 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1352 u8 proto, unsigned short family)
1353 {
1354 struct xfrm_state *x;
1355
1356 spin_lock_bh(&xfrm_state_lock);
1357 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1358 spin_unlock_bh(&xfrm_state_lock);
1359 return x;
1360 }
1361 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1362
1363 struct xfrm_state *
1364 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1365 xfrm_address_t *daddr, xfrm_address_t *saddr,
1366 int create, unsigned short family)
1367 {
1368 struct xfrm_state *x;
1369
1370 spin_lock_bh(&xfrm_state_lock);
1371 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1372 spin_unlock_bh(&xfrm_state_lock);
1373
1374 return x;
1375 }
1376 EXPORT_SYMBOL(xfrm_find_acq);
1377
1378 #ifdef CONFIG_XFRM_SUB_POLICY
1379 int
1380 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1381 unsigned short family)
1382 {
1383 int err = 0;
1384 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1385 if (!afinfo)
1386 return -EAFNOSUPPORT;
1387
1388 spin_lock_bh(&xfrm_state_lock);
1389 if (afinfo->tmpl_sort)
1390 err = afinfo->tmpl_sort(dst, src, n);
1391 spin_unlock_bh(&xfrm_state_lock);
1392 xfrm_state_put_afinfo(afinfo);
1393 return err;
1394 }
1395 EXPORT_SYMBOL(xfrm_tmpl_sort);
1396
1397 int
1398 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1399 unsigned short family)
1400 {
1401 int err = 0;
1402 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1403 if (!afinfo)
1404 return -EAFNOSUPPORT;
1405
1406 spin_lock_bh(&xfrm_state_lock);
1407 if (afinfo->state_sort)
1408 err = afinfo->state_sort(dst, src, n);
1409 spin_unlock_bh(&xfrm_state_lock);
1410 xfrm_state_put_afinfo(afinfo);
1411 return err;
1412 }
1413 EXPORT_SYMBOL(xfrm_state_sort);
1414 #endif
1415
1416 /* Silly enough, but I'm lazy to build resolution list */
1417
1418 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1419 {
1420 int i;
1421
1422 for (i = 0; i <= xfrm_state_hmask; i++) {
1423 struct hlist_node *entry;
1424 struct xfrm_state *x;
1425
1426 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1427 if (x->km.seq == seq &&
1428 x->km.state == XFRM_STATE_ACQ) {
1429 xfrm_state_hold(x);
1430 return x;
1431 }
1432 }
1433 }
1434 return NULL;
1435 }
1436
1437 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1438 {
1439 struct xfrm_state *x;
1440
1441 spin_lock_bh(&xfrm_state_lock);
1442 x = __xfrm_find_acq_byseq(seq);
1443 spin_unlock_bh(&xfrm_state_lock);
1444 return x;
1445 }
1446 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1447
1448 u32 xfrm_get_acqseq(void)
1449 {
1450 u32 res;
1451 static u32 acqseq;
1452 static DEFINE_SPINLOCK(acqseq_lock);
1453
1454 spin_lock_bh(&acqseq_lock);
1455 res = (++acqseq ? : ++acqseq);
1456 spin_unlock_bh(&acqseq_lock);
1457 return res;
1458 }
1459 EXPORT_SYMBOL(xfrm_get_acqseq);
1460
/* Assign an SPI to state @x from the (host-order) range [low, high] and
 * hash the state into the byspi table.
 *
 * If low == high the single requested SPI is tried; otherwise up to
 * high-low+1 random probes are made, each checked for collision against
 * existing SAs.  Returns 0 on success (or if x already has an SPI),
 * -ENOENT if the state is dead or no free SPI was found.
 *
 * NOTE(review): xfrm_state_lookup() takes xfrm_state_lock while x->lock
 * is already held here — confirm this nesting order is used consistently
 * elsewhere in the file.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	/* already has an SPI: nothing to do */
	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		/* exact SPI requested: fail if it is already in use */
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		/* random probing; h is reused as a plain attempt counter */
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* SPI chosen: make the state findable by SPI lookup */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1513
/* Walk every SA whose protocol matches @proto and invoke @func on it.
 *
 * The callback is deliberately invoked one entry behind the scan:
 * each match is remembered in 'last' and reported on the *next* match
 * with a running 1-based count, while the final entry is reported after
 * the scan with count == 0.  A callback can therefore use count == 0 to
 * detect the last entry (e.g. to mark the end of a dump).
 *
 * Returns 0, the first non-zero callback return, or -ENOENT if nothing
 * matched.  Runs entirely under xfrm_state_lock.
 */
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			if (last) {
				/* report the previous match */
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	/* report the final match with the count-0 "last entry" marker */
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1547
1548
/* Emit a replay-counter (aevent) notification for state @x, or defer it.
 * The state structure must be locked by the caller.
 */
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 * 1. we updated on of the sequence numbers, and the seqno difference
	 * is at least x->replay_maxdiff, in this case we also update the
	 * timeout of our timer function
	 * 2. if x->replay_maxage has elapsed since last update,
	 * and there were changes
	 *
	 * The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* below-threshold update: suppress, unless a timer-deferred
		 * notification is pending, in which case convert this call
		 * into the deferred TIMEOUT notification */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* no change since the last notification: defer until the
		 * counters actually move */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* snapshot the counters we are about to report and notify KMs */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	/* re-arm the aging timer; clear the defer flag only if the timer
	 * was not already pending */
	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1595
1596 static void xfrm_replay_timer_handler(unsigned long data)
1597 {
1598 struct xfrm_state *x = (struct xfrm_state*)data;
1599
1600 spin_lock(&x->lock);
1601
1602 if (x->km.state == XFRM_STATE_VALID) {
1603 if (xfrm_aevent_is_on())
1604 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1605 else
1606 x->xflags |= XFRM_TIME_DEFER;
1607 }
1608
1609 spin_unlock(&x->lock);
1610 }
1611
1612 int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
1613 {
1614 u32 diff;
1615 u32 seq = ntohl(net_seq);
1616
1617 if (unlikely(seq == 0))
1618 return -EINVAL;
1619
1620 if (likely(seq > x->replay.seq))
1621 return 0;
1622
1623 diff = x->replay.seq - seq;
1624 if (diff >= min_t(unsigned int, x->props.replay_window,
1625 sizeof(x->replay.bitmap) * 8)) {
1626 x->stats.replay_window++;
1627 return -EINVAL;
1628 }
1629
1630 if (x->replay.bitmap & (1U << diff)) {
1631 x->stats.replay++;
1632 return -EINVAL;
1633 }
1634 return 0;
1635 }
1636 EXPORT_SYMBOL(xfrm_replay_check);
1637
1638 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1639 {
1640 u32 diff;
1641 u32 seq = ntohl(net_seq);
1642
1643 if (seq > x->replay.seq) {
1644 diff = seq - x->replay.seq;
1645 if (diff < x->props.replay_window)
1646 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1647 else
1648 x->replay.bitmap = 1;
1649 x->replay.seq = seq;
1650 } else {
1651 diff = x->replay.seq - seq;
1652 x->replay.bitmap |= (1U << diff);
1653 }
1654
1655 if (xfrm_aevent_is_on())
1656 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1657 }
1658 EXPORT_SYMBOL(xfrm_replay_advance);
1659
/* Registered key managers (e.g. pfkey, netlink) and the rwlock guarding
 * the list; notification paths take it for reading. */
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
1662
1663 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1664 {
1665 struct xfrm_mgr *km;
1666
1667 read_lock(&xfrm_km_lock);
1668 list_for_each_entry(km, &xfrm_km_list, list)
1669 if (km->notify_policy)
1670 km->notify_policy(xp, dir, c);
1671 read_unlock(&xfrm_km_lock);
1672 }
1673
1674 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1675 {
1676 struct xfrm_mgr *km;
1677 read_lock(&xfrm_km_lock);
1678 list_for_each_entry(km, &xfrm_km_list, list)
1679 if (km->notify)
1680 km->notify(x, c);
1681 read_unlock(&xfrm_km_lock);
1682 }
1683
1684 EXPORT_SYMBOL(km_policy_notify);
1685 EXPORT_SYMBOL(km_state_notify);
1686
1687 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1688 {
1689 struct km_event c;
1690
1691 c.data.hard = hard;
1692 c.pid = pid;
1693 c.event = XFRM_MSG_EXPIRE;
1694 km_state_notify(x, &c);
1695
1696 if (hard)
1697 wake_up(&km_waitq);
1698 }
1699
1700 EXPORT_SYMBOL(km_state_expired);
1701 /*
1702 * We send to all registered managers regardless of failure
1703 * We are happy with one success
1704 */
1705 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1706 {
1707 int err = -EINVAL, acqret;
1708 struct xfrm_mgr *km;
1709
1710 read_lock(&xfrm_km_lock);
1711 list_for_each_entry(km, &xfrm_km_list, list) {
1712 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1713 if (!acqret)
1714 err = acqret;
1715 }
1716 read_unlock(&xfrm_km_lock);
1717 return err;
1718 }
1719 EXPORT_SYMBOL(km_query);
1720
/* Report a NAT-T mapping change to the key managers, stopping at the
 * first one that accepts it.  Returns 0 on the first success, otherwise
 * the last error (or -EINVAL if no manager provides the hook).
 * Note that err persists across managers lacking new_mapping, so the
 * break only triggers after an actual successful call. */
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);
1737
1738 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1739 {
1740 struct km_event c;
1741
1742 c.data.hard = hard;
1743 c.pid = pid;
1744 c.event = XFRM_MSG_POLEXPIRE;
1745 km_policy_notify(pol, dir, &c);
1746
1747 if (hard)
1748 wake_up(&km_waitq);
1749 }
1750 EXPORT_SYMBOL(km_policy_expired);
1751
1752 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1753 struct xfrm_migrate *m, int num_migrate)
1754 {
1755 int err = -EINVAL;
1756 int ret;
1757 struct xfrm_mgr *km;
1758
1759 read_lock(&xfrm_km_lock);
1760 list_for_each_entry(km, &xfrm_km_list, list) {
1761 if (km->migrate) {
1762 ret = km->migrate(sel, dir, type, m, num_migrate);
1763 if (!ret)
1764 err = ret;
1765 }
1766 }
1767 read_unlock(&xfrm_km_lock);
1768 return err;
1769 }
1770 EXPORT_SYMBOL(km_migrate);
1771
1772 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1773 {
1774 int err = -EINVAL;
1775 int ret;
1776 struct xfrm_mgr *km;
1777
1778 read_lock(&xfrm_km_lock);
1779 list_for_each_entry(km, &xfrm_km_list, list) {
1780 if (km->report) {
1781 ret = km->report(proto, sel, addr);
1782 if (!ret)
1783 err = ret;
1784 }
1785 }
1786 read_unlock(&xfrm_km_lock);
1787 return err;
1788 }
1789 EXPORT_SYMBOL(km_report);
1790
/* setsockopt() path: let a key manager compile a user-supplied policy
 * blob and attach the result to socket @sk.
 *
 * compile_policy() uses err as an in/out channel: on success it returns
 * the policy and sets err >= 0 to the policy direction, which is then
 * passed to xfrm_sk_policy_insert().  err < 0 means "try the next
 * manager".  Returns 0 on success or a negative errno.
 */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		/* err holds the direction here */
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1830
/* Register a key manager; it will receive all subsequent km_* events.
 * Always succeeds. */
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
1839
/* Unregister a previously registered key manager.  Always succeeds. */
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
1848
1849 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1850 {
1851 int err = 0;
1852 if (unlikely(afinfo == NULL))
1853 return -EINVAL;
1854 if (unlikely(afinfo->family >= NPROTO))
1855 return -EAFNOSUPPORT;
1856 write_lock_bh(&xfrm_state_afinfo_lock);
1857 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1858 err = -ENOBUFS;
1859 else
1860 xfrm_state_afinfo[afinfo->family] = afinfo;
1861 write_unlock_bh(&xfrm_state_afinfo_lock);
1862 return err;
1863 }
1864 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1865
1866 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1867 {
1868 int err = 0;
1869 if (unlikely(afinfo == NULL))
1870 return -EINVAL;
1871 if (unlikely(afinfo->family >= NPROTO))
1872 return -EAFNOSUPPORT;
1873 write_lock_bh(&xfrm_state_afinfo_lock);
1874 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1875 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1876 err = -EINVAL;
1877 else
1878 xfrm_state_afinfo[afinfo->family] = NULL;
1879 }
1880 write_unlock_bh(&xfrm_state_afinfo_lock);
1881 return err;
1882 }
1883 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1884
/* Look up the afinfo for @family.
 *
 * LOCKING: on success this returns with xfrm_state_afinfo_lock held for
 * reading; the caller MUST release it via xfrm_state_put_afinfo().  On
 * failure (unknown family / not registered) the lock is not held.
 */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1896
/* Release the read lock taken by a successful xfrm_state_get_afinfo().
 * The afinfo argument is unused; it documents the pairing. */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1901
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop state @x's reference on its inner tunnel state.  When only the
 * tunnel's own two references remain (tunnel_users == 2 means x is the
 * last user), the tunnel state itself is deleted. */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1916
1917 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1918 {
1919 int res;
1920
1921 spin_lock_bh(&x->lock);
1922 if (x->km.state == XFRM_STATE_VALID &&
1923 x->type && x->type->get_mtu)
1924 res = x->type->get_mtu(x, mtu);
1925 else
1926 res = mtu - x->props.header_len;
1927 spin_unlock_bh(&x->lock);
1928 return res;
1929 }
1930
/* Finish constructing state @x: resolve per-family flags, inner/outer
 * modes and the transform type, run the type's init, and mark the state
 * VALID.  Returns 0 on success or a negative errno; on failure the
 * state is left un-validated.  Note the success path deliberately falls
 * through the error: label. */
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	/* inner mode is keyed by the selector family (inner packets) */
	x->inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
	if (x->inner_mode == NULL)
		goto error;

	/* non-tunnel modes cannot translate between address families */
	if (!(x->inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
	    family != x->sel.family)
		goto error;

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	/* outer mode is keyed by the state's own (outer) family */
	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);
1979
/* Boot-time initialization: allocate the three SA hash tables with an
 * initial 8 buckets each (they are grown later on demand) and set up
 * the garbage-collection work item. */
void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	/* all three tables share one mask: bucket count - 1 */
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}
1995
1996 #ifdef CONFIG_AUDITSYSCALL
/* Append the SA's security context (if any) and its src/dst addresses
 * to an in-progress audit record. */
static inline void xfrm_audit_common_stateinfo(struct xfrm_state *x,
					       struct audit_buffer *audit_buf)
{
	if (x->security)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 x->security->ctx_alg, x->security->ctx_doi,
				 x->security->ctx_str);

	switch(x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
				 NIPQUAD(x->props.saddr.a4),
				 NIPQUAD(x->id.daddr.a4));
		break;
	case AF_INET6:
		{
			/* copy to aligned locals before formatting */
			struct in6_addr saddr6, daddr6;

			memcpy(&saddr6, x->props.saddr.a6,
			       sizeof(struct in6_addr));
			memcpy(&daddr6, x->id.daddr.a6,
			       sizeof(struct in6_addr));
			audit_log_format(audit_buf,
					 " src=" NIP6_FMT " dst=" NIP6_FMT,
					 NIP6(saddr6), NIP6(daddr6));
		}
		break;
	/* other families: addresses are simply omitted from the record */
	}
}
2026
/* Emit an audit record for an SA-add operation.
 * NOTE(review): the SPI is logged as the raw __be32 value, i.e. in
 * network byte order — confirm consumers expect that rather than
 * ntohl(). */
void
xfrm_audit_state_add(struct xfrm_state *x, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(sid, auid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SAD-add res=%u",result);
	xfrm_audit_common_stateinfo(x, audit_buf);
	audit_log_format(audit_buf, " spi=%lu(0x%lx)",
			 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2045
/* Emit an audit record for an SA-delete operation.
 * NOTE(review): the SPI is logged in network byte order, matching
 * xfrm_audit_state_add() — confirm that is intended. */
void
xfrm_audit_state_delete(struct xfrm_state *x, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(sid, auid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SAD-delete res=%u",result);
	xfrm_audit_common_stateinfo(x, audit_buf);
	audit_log_format(audit_buf, " spi=%lu(0x%lx)",
			 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2064 #endif /* CONFIG_AUDITSYSCALL */