netns xfrm: per-netns state GC work
[deliverable/linux.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
24
25 #include "xfrm_hash.h"
26
/* Netlink socket used by key managers; registered by xfrm_user. */
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

/* Async-event rate-limiting defaults, tunable via sysctl. */
u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Lifetime (seconds) of a larval ACQUIRE state waiting for the key manager. */
u32 sysctl_xfrm_acq_expires __read_mostly = 30;

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

/* Protects the state hash tables, the state_all list and state_num. */
static DEFINE_SPINLOCK(xfrm_state_lock);

/* Upper bound for hash growth; xfrm_hash_grow_check() stops at this size. */
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
/* Bumped on every insert so cached bundles can detect newer states. */
static unsigned int xfrm_state_genid;

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_state_replay(struct xfrm_state *x,
				    struct sk_buff *skb, __be32 net_seq);
#else
#define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
#endif /* CONFIG_AUDITSYSCALL */
60 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
61 xfrm_address_t *saddr,
62 u32 reqid,
63 unsigned short family)
64 {
65 return __xfrm_dst_hash(daddr, saddr, reqid, family, init_net.xfrm.state_hmask);
66 }
67
68 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
69 xfrm_address_t *saddr,
70 unsigned short family)
71 {
72 return __xfrm_src_hash(daddr, saddr, family, init_net.xfrm.state_hmask);
73 }
74
75 static inline unsigned int
76 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
77 {
78 return __xfrm_spi_hash(daddr, spi, proto, family, init_net.xfrm.state_hmask);
79 }
80
/* Re-bucket every state found on @list into the new dst/src/spi tables
 * using @nhashmask.  Caller holds xfrm_state_lock; the old buckets are
 * discarded wholesale afterwards, so nodes are simply re-linked. */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	/* _safe variant: hlist_add_head() below clobbers x's bydst links. */
	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		/* Larval (ACQ) states may not have an SPI yet. */
		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
111
112 static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
113 {
114 return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
115 }
116
/* Serializes concurrent resize work items (one per netns). */
static DEFINE_MUTEX(hash_resize_mutex);

/* Workqueue handler that doubles this netns' three state hash tables.
 * Allocation happens outside xfrm_state_lock; only the transfer and
 * the pointer swap run under the lock, and the old tables are freed
 * after the lock is dropped. */
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_hash_work);
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	/* nsize is always a power-of-two bucket count times bucket size. */
	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = net->xfrm.state_hmask; i >= 0; i--)
		xfrm_hash_transfer(net->xfrm.state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = net->xfrm.state_bydst;
	osrc = net->xfrm.state_bysrc;
	ospi = net->xfrm.state_byspi;
	ohashmask = net->xfrm.state_hmask;

	net->xfrm.state_bydst = ndst;
	net->xfrm.state_bysrc = nsrc;
	net->xfrm.state_byspi = nspi;
	net->xfrm.state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
172
/* Key managers sleep here waiting for state events (expiry, GC, flush). */
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

/* Protects the per-family afinfo registration table below. */
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

/* Protects the per-netns state_gc_list. */
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
185
/* Look up the afinfo for @family and return it with
 * xfrm_state_afinfo_lock write-locked.  On failure (bad family or no
 * afinfo registered) the lock is dropped again and NULL is returned,
 * so callers unlock only on success. */
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_state_afinfo_lock);
	return afinfo;
}
197
/* Drop the write lock taken by a successful xfrm_state_lock_afinfo(). */
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	write_unlock_bh(&xfrm_state_afinfo_lock);
}
203
204 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
205 {
206 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
207 const struct xfrm_type **typemap;
208 int err = 0;
209
210 if (unlikely(afinfo == NULL))
211 return -EAFNOSUPPORT;
212 typemap = afinfo->type_map;
213
214 if (likely(typemap[type->proto] == NULL))
215 typemap[type->proto] = type;
216 else
217 err = -EEXIST;
218 xfrm_state_unlock_afinfo(afinfo);
219 return err;
220 }
221 EXPORT_SYMBOL(xfrm_register_type);
222
223 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
224 {
225 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
226 const struct xfrm_type **typemap;
227 int err = 0;
228
229 if (unlikely(afinfo == NULL))
230 return -EAFNOSUPPORT;
231 typemap = afinfo->type_map;
232
233 if (unlikely(typemap[type->proto] != type))
234 err = -ENOENT;
235 else
236 typemap[type->proto] = NULL;
237 xfrm_state_unlock_afinfo(afinfo);
238 return err;
239 }
240 EXPORT_SYMBOL(xfrm_unregister_type);
241
/* Resolve the xfrm_type handler for @proto/@family, taking a module
 * reference on it.  If nothing is registered, try once to load the
 * "xfrm-type-<family>-<proto>" module and retry.  Returns NULL when
 * the type is unavailable; pair successes with xfrm_put_type(). */
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type **typemap;
	const struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	/* Handler is unloading if the module ref cannot be taken. */
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}
268
/* Release the module reference taken by xfrm_get_type(). */
static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}
273
/* Register an encapsulation mode (transport/tunnel/beet/...) for
 * @family.  Pins the afinfo owner module for as long as the mode stays
 * registered (released in xfrm_unregister_mode()). */
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (modemap[mode->encap])
		goto out;

	/* afinfo module is unloading: refuse the registration. */
	err = -ENOENT;
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
305
/* Remove a previously registered encapsulation mode and drop the
 * afinfo module reference taken at registration time. */
int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		module_put(mode->afinfo->owner);
		err = 0;
	}

	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);
331
/* Resolve the mode handler for @encap/@family, taking a module
 * reference.  Mirrors xfrm_get_type(): one modprobe attempt
 * ("xfrm-mode-<family>-<encap>") before giving up.  Pair successes
 * with xfrm_put_mode(). */
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	/* Handler is unloading if the module ref cannot be taken. */
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}
359
/* Release the module reference taken by xfrm_get_mode(). */
static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}
364
/* Final teardown of a dead state.  Runs from the GC work item, i.e.
 * process context: del_timer_sync() may wait on a running handler. */
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	/* kfree(NULL) is a no-op, so unallocated options are fine. */
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->inner_mode_iaf)
		xfrm_put_mode(x->inner_mode_iaf);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		/* Let the protocol free its private data, then drop the
		 * module reference taken in xfrm_get_type(). */
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
387
/* Workqueue handler: destroy every state queued on this netns' GC
 * list.  The list is snatched under xfrm_state_gc_lock so the (possibly
 * sleeping) destructors run without the lock held. */
static void xfrm_state_gc_task(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_gc_work);
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist)
		xfrm_state_gc_destroy(x);

	/* Key managers may be waiting for the GC to complete. */
	wake_up(&km_waitq);
}
404
405 static inline unsigned long make_jiffies(long secs)
406 {
407 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
408 return MAX_SCHEDULE_TIMEOUT-1;
409 else
410 return secs*HZ;
411 }
412
/* Per-state lifetime timer.  Checks the four lifetime limits, issues a
 * soft-expiry notification once (tracked via km.dying), and on a hard
 * limit deletes the state.  Re-arms itself for the nearest remaining
 * deadline.  Runs in timer (softirq) context, hence plain spin_lock. */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;	/* seconds until the nearest deadline */
	int warn = 0;		/* a soft limit was crossed this round */
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* use_time == 0 means "not used yet": count from now. */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	/* Soft expiry was already signalled earlier; don't repeat it. */
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);	/* soft notification */
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	/* A larval (ACQ) state without an SPI just flips to EXPIRED and
	 * gives the key manager a short grace period before deletion. */
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);	/* hard notification */

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current),
				audit_get_sessionid(current), 0);

out:
	spin_unlock(&x->lock);
}
489
static void xfrm_replay_timer_handler(unsigned long data);

/* Allocate a state owned by @net with one reference held and sane
 * defaults (infinite byte/packet limits, timers set up but not armed).
 * GFP_ATOMIC: callers include the packet-processing fast path.
 * Returns NULL on allocation failure. */
struct xfrm_state *xfrm_state_alloc(struct net *net)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		write_pnet(&x->xs_net, net);
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->km.all);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
		setup_timer(&x->rtimer, xfrm_replay_timer_handler,
				(unsigned long)x);
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		x->inner_mode = NULL;
		x->inner_mode_iaf = NULL;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
523
/* Last-reference destructor: queue @x for the GC worker instead of
 * freeing inline, because teardown needs del_timer_sync() (process
 * context).  NOTE(review): still queues onto init_net's list/work even
 * though @x carries xs_net — presumably a transitional step of the
 * netns conversion; confirm before using with non-init namespaces. */
void __xfrm_state_destroy(struct xfrm_state *x)
{
	WARN_ON(x->km.state != XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->gclist, &init_net.xfrm.state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&init_net.xfrm.state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
534
/* Mark @x dead, unlink it from the all-list and the hash tables, and
 * drop the creation reference.  Caller holds x->lock.  Returns 0 if
 * this call performed the delete, -ESRCH if it was already dead. */
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		list_del(&x->km.all);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		init_net.xfrm.state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
561
562 int xfrm_state_delete(struct xfrm_state *x)
563 {
564 int err;
565
566 spin_lock_bh(&x->lock);
567 err = __xfrm_state_delete(x);
568 spin_unlock_bh(&x->lock);
569
570 return err;
571 }
572 EXPORT_SYMBOL(xfrm_state_delete);
573
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Pre-flight for a flush: ask the LSM whether every state matching
 * @proto may be deleted.  The first veto is audited as a failed delete
 * and returned, aborting the flush before anything is touched.
 * Caller holds xfrm_state_lock. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= init_net.xfrm.state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
/* No LSM support built in: never veto a flush. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
605
/* Delete every non-kernel state whose protocol matches @proto.  The
 * table lock is dropped around each xfrm_state_delete() (it takes
 * x->lock and may call out to key managers), so the bucket walk is
 * restarted from scratch after every deletion. */
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= init_net.xfrm.state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				/* Pin x across the unlocked window. */
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
645
/* Snapshot SAD statistics (state count, current hash mask, max hash
 * size) into @si under the table lock. */
void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&xfrm_state_lock);
	si->sadcnt = init_net.xfrm.state_num;
	si->sadhcnt = init_net.xfrm.state_hmask;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
655
656 static int
657 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
658 struct xfrm_tmpl *tmpl,
659 xfrm_address_t *daddr, xfrm_address_t *saddr,
660 unsigned short family)
661 {
662 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
663 if (!afinfo)
664 return -1;
665 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
666 xfrm_state_put_afinfo(afinfo);
667 return 0;
668 }
669
/* SPI-based lookup: find the state matching (daddr, spi, proto,
 * family) in the byspi table and return it with a reference held, or
 * NULL.  Caller holds xfrm_state_lock. */
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, init_net.xfrm.state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto)
			continue;

		/* Address comparison is family-specific. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
701
/* Address-based lookup for SPI-less protocols: find the state matching
 * (daddr, saddr, proto, family) in the bysrc table and return it with
 * a reference held, or NULL.  Caller holds xfrm_state_lock. */
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto)
			continue;

		/* Address comparison is family-specific. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
736
737 static inline struct xfrm_state *
738 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
739 {
740 if (use_spi)
741 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
742 x->id.proto, family);
743 else
744 return __xfrm_state_lookup_byaddr(&x->id.daddr,
745 &x->props.saddr,
746 x->id.proto, family);
747 }
748
749 static void xfrm_hash_grow_check(int have_hash_collision)
750 {
751 if (have_hash_collision &&
752 (init_net.xfrm.state_hmask + 1) < xfrm_state_hashmax &&
753 init_net.xfrm.state_num > init_net.xfrm.state_hmask)
754 schedule_work(&init_net.xfrm.state_hash_work);
755 }
756
/* Output-path SA resolution: find the best VALID state matching the
 * template/flow, or — when nothing matches and no acquire is pending —
 * create a larval ACQ state and ask the key manager to negotiate one.
 * Returns a referenced state, or NULL with *err set (-EAGAIN while an
 * acquire is in progress, -ESRCH/-EEXIST/-ENOMEM otherwise). */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h;
	struct hlist_node *entry;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	/* Deferred puts: never drop a reference while holding the lock. */
	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				/* Prefer non-dying states; among equals,
				 * the most recently added one. */
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* An exact-SPI state in another reqid bucket means the
		 * triplet is already taken: refuse to acquire. */
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc(&init_net);
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			/* Key manager accepted: hash in the larval state
			 * with a limited lifetime. */
			x->km.state = XFRM_STATE_ACQ;
			list_add(&x->km.all, &init_net.xfrm.state_all);
			hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, init_net.xfrm.state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, init_net.xfrm.state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			init_net.xfrm.state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	if (to_put)
		xfrm_state_put(to_put);
	return x;
}
873
/* Lookup-only variant of xfrm_state_find(): return a referenced VALID
 * state matching the given parameters, or NULL; never creates an ACQ
 * state or notifies key managers.  NOTE(review): takes xfrm_state_lock
 * without _bh while every other user disables BHs — presumably only
 * called from contexts with BHs already off; confirm at the callers. */
struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;
	struct hlist_node *entry;

	spin_lock(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, reqid, family);
	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock(&xfrm_state_lock);


	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
905
/* Link a fully-constructed state into the all-list and the three hash
 * tables, arm its timers and bump the SAD counters.  Caller holds
 * xfrm_state_lock. */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	/* Fresh genid invalidates bundles cached against older states. */
	x->genid = ++xfrm_state_genid;

	list_add(&x->km.all, &init_net.xfrm.state_all);

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, init_net.xfrm.state_bysrc+h);

	/* Larval states get an SPI later; only hash when present. */
	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, init_net.xfrm.state_byspi+h);
	}

	/* Run the lifetime timer soon to pick up immediate expiries. */
	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	init_net.xfrm.state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
938
/* xfrm_state_lock is held */
/* Refresh the genid of every existing state that shares (family,
 * reqid, daddr, saddr) with @xnew, so bundles built on the old states
 * are considered stale and rebuilt against the newcomer. */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
957
/* Public insert: bump genids of overlapping states, then hash @x in,
 * all under the table lock. */
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
966
/* xfrm_state_lock is held */
/* Find a larval (ACQ, SPI-less) state matching the given tuple and
 * return it with a reference; when none exists and @create is set,
 * allocate one pre-filled with a host-exact selector and the acquire
 * lifetime, hash it into the dst/src tables and return it (with an
 * extra reference beyond the allocation one, since it is now linked). */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
		if (x->props.reqid != reqid ||
		    x->props.mode != mode ||
		    x->props.family != family ||
		    x->km.state != XFRM_STATE_ACQ ||
		    x->id.spi != 0 ||
		    x->id.proto != proto)
			continue;

		/* Address comparison is family-specific. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc(&init_net);
	if (likely(x)) {
		/* Selector matches exactly the two hosts involved. */
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		/* Extra reference for the caller; the allocation ref now
		 * belongs to the hash tables. */
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		list_add(&x->km.all, &init_net.xfrm.state_all);
		hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, init_net.xfrm.state_bysrc+h);

		init_net.xfrm.state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
1053
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

/* Insert a fully-negotiated state into the SAD.  Fails with -EEXIST if
 * an equal state is already present.  If a matching larval (ACQ) state
 * exists — found by the key manager's sequence number or by tuple — it
 * is deleted after the new state is inserted, promoting the acquire. */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	/* References are dropped only after the lock is released. */
	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		to_put = x1;
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		/* Seq match must also agree on proto and daddr. */
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			to_put = x1;
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	/* Kill the larval state the new one replaces, if any. */
	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	if (to_put)
		xfrm_state_put(to_put);

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
1109
#ifdef CONFIG_XFRM_MIGRATE
/* Deep-copy @orig into a fresh state (id, selector, lifetimes,
 * algorithms, encap and care-of address), re-running xfrm_init_state()
 * on the copy.  Returns the clone, or NULL with *errp set.
 *
 * NOTE(review): the error path frees with bare kfree() rather than the
 * GC path — if xfrm_init_state() partially succeeded this looks like it
 * leaks the type/mode module references and security context; upstream
 * later reworked this path.  Confirm before relying on it. */
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc(&init_net);
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

error:
	if (errp)
		*errp = err;
	if (x) {
		/* kfree(NULL) is fine for members never allocated. */
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	kfree(x);
	return NULL;
}
1186
/* xfrm_state_lock is held */
/* Find the state to be migrated: by (old_daddr, old_saddr, reqid) in
 * the dst table when @m carries a reqid, otherwise by the address pair
 * in the src table.  Returns a referenced state or NULL.  (The inner
 * "m->reqid &&" re-check is redundant inside the reqid branch but
 * harmless.) */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
1231
1232 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1233 struct xfrm_migrate *m)
1234 {
1235 struct xfrm_state *xc;
1236 int err;
1237
1238 xc = xfrm_state_clone(x, &err);
1239 if (!xc)
1240 return NULL;
1241
1242 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1243 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1244
1245 /* add state */
1246 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1247 /* a care is needed when the destination address of the
1248 state is to be updated as it is a part of triplet */
1249 xfrm_state_insert(xc);
1250 } else {
1251 if ((err = xfrm_state_add(xc)) < 0)
1252 goto error;
1253 }
1254
1255 return xc;
1256 error:
1257 kfree(xc);
1258 return NULL;
1259 }
1260 EXPORT_SYMBOL(xfrm_state_migrate);
1261 #endif
1262
/* Replace/refresh an existing SA with the parameters carried in @x.
 *
 * Returns 0 on success, -ESRCH if no matching SA exists, -EEXIST if the
 * matching SA is kernel-internal and may not be touched, -EINVAL if the
 * match is no longer valid by the time we lock it.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		/* Kernel-owned state; drop the reference outside the lock. */
		to_put = x1;
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* The match is only a larval ACQUIRE stub: insert @x as the
		 * real SA; x == NULL below means "retire the stub". */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (to_put)
		xfrm_state_put(to_put);

	if (err)
		return err;

	if (!x) {
		/* @x replaced the ACQ stub x1; delete and release the stub. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	/* Otherwise update the existing valid SA in place under its lock. */
	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		/* Selector only matters for non-SPI protocols. */
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-arm the lifetime timer against the new limits. */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1331
/* Check @x against its byte/packet lifetime limits.
 *
 * Returns 0 while the state is usable; -EINVAL once it is invalid or a
 * hard limit has been reached (the state is then marked EXPIRED and its
 * timer fired immediately).  Crossing a soft limit notifies the key
 * manager once via km_state_expired().
 */
int xfrm_state_check_expire(struct xfrm_state *x)
{
	/* First use: stamp the use time. */
	if (!x->curlft.use_time)
		x->curlft.use_time = get_seconds();

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		/* Run the state timer right away to reap the expired SA. */
		mod_timer(&x->timer, jiffies);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;	/* notify the key manager only once */
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
1356
1357 struct xfrm_state *
1358 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1359 unsigned short family)
1360 {
1361 struct xfrm_state *x;
1362
1363 spin_lock_bh(&xfrm_state_lock);
1364 x = __xfrm_state_lookup(daddr, spi, proto, family);
1365 spin_unlock_bh(&xfrm_state_lock);
1366 return x;
1367 }
1368 EXPORT_SYMBOL(xfrm_state_lookup);
1369
1370 struct xfrm_state *
1371 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1372 u8 proto, unsigned short family)
1373 {
1374 struct xfrm_state *x;
1375
1376 spin_lock_bh(&xfrm_state_lock);
1377 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1378 spin_unlock_bh(&xfrm_state_lock);
1379 return x;
1380 }
1381 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1382
1383 struct xfrm_state *
1384 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1385 xfrm_address_t *daddr, xfrm_address_t *saddr,
1386 int create, unsigned short family)
1387 {
1388 struct xfrm_state *x;
1389
1390 spin_lock_bh(&xfrm_state_lock);
1391 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1392 spin_unlock_bh(&xfrm_state_lock);
1393
1394 return x;
1395 }
1396 EXPORT_SYMBOL(xfrm_find_acq);
1397
1398 #ifdef CONFIG_XFRM_SUB_POLICY
1399 int
1400 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1401 unsigned short family)
1402 {
1403 int err = 0;
1404 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1405 if (!afinfo)
1406 return -EAFNOSUPPORT;
1407
1408 spin_lock_bh(&xfrm_state_lock);
1409 if (afinfo->tmpl_sort)
1410 err = afinfo->tmpl_sort(dst, src, n);
1411 spin_unlock_bh(&xfrm_state_lock);
1412 xfrm_state_put_afinfo(afinfo);
1413 return err;
1414 }
1415 EXPORT_SYMBOL(xfrm_tmpl_sort);
1416
1417 int
1418 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1419 unsigned short family)
1420 {
1421 int err = 0;
1422 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1423 if (!afinfo)
1424 return -EAFNOSUPPORT;
1425
1426 spin_lock_bh(&xfrm_state_lock);
1427 if (afinfo->state_sort)
1428 err = afinfo->state_sort(dst, src, n);
1429 spin_unlock_bh(&xfrm_state_lock);
1430 xfrm_state_put_afinfo(afinfo);
1431 return err;
1432 }
1433 EXPORT_SYMBOL(xfrm_state_sort);
1434 #endif
1435
1436 /* Silly enough, but I'm lazy to build resolution list */
1437
1438 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1439 {
1440 int i;
1441
1442 for (i = 0; i <= init_net.xfrm.state_hmask; i++) {
1443 struct hlist_node *entry;
1444 struct xfrm_state *x;
1445
1446 hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
1447 if (x->km.seq == seq &&
1448 x->km.state == XFRM_STATE_ACQ) {
1449 xfrm_state_hold(x);
1450 return x;
1451 }
1452 }
1453 }
1454 return NULL;
1455 }
1456
1457 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1458 {
1459 struct xfrm_state *x;
1460
1461 spin_lock_bh(&xfrm_state_lock);
1462 x = __xfrm_find_acq_byseq(seq);
1463 spin_unlock_bh(&xfrm_state_lock);
1464 return x;
1465 }
1466 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1467
1468 u32 xfrm_get_acqseq(void)
1469 {
1470 u32 res;
1471 static u32 acqseq;
1472 static DEFINE_SPINLOCK(acqseq_lock);
1473
1474 spin_lock_bh(&acqseq_lock);
1475 res = (++acqseq ? : ++acqseq);
1476 spin_unlock_bh(&acqseq_lock);
1477 return res;
1478 }
1479 EXPORT_SYMBOL(xfrm_get_acqseq);
1480
/* Assign an SPI to state @x from the range [low, high].
 *
 * If low == high the exact SPI is requested and must be free; otherwise
 * we probe random values in the range.  On success the state is linked
 * into the byspi hash.  Returns 0 on success, -ENOENT if no free SPI was
 * found or the state is dead.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	/* Nothing to do if an SPI is already assigned. */
	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		/* Exact SPI requested: fail if it is already in use. */
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		/* Randomized probing, at most (high - low + 1) attempts. */
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* Make the state findable by SPI lookups. */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, init_net.xfrm.state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1533
/* Resumable walk over all states, invoking @func on each state matching
 * walk->proto.
 *
 * The walk structure itself is linked into state_all as an anchor, so a
 * dump that stops early (func returns non-zero) can resume where it left
 * off on the next call.  Returns 0 when the walk completes, -ENOENT if
 * nothing matched at all, or the non-zero value returned by @func.
 */
int xfrm_state_walk(struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *state;
	struct xfrm_state_walk *x;
	int err = 0;

	/* A finished walk (seq != 0 and unlinked) stays finished. */
	if (walk->seq != 0 && list_empty(&walk->all))
		return 0;

	spin_lock_bh(&xfrm_state_lock);
	if (list_empty(&walk->all))
		x = list_first_entry(&init_net.xfrm.state_all, struct xfrm_state_walk, all);
	else
		x = list_entry(&walk->all, struct xfrm_state_walk, all);
	list_for_each_entry_from(x, &init_net.xfrm.state_all, all) {
		/* Skip other walkers' anchors (marked XFRM_STATE_DEAD). */
		if (x->state == XFRM_STATE_DEAD)
			continue;
		state = container_of(x, struct xfrm_state, km);
		if (!xfrm_id_proto_match(state->id.proto, walk->proto))
			continue;
		err = func(state, walk->seq, data);
		if (err) {
			/* Park the anchor right after the last visited
			 * entry so the walk can resume there. */
			list_move_tail(&walk->all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		err = -ENOENT;
		goto out;
	}
	list_del_init(&walk->all);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1573
1574 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
1575 {
1576 INIT_LIST_HEAD(&walk->all);
1577 walk->proto = proto;
1578 walk->state = XFRM_STATE_DEAD;
1579 walk->seq = 0;
1580 }
1581 EXPORT_SYMBOL(xfrm_state_walk_init);
1582
1583 void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1584 {
1585 if (list_empty(&walk->all))
1586 return;
1587
1588 spin_lock_bh(&xfrm_state_lock);
1589 list_del(&walk->all);
1590 spin_lock_bh(&xfrm_state_lock);
1591 }
1592 EXPORT_SYMBOL(xfrm_state_walk_done);
1593
1594
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 * 1. we updated on of the sequence numbers, and the seqno difference
	 * is at least x->replay_maxdiff, in this case we also update the
	 * timeout of our timer function
	 * 2. if x->replay_maxage has elapsed since last update,
	 * and there were changes
	 *
	 * The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Below the configured delta in both directions: defer the
		 * notification unless a timeout was previously deferred. */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* No change since last notification: remember that we owe
		 * one and bail out. */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* Snapshot the state we are notifying about. */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	/* Re-arm the aging timer; clear the defer flag only if the timer
	 * was not already pending. */
	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1641
1642 static void xfrm_replay_timer_handler(unsigned long data)
1643 {
1644 struct xfrm_state *x = (struct xfrm_state*)data;
1645
1646 spin_lock(&x->lock);
1647
1648 if (x->km.state == XFRM_STATE_VALID) {
1649 if (xfrm_aevent_is_on())
1650 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1651 else
1652 x->xflags |= XFRM_TIME_DEFER;
1653 }
1654
1655 spin_unlock(&x->lock);
1656 }
1657
/* Anti-replay check for an inbound sequence number (sliding-window
 * scheme).  Returns 0 if @net_seq is acceptable; -EINVAL (after emitting
 * an audit record) if it is zero, outside the window, or already seen.
 */
int xfrm_replay_check(struct xfrm_state *x,
		      struct sk_buff *skb, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	/* Sequence number 0 is never valid. */
	if (unlikely(seq == 0))
		goto err;

	/* Ahead of the window top: always acceptable. */
	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	/* Effective window is capped by the bitmap width. */
	if (diff >= min_t(unsigned int, x->props.replay_window,
			  sizeof(x->replay.bitmap) * 8)) {
		x->stats.replay_window++;
		goto err;
	}

	if (x->replay.bitmap & (1U << diff)) {
		/* Duplicate within the window. */
		x->stats.replay++;
		goto err;
	}
	return 0;

err:
	xfrm_audit_state_replay(x, skb, net_seq);
	return -EINVAL;
}
1687
/* Record inbound sequence number @net_seq as seen, sliding the replay
 * window forward when it is ahead of the current top.  Callers validate
 * the number with xfrm_replay_check() first.
 */
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		/* New right edge: shift the window and set the new top bit. */
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		/* Inside the window: just mark the corresponding bit. */
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
1708
1709 static LIST_HEAD(xfrm_km_list);
1710 static DEFINE_RWLOCK(xfrm_km_lock);
1711
1712 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1713 {
1714 struct xfrm_mgr *km;
1715
1716 read_lock(&xfrm_km_lock);
1717 list_for_each_entry(km, &xfrm_km_list, list)
1718 if (km->notify_policy)
1719 km->notify_policy(xp, dir, c);
1720 read_unlock(&xfrm_km_lock);
1721 }
1722
1723 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1724 {
1725 struct xfrm_mgr *km;
1726 read_lock(&xfrm_km_lock);
1727 list_for_each_entry(km, &xfrm_km_list, list)
1728 if (km->notify)
1729 km->notify(x, c);
1730 read_unlock(&xfrm_km_lock);
1731 }
1732
1733 EXPORT_SYMBOL(km_policy_notify);
1734 EXPORT_SYMBOL(km_state_notify);
1735
1736 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1737 {
1738 struct km_event c;
1739
1740 c.data.hard = hard;
1741 c.pid = pid;
1742 c.event = XFRM_MSG_EXPIRE;
1743 km_state_notify(x, &c);
1744
1745 if (hard)
1746 wake_up(&km_waitq);
1747 }
1748
1749 EXPORT_SYMBOL(km_state_expired);
1750 /*
1751 * We send to all registered managers regardless of failure
1752 * We are happy with one success
1753 */
1754 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1755 {
1756 int err = -EINVAL, acqret;
1757 struct xfrm_mgr *km;
1758
1759 read_lock(&xfrm_km_lock);
1760 list_for_each_entry(km, &xfrm_km_list, list) {
1761 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1762 if (!acqret)
1763 err = acqret;
1764 }
1765 read_unlock(&xfrm_km_lock);
1766 return err;
1767 }
1768 EXPORT_SYMBOL(km_query);
1769
1770 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1771 {
1772 int err = -EINVAL;
1773 struct xfrm_mgr *km;
1774
1775 read_lock(&xfrm_km_lock);
1776 list_for_each_entry(km, &xfrm_km_list, list) {
1777 if (km->new_mapping)
1778 err = km->new_mapping(x, ipaddr, sport);
1779 if (!err)
1780 break;
1781 }
1782 read_unlock(&xfrm_km_lock);
1783 return err;
1784 }
1785 EXPORT_SYMBOL(km_new_mapping);
1786
1787 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1788 {
1789 struct km_event c;
1790
1791 c.data.hard = hard;
1792 c.pid = pid;
1793 c.event = XFRM_MSG_POLEXPIRE;
1794 km_policy_notify(pol, dir, &c);
1795
1796 if (hard)
1797 wake_up(&km_waitq);
1798 }
1799 EXPORT_SYMBOL(km_policy_expired);
1800
1801 #ifdef CONFIG_XFRM_MIGRATE
1802 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1803 struct xfrm_migrate *m, int num_migrate,
1804 struct xfrm_kmaddress *k)
1805 {
1806 int err = -EINVAL;
1807 int ret;
1808 struct xfrm_mgr *km;
1809
1810 read_lock(&xfrm_km_lock);
1811 list_for_each_entry(km, &xfrm_km_list, list) {
1812 if (km->migrate) {
1813 ret = km->migrate(sel, dir, type, m, num_migrate, k);
1814 if (!ret)
1815 err = ret;
1816 }
1817 }
1818 read_unlock(&xfrm_km_lock);
1819 return err;
1820 }
1821 EXPORT_SYMBOL(km_migrate);
1822 #endif
1823
1824 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1825 {
1826 int err = -EINVAL;
1827 int ret;
1828 struct xfrm_mgr *km;
1829
1830 read_lock(&xfrm_km_lock);
1831 list_for_each_entry(km, &xfrm_km_list, list) {
1832 if (km->report) {
1833 ret = km->report(proto, sel, addr);
1834 if (!ret)
1835 err = ret;
1836 }
1837 }
1838 read_unlock(&xfrm_km_lock);
1839 return err;
1840 }
1841 EXPORT_SYMBOL(km_report);
1842
/* Handle the IP{,V6}_XFRM_POLICY setsockopt: copy the user-supplied
 * policy blob and let each registered key manager try to compile it into
 * a socket policy.  A manager signals success by returning err >= 0,
 * where err carries the policy direction.
 */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		/* err >= 0: this manager understood the blob format */
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		/* err is the policy direction here, not an error code. */
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1882
1883 int xfrm_register_km(struct xfrm_mgr *km)
1884 {
1885 write_lock_bh(&xfrm_km_lock);
1886 list_add_tail(&km->list, &xfrm_km_list);
1887 write_unlock_bh(&xfrm_km_lock);
1888 return 0;
1889 }
1890 EXPORT_SYMBOL(xfrm_register_km);
1891
1892 int xfrm_unregister_km(struct xfrm_mgr *km)
1893 {
1894 write_lock_bh(&xfrm_km_lock);
1895 list_del(&km->list);
1896 write_unlock_bh(&xfrm_km_lock);
1897 return 0;
1898 }
1899 EXPORT_SYMBOL(xfrm_unregister_km);
1900
1901 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1902 {
1903 int err = 0;
1904 if (unlikely(afinfo == NULL))
1905 return -EINVAL;
1906 if (unlikely(afinfo->family >= NPROTO))
1907 return -EAFNOSUPPORT;
1908 write_lock_bh(&xfrm_state_afinfo_lock);
1909 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1910 err = -ENOBUFS;
1911 else
1912 xfrm_state_afinfo[afinfo->family] = afinfo;
1913 write_unlock_bh(&xfrm_state_afinfo_lock);
1914 return err;
1915 }
1916 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1917
1918 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1919 {
1920 int err = 0;
1921 if (unlikely(afinfo == NULL))
1922 return -EINVAL;
1923 if (unlikely(afinfo->family >= NPROTO))
1924 return -EAFNOSUPPORT;
1925 write_lock_bh(&xfrm_state_afinfo_lock);
1926 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1927 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1928 err = -EINVAL;
1929 else
1930 xfrm_state_afinfo[afinfo->family] = NULL;
1931 }
1932 write_unlock_bh(&xfrm_state_afinfo_lock);
1933 return err;
1934 }
1935 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1936
/* Look up the per-family state ops.  On success the afinfo read lock is
 * left held (release via xfrm_state_put_afinfo()); on failure the lock
 * is dropped and NULL is returned.
 */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1948
/* Release the read lock left held by a successful xfrm_state_get_afinfo(). */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1954
1955 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Detach @x from its inner tunnel state: when we hold the last external
 * user count (tunnel_users == 2, i.e. the tunnel itself plus us), delete
 * the tunnel state; then drop our user count and reference.
 */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1969
1970 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1971 {
1972 int res;
1973
1974 spin_lock_bh(&x->lock);
1975 if (x->km.state == XFRM_STATE_VALID &&
1976 x->type && x->type->get_mtu)
1977 res = x->type->get_mtu(x, mtu);
1978 else
1979 res = mtu - x->props.header_len;
1980 spin_unlock_bh(&x->lock);
1981 return res;
1982 }
1983
/* Resolve the type and mode operations for a freshly configured state,
 * run per-family init_flags and per-type init_state, and mark the state
 * XFRM_STATE_VALID on success.  Returns 0 or a negative errno.
 */
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *inner_mode;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;

	if (x->sel.family != AF_UNSPEC) {
		/* A concrete selector family pins the inner mode. */
		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
		if (inner_mode == NULL)
			goto error;

		/* Cross-family setups require a tunnel mode. */
		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    family != x->sel.family) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		x->inner_mode = inner_mode;
	} else {
		/* AF_UNSPEC selector: keep both IPv4 and IPv6 inner modes
		 * so either family can be carried (tunnel modes only). */
		struct xfrm_mode *inner_mode_iaf;

		inner_mode = xfrm_get_mode(x->props.mode, AF_INET);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		inner_mode_iaf = xfrm_get_mode(x->props.mode, AF_INET6);
		if (inner_mode_iaf == NULL)
			goto error;

		if (!(inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode_iaf);
			goto error;
		}

		/* inner_mode matches the outer family; the other family's
		 * mode goes in inner_mode_iaf. */
		if (x->props.family == AF_INET) {
			x->inner_mode = inner_mode;
			x->inner_mode_iaf = inner_mode_iaf;
		} else {
			x->inner_mode = inner_mode_iaf;
			x->inner_mode_iaf = inner_mode;
		}
	}

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);
2068
/* Per-netns state subsystem setup: allocate the initial 8-bucket
 * bydst/bysrc/byspi hash tables and initialise the resize/GC work items.
 * Returns 0 or -ENOMEM (freeing whatever was already allocated).
 */
int __net_init xfrm_state_init(struct net *net)
{
	unsigned int sz;

	INIT_LIST_HEAD(&net->xfrm.state_all);

	/* Start small; xfrm_hash_resize grows the tables on demand. */
	sz = sizeof(struct hlist_head) * 8;

	net->xfrm.state_bydst = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bydst)
		goto out_bydst;
	net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bysrc)
		goto out_bysrc;
	net->xfrm.state_byspi = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_byspi)
		goto out_byspi;
	net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	net->xfrm.state_num = 0;
	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
	INIT_HLIST_HEAD(&net->xfrm.state_gc_list);
	INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task);
	return 0;

out_byspi:
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
out_bysrc:
	xfrm_hash_free(net->xfrm.state_bydst, sz);
out_bydst:
	return -ENOMEM;
}
2101
/* Per-netns teardown counterpart of xfrm_state_init(): all states must
 * be gone already; release the (possibly resized) hash tables.
 */
void xfrm_state_fini(struct net *net)
{
	unsigned int sz;

	WARN_ON(!list_empty(&net->xfrm.state_all));

	/* hmask + 1 buckets, whatever size the tables grew to. */
	sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.state_byspi));
	xfrm_hash_free(net->xfrm.state_byspi, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bydst));
	xfrm_hash_free(net->xfrm.state_bydst, sz);
}
2116
2117 #ifdef CONFIG_AUDITSYSCALL
/* Append SA identification (security context, source/destination
 * addresses and SPI) to an in-progress audit record.
 */
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
				     struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = x->security;
	u32 spi = ntohl(x->id.spi);

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch(x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &x->props.saddr.a4, &x->id.daddr.a4);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
				 x->props.saddr.a6, x->id.daddr.a6);
		break;
	}

	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}
2141
/* Append the packet's IP addresses (and, for IPv6, the flow label) to an
 * in-progress audit record.  @skb's network header must be set.
 */
static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
				      struct audit_buffer *audit_buf)
{
	struct iphdr *iph4;
	struct ipv6hdr *iph6;

	switch (family) {
	case AF_INET:
		iph4 = ip_hdr(skb);
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &iph4->saddr, &iph4->daddr);
		break;
	case AF_INET6:
		iph6 = ipv6_hdr(skb);
		audit_log_format(audit_buf,
				 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
				 &iph6->saddr,&iph6->daddr,
				 iph6->flow_lbl[0] & 0x0f,
				 iph6->flow_lbl[1],
				 iph6->flow_lbl[2]);
		break;
	}
}
2165
2166 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2167 uid_t auid, u32 sessionid, u32 secid)
2168 {
2169 struct audit_buffer *audit_buf;
2170
2171 audit_buf = xfrm_audit_start("SAD-add");
2172 if (audit_buf == NULL)
2173 return;
2174 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2175 xfrm_audit_helper_sainfo(x, audit_buf);
2176 audit_log_format(audit_buf, " res=%u", result);
2177 audit_log_end(audit_buf);
2178 }
2179 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2180
2181 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2182 uid_t auid, u32 sessionid, u32 secid)
2183 {
2184 struct audit_buffer *audit_buf;
2185
2186 audit_buf = xfrm_audit_start("SAD-delete");
2187 if (audit_buf == NULL)
2188 return;
2189 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2190 xfrm_audit_helper_sainfo(x, audit_buf);
2191 audit_log_format(audit_buf, " res=%u", result);
2192 audit_log_end(audit_buf);
2193 }
2194 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2195
2196 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2197 struct sk_buff *skb)
2198 {
2199 struct audit_buffer *audit_buf;
2200 u32 spi;
2201
2202 audit_buf = xfrm_audit_start("SA-replay-overflow");
2203 if (audit_buf == NULL)
2204 return;
2205 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2206 /* don't record the sequence number because it's inherent in this kind
2207 * of audit message */
2208 spi = ntohl(x->id.spi);
2209 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2210 audit_log_end(audit_buf);
2211 }
2212 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2213
2214 static void xfrm_audit_state_replay(struct xfrm_state *x,
2215 struct sk_buff *skb, __be32 net_seq)
2216 {
2217 struct audit_buffer *audit_buf;
2218 u32 spi;
2219
2220 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2221 if (audit_buf == NULL)
2222 return;
2223 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2224 spi = ntohl(x->id.spi);
2225 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2226 spi, spi, ntohl(net_seq));
2227 audit_log_end(audit_buf);
2228 }
2229
2230 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2231 {
2232 struct audit_buffer *audit_buf;
2233
2234 audit_buf = xfrm_audit_start("SA-notfound");
2235 if (audit_buf == NULL)
2236 return;
2237 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2238 audit_log_end(audit_buf);
2239 }
2240 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2241
2242 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2243 __be32 net_spi, __be32 net_seq)
2244 {
2245 struct audit_buffer *audit_buf;
2246 u32 spi;
2247
2248 audit_buf = xfrm_audit_start("SA-notfound");
2249 if (audit_buf == NULL)
2250 return;
2251 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2252 spi = ntohl(net_spi);
2253 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2254 spi, spi, ntohl(net_seq));
2255 audit_log_end(audit_buf);
2256 }
2257 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2258
2259 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2260 struct sk_buff *skb, u8 proto)
2261 {
2262 struct audit_buffer *audit_buf;
2263 __be32 net_spi;
2264 __be32 net_seq;
2265
2266 audit_buf = xfrm_audit_start("SA-icv-failure");
2267 if (audit_buf == NULL)
2268 return;
2269 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2270 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2271 u32 spi = ntohl(net_spi);
2272 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2273 spi, spi, ntohl(net_seq));
2274 }
2275 audit_log_end(audit_buf);
2276 }
2277 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2278 #endif /* CONFIG_AUDITSYSCALL */