netns xfrm: per-netns km_waitq
[deliverable/linux.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
24
25 #include "xfrm_hash.h"
26
/* Netlink socket used by the key-manager interface (xfrm_user). */
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

/* Default aevent timer interval; sysctl-tunable. */
u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

/* Default replay-sequence threshold for aevents; sysctl-tunable. */
u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Lifetime (seconds) of larval ACQUIRE states before they hard-expire. */
u32 sysctl_xfrm_acq_expires __read_mostly = 30;

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

/* Protects the state hash tables, the state_all list and state_num. */
static DEFINE_SPINLOCK(xfrm_state_lock);

/* Upper bound for the hash tables; resizing stops growing at this size. */
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
/* Bumped on insert to invalidate cached bundles (see x->genid users). */
static unsigned int xfrm_state_genid;

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_state_replay(struct xfrm_state *x,
				    struct sk_buff *skb, __be32 net_seq);
#else
#define xfrm_audit_state_replay(x, s, sq)	do { ; } while (0)
#endif /* CONFIG_AUDITSYSCALL */
59
/* Bucket index in the bydst table for a (daddr, saddr, reqid) triple.
 * Uses init_net's current hash mask.
 */
static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, init_net.xfrm.state_hmask);
}
67
/* Bucket index in the bysrc table for a (daddr, saddr) pair. */
static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, init_net.xfrm.state_hmask);
}
74
/* Bucket index in the byspi table for a (daddr, spi, proto) triple. */
static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, init_net.xfrm.state_hmask);
}
80
/* Rehash every state on bydst chain @list into the new dst/src/spi
 * tables sized by @nhashmask.  Called only from xfrm_hash_resize() with
 * xfrm_state_lock held, hence the plain (non-RCU) hlist operations.
 */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		/* Only states with an SPI assigned live in the byspi table. */
		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
111
112 static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
113 {
114 return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
115 }
116
/* Serialises hash growth: the work item may be queued repeatedly, but
 * only one resize runs at a time.
 */
static DEFINE_MUTEX(hash_resize_mutex);

/* Work item: double the three state hash tables.  All new tables are
 * allocated up front, outside any spinlock; only the rehash and the
 * pointer/mask swap run under xfrm_state_lock.
 */
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_hash_work);
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	/* Walking bydst suffices: every state is linked there, and
	 * xfrm_hash_transfer() re-links all three of its hash nodes.
	 */
	for (i = net->xfrm.state_hmask; i >= 0; i--)
		xfrm_hash_transfer(net->xfrm.state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = net->xfrm.state_bydst;
	osrc = net->xfrm.state_bysrc;
	ospi = net->xfrm.state_byspi;
	ohashmask = net->xfrm.state_hmask;

	net->xfrm.state_bydst = ndst;
	net->xfrm.state_bysrc = nsrc;
	net->xfrm.state_byspi = nspi;
	net->xfrm.state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	/* The old tables are unreachable now; free them outside the lock. */
	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
172
/* Protects registration and lookup of the per-family afinfo ops. */
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

/* Protects the per-netns state_gc_list. */
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
182
/* Take xfrm_state_afinfo_lock for writing and return @family's afinfo.
 * On success the lock is LEFT HELD; the caller must release it via
 * xfrm_state_unlock_afinfo().  Returns NULL (with the lock released)
 * if the family is out of range or not registered.
 */
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_state_afinfo_lock);
	return afinfo;
}
194
/* Release the lock taken by a successful xfrm_state_lock_afinfo(). */
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	write_unlock_bh(&xfrm_state_afinfo_lock);
}
200
201 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
202 {
203 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
204 const struct xfrm_type **typemap;
205 int err = 0;
206
207 if (unlikely(afinfo == NULL))
208 return -EAFNOSUPPORT;
209 typemap = afinfo->type_map;
210
211 if (likely(typemap[type->proto] == NULL))
212 typemap[type->proto] = type;
213 else
214 err = -EEXIST;
215 xfrm_state_unlock_afinfo(afinfo);
216 return err;
217 }
218 EXPORT_SYMBOL(xfrm_register_type);
219
220 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
221 {
222 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
223 const struct xfrm_type **typemap;
224 int err = 0;
225
226 if (unlikely(afinfo == NULL))
227 return -EAFNOSUPPORT;
228 typemap = afinfo->type_map;
229
230 if (unlikely(typemap[type->proto] != type))
231 err = -ENOENT;
232 else
233 typemap[type->proto] = NULL;
234 xfrm_state_unlock_afinfo(afinfo);
235 return err;
236 }
237 EXPORT_SYMBOL(xfrm_unregister_type);
238
/* Look up the xfrm_type handler for @proto/@family and pin its module.
 * If no handler is registered yet, try once to modprobe
 * "xfrm-type-<family>-<proto>" and retry.  Returns NULL if the family
 * is unknown or no handler could be found/pinned.  Pair a non-NULL
 * result with xfrm_put_type().
 */
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type **typemap;
	const struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	/* Treat a handler whose module is unloading as absent. */
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}
265
/* Drop the module reference taken by xfrm_get_type(). */
static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}
270
271 int xfrm_register_mode(struct xfrm_mode *mode, int family)
272 {
273 struct xfrm_state_afinfo *afinfo;
274 struct xfrm_mode **modemap;
275 int err;
276
277 if (unlikely(mode->encap >= XFRM_MODE_MAX))
278 return -EINVAL;
279
280 afinfo = xfrm_state_lock_afinfo(family);
281 if (unlikely(afinfo == NULL))
282 return -EAFNOSUPPORT;
283
284 err = -EEXIST;
285 modemap = afinfo->mode_map;
286 if (modemap[mode->encap])
287 goto out;
288
289 err = -ENOENT;
290 if (!try_module_get(afinfo->owner))
291 goto out;
292
293 mode->afinfo = afinfo;
294 modemap[mode->encap] = mode;
295 err = 0;
296
297 out:
298 xfrm_state_unlock_afinfo(afinfo);
299 return err;
300 }
301 EXPORT_SYMBOL(xfrm_register_mode);
302
303 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
304 {
305 struct xfrm_state_afinfo *afinfo;
306 struct xfrm_mode **modemap;
307 int err;
308
309 if (unlikely(mode->encap >= XFRM_MODE_MAX))
310 return -EINVAL;
311
312 afinfo = xfrm_state_lock_afinfo(family);
313 if (unlikely(afinfo == NULL))
314 return -EAFNOSUPPORT;
315
316 err = -ENOENT;
317 modemap = afinfo->mode_map;
318 if (likely(modemap[mode->encap] == mode)) {
319 modemap[mode->encap] = NULL;
320 module_put(mode->afinfo->owner);
321 err = 0;
322 }
323
324 xfrm_state_unlock_afinfo(afinfo);
325 return err;
326 }
327 EXPORT_SYMBOL(xfrm_unregister_mode);
328
/* Look up the mode handler for @encap/@family and pin its module.
 * If absent, try once to modprobe "xfrm-mode-<family>-<encap>" and
 * retry.  Returns NULL on failure; pair success with xfrm_put_mode().
 */
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	/* Treat a handler whose module is unloading as absent. */
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}
356
/* Drop the module reference taken by xfrm_get_mode(). */
static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}
361
/* Final teardown of a dead state: stop its timers, free every owned
 * allocation, release mode/type references and the security context.
 * Runs from the GC work item, i.e. in process context —
 * del_timer_sync() may wait for a running timer handler.
 */
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->inner_mode_iaf)
		xfrm_put_mode(x->inner_mode_iaf);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
384
/* GC work item: atomically take over the whole pending list, then
 * destroy each state outside the lock.  Finally wake km_waitq so
 * waiters (e.g. a flush) can observe that destruction completed.
 */
static void xfrm_state_gc_task(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_gc_work);
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist)
		xfrm_state_gc_destroy(x);

	wake_up(&net->xfrm.km_waitq);
}
401
402 static inline unsigned long make_jiffies(long secs)
403 {
404 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
405 return MAX_SCHEDULE_TIMEOUT-1;
406 else
407 return secs*HZ;
408 }
409
/* Per-state lifetime timer.  Compares the hard/soft add/use expiry
 * limits against wall-clock seconds, signals soft expiry to the key
 * managers once, reschedules itself for the nearest future deadline,
 * and deletes the state on hard expiry.  Runs in timer (softirq)
 * context; x->lock serialises against other state users.
 */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;	/* seconds until the nearest deadline */
	int warn = 0;		/* set when a soft limit was crossed */
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* use_time == 0 means "not used yet": count from now. */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	/* Soft expiry was already signalled once; don't warn again. */
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		/* Larval state without an SPI: mark it expired and poll
		 * again shortly so km_waitq sleepers notice.
		 * NOTE(review): wakes init_net's waitqueue although the
		 * state records its own netns via xs_net — confirm
		 * against the per-netns km_waitq conversion.
		 */
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&init_net.xfrm.km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current),
				audit_get_sessionid(current), 0);

out:
	spin_unlock(&x->lock);
}
486
487 static void xfrm_replay_timer_handler(unsigned long data);
488
489 struct xfrm_state *xfrm_state_alloc(struct net *net)
490 {
491 struct xfrm_state *x;
492
493 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
494
495 if (x) {
496 write_pnet(&x->xs_net, net);
497 atomic_set(&x->refcnt, 1);
498 atomic_set(&x->tunnel_users, 0);
499 INIT_LIST_HEAD(&x->km.all);
500 INIT_HLIST_NODE(&x->bydst);
501 INIT_HLIST_NODE(&x->bysrc);
502 INIT_HLIST_NODE(&x->byspi);
503 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
504 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
505 (unsigned long)x);
506 x->curlft.add_time = get_seconds();
507 x->lft.soft_byte_limit = XFRM_INF;
508 x->lft.soft_packet_limit = XFRM_INF;
509 x->lft.hard_byte_limit = XFRM_INF;
510 x->lft.hard_packet_limit = XFRM_INF;
511 x->replay_maxage = 0;
512 x->replay_maxdiff = 0;
513 x->inner_mode = NULL;
514 x->inner_mode_iaf = NULL;
515 spin_lock_init(&x->lock);
516 }
517 return x;
518 }
519 EXPORT_SYMBOL(xfrm_state_alloc);
520
521 void __xfrm_state_destroy(struct xfrm_state *x)
522 {
523 WARN_ON(x->km.state != XFRM_STATE_DEAD);
524
525 spin_lock_bh(&xfrm_state_gc_lock);
526 hlist_add_head(&x->gclist, &init_net.xfrm.state_gc_list);
527 spin_unlock_bh(&xfrm_state_gc_lock);
528 schedule_work(&init_net.xfrm.state_gc_work);
529 }
530 EXPORT_SYMBOL(__xfrm_state_destroy);
531
532 int __xfrm_state_delete(struct xfrm_state *x)
533 {
534 int err = -ESRCH;
535
536 if (x->km.state != XFRM_STATE_DEAD) {
537 x->km.state = XFRM_STATE_DEAD;
538 spin_lock(&xfrm_state_lock);
539 list_del(&x->km.all);
540 hlist_del(&x->bydst);
541 hlist_del(&x->bysrc);
542 if (x->id.spi)
543 hlist_del(&x->byspi);
544 init_net.xfrm.state_num--;
545 spin_unlock(&xfrm_state_lock);
546
547 /* All xfrm_state objects are created by xfrm_state_alloc.
548 * The xfrm_state_alloc call gives a reference, and that
549 * is what we are dropping here.
550 */
551 xfrm_state_put(x);
552 err = 0;
553 }
554
555 return err;
556 }
557 EXPORT_SYMBOL(__xfrm_state_delete);
558
559 int xfrm_state_delete(struct xfrm_state *x)
560 {
561 int err;
562
563 spin_lock_bh(&x->lock);
564 err = __xfrm_state_delete(x);
565 spin_unlock_bh(&x->lock);
566
567 return err;
568 }
569 EXPORT_SYMBOL(xfrm_state_delete);
570
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Pre-flight for a flush: ask the LSM whether every state matching
 * @proto may be deleted.  Returns 0 if all may go, otherwise the first
 * security error; the denied state is audit-logged.  Called with
 * xfrm_state_lock held.
 */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= init_net.xfrm.state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
/* Without an LSM there is nothing to veto; the flush always proceeds. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
602
/* Delete every non-kernel-owned state whose protocol matches @proto,
 * after the LSM has approved the whole flush.  Each bucket scan
 * restarts from the head after a delete because xfrm_state_lock must
 * be dropped around xfrm_state_delete() (it takes x->lock and may
 * notify key managers), invalidating the iterator.
 */
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= init_net.xfrm.state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				/* Hold x so it survives the unlock below. */
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	/* Let km_waitq sleepers re-evaluate after the mass delete.
	 * NOTE(review): wakes init_net's waitqueue; confirm against the
	 * per-netns km_waitq conversion. */
	wake_up(&init_net.xfrm.km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
642
/* Report SAD statistics: current SA count, current hash mask and the
 * configured hash maximum.  Taken under xfrm_state_lock so the count
 * and mask are mutually consistent.
 */
void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&xfrm_state_lock);
	si->sadcnt = init_net.xfrm.state_num;
	si->sadhcnt = init_net.xfrm.state_hmask;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
652
653 static int
654 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
655 struct xfrm_tmpl *tmpl,
656 xfrm_address_t *daddr, xfrm_address_t *saddr,
657 unsigned short family)
658 {
659 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
660 if (!afinfo)
661 return -1;
662 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
663 xfrm_state_put_afinfo(afinfo);
664 return 0;
665 }
666
/* SPI-based SAD lookup: find the state matching (daddr, spi, proto)
 * for @family and return it with a reference held, or NULL.
 * Caller must hold xfrm_state_lock.
 */
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, init_net.xfrm.state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi       != spi ||
		    x->id.proto     != proto)
			continue;

		/* The hash compared addresses only indirectly; verify the
		 * destination address per family. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
698
/* Address-pair SAD lookup (no SPI): find the state matching
 * (daddr, saddr, proto) for @family and return it with a reference
 * held, or NULL.  Caller must hold xfrm_state_lock.
 */
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto     != proto)
			continue;

		/* Verify both endpoint addresses per family. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
733
734 static inline struct xfrm_state *
735 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
736 {
737 if (use_spi)
738 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
739 x->id.proto, family);
740 else
741 return __xfrm_state_lookup_byaddr(&x->id.daddr,
742 &x->props.saddr,
743 x->id.proto, family);
744 }
745
746 static void xfrm_hash_grow_check(int have_hash_collision)
747 {
748 if (have_hash_collision &&
749 (init_net.xfrm.state_hmask + 1) < xfrm_state_hashmax &&
750 init_net.xfrm.state_num > init_net.xfrm.state_hmask)
751 schedule_work(&init_net.xfrm.state_hash_work);
752 }
753
/* Resolve an output-path flow to a usable SA.
 *
 * Scans the bydst bucket for states matching the template and flow and
 * returns the best VALID candidate with a reference held.  If nothing
 * matches and no acquire is already pending, a larval
 * (XFRM_STATE_ACQ) state is created and the key managers are asked to
 * negotiate it.  On failure returns NULL with *err set; -EAGAIN means
 * an acquire is in progress elsewhere.
 */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h;
	struct hlist_node *entry;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				/* Prefer non-dying states; among equals,
				 * prefer the most recently added. */
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* A template with a fixed SPI that matches an existing
		 * state by SPI but failed the checks above conflicts. */
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc(&init_net);
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			/* A key manager accepted the query: publish the
			 * larval state with a bounded lifetime. */
			x->km.state = XFRM_STATE_ACQ;
			list_add(&x->km.all, &init_net.xfrm.state_all);
			hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, init_net.xfrm.state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, init_net.xfrm.state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			init_net.xfrm.state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	/* Deferred put may run destructors; do it outside the lock. */
	if (to_put)
		xfrm_state_put(to_put);
	return x;
}
870
/* Lookup-only variant of xfrm_state_find(): return the first VALID
 * state matching the exact (addresses, mode, proto, reqid) tuple with
 * a reference held, or NULL.  Never creates larval states or queries
 * key managers.
 */
struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;
	struct hlist_node *entry;

	spin_lock(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, reqid, family);
	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock(&xfrm_state_lock);


	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
902
/* Link @x into the state_all list and all three hash tables, arm its
 * timers, bump counters and wake km_waitq.  Caller must hold
 * xfrm_state_lock.
 */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	/* New generation id invalidates cached bundles built on peers. */
	x->genid = ++xfrm_state_genid;

	list_add(&x->km.all, &init_net.xfrm.state_all);

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, init_net.xfrm.state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, init_net.xfrm.state_byspi+h);
	}

	/* Fire the lifetime timer soon to pick up configured limits. */
	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&init_net.xfrm.km_waitq);

	init_net.xfrm.state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
935
/* xfrm_state_lock is held */
/* Refresh the generation id of every existing state sharing @xnew's
 * (daddr, saddr, reqid) triple, so bundles cached against them are
 * re-resolved and can pick up the newly inserted state.
 */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
		if (x->props.family	== family &&
		    x->props.reqid	== reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
954
/* Insert a fully constructed state into the SAD, first bumping the
 * genids of states sharing its triple so stale bundles get rebuilt.
 */
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
963
/* xfrm_state_lock is held */
/* Find an existing larval (ACQ, SPI-less) state for the given tuple,
 * or — when @create is set — allocate, initialise and insert a new one
 * whose temporary selector covers exactly the given host addresses.
 * Returns a referenced state or NULL.
 */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0 ||
		    x->id.proto	    != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc(&init_net);
	if (likely(x)) {
		/* Temporary selector: full-length host prefixes so the
		 * larval state matches only this exact address pair. */
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		/* Extra reference for the caller; the timer bounds the
		 * larval state's lifetime. */
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		list_add(&x->km.all, &init_net.xfrm.state_all);
		hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, init_net.xfrm.state_bysrc+h);

		init_net.xfrm.state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
1050
1051 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
1052
/* Insert a brand-new state into the SAD.  Fails with -EEXIST when an
 * equivalent state (by SPI, or by address pair for SPI-less protocols)
 * is already installed.  Any larval acquire state that @x resolves —
 * matched by KM sequence number or by tuple — is deleted after the
 * insert so waiters re-drive off the real SA.
 */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		to_put = x1;
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	/* Prefer the acquire state matching our KM sequence number,
	 * but only if it really describes the same SA. */
	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			to_put = x1;
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	/* Retire the acquire state this SA resolves, outside the lock. */
	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	if (to_put)
		xfrm_state_put(to_put);

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
1106
1107 #ifdef CONFIG_XFRM_MIGRATE
/* Deep-copy @orig for migration: id, selector, lifetime configuration,
 * replay window, algorithms, encapsulation template and care-of
 * address, then re-initialise the copy via xfrm_init_state().
 * On failure writes the error to *errp (if non-NULL), frees what was
 * copied so far and returns NULL.  The clone was never published, so
 * freeing it directly here is safe.
 */
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc(&init_net);
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	/* Preserve the original's age and KM bookkeeping. */
	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

 error:
	if (errp)
		*errp = err;
	if (x) {
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	/* kfree(NULL) is a no-op, so this covers the alloc-failed path.
	 * NOTE(review): assumes a failing xfrm_init_state() releases any
	 * type/mode references it took — confirm in xfrm_init_state. */
	kfree(x);
	return NULL;
}
1183
/* xfrm_state_lock is held */
/* Find the state a migration entry @m refers to, by its OLD addresses.
 * With a reqid the bydst table is searched; without one, the bysrc
 * table.  Returns a referenced state or NULL.
 */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			/* NOTE(review): m->reqid is known non-zero in this
			 * branch, so the first half of this test is
			 * redundant; kept as-is. */
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
1228
/* Create and install a copy of @x re-keyed to the migration's new
 * source/destination addresses.  Returns the new (referenced) state,
 * or NULL on clone/insert failure.
 */
struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
				       struct xfrm_migrate *m)
{
	struct xfrm_state *xc;
	int err;

	xc = xfrm_state_clone(x, &err);
	if (!xc)
		return NULL;

	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

	/* add state */
	if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* a care is needed when the destination address of the
		   state is to be updated as it is a part of triplet */
		xfrm_state_insert(xc);
	} else {
		if ((err = xfrm_state_add(xc)) < 0)
			goto error;
	}

	return xc;
error:
	/* NOTE(review): plain kfree() here appears to leak the algorithm
	 * and encap copies made by xfrm_state_clone(); xfrm_state_put()
	 * looks like the proper teardown — verify. */
	kfree(xc);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif
1259
/* Merge the parameters of @x into the installed SA it matches.
 *
 * The match is located by SPI identity (or by addresses for protocols
 * without an SPI).  Then either:
 *   - a larval (ACQ) match is replaced outright: @x is hashed in and
 *     the old entry deleted, or
 *   - encap/coaddr/selector/lifetime settings are copied onto the
 *     existing VALID state under its own lock.
 *
 * Returns 0 on success, -ESRCH if no match is installed, -EEXIST if
 * the match is a kernel-internal (tunnel) state, -EINVAL if the match
 * is no longer VALID.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		/* Kernel-owned states may not be updated; drop the
		 * reference after releasing xfrm_state_lock. */
		to_put = x1;
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* Larval entry: hash in the fully-specified @x now; the
		 * old entry is deleted below, outside the lock. */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (to_put)
		xfrm_state_put(to_put);

	if (err)
		return err;

	if (!x) {
		/* @x replaced the larval state; retire the old entry. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-arm the per-state timer so the new lifetimes are
		 * evaluated promptly. */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1328
1329 int xfrm_state_check_expire(struct xfrm_state *x)
1330 {
1331 if (!x->curlft.use_time)
1332 x->curlft.use_time = get_seconds();
1333
1334 if (x->km.state != XFRM_STATE_VALID)
1335 return -EINVAL;
1336
1337 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1338 x->curlft.packets >= x->lft.hard_packet_limit) {
1339 x->km.state = XFRM_STATE_EXPIRED;
1340 mod_timer(&x->timer, jiffies);
1341 return -EINVAL;
1342 }
1343
1344 if (!x->km.dying &&
1345 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1346 x->curlft.packets >= x->lft.soft_packet_limit)) {
1347 x->km.dying = 1;
1348 km_state_expired(x, 0, 0);
1349 }
1350 return 0;
1351 }
1352 EXPORT_SYMBOL(xfrm_state_check_expire);
1353
1354 struct xfrm_state *
1355 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1356 unsigned short family)
1357 {
1358 struct xfrm_state *x;
1359
1360 spin_lock_bh(&xfrm_state_lock);
1361 x = __xfrm_state_lookup(daddr, spi, proto, family);
1362 spin_unlock_bh(&xfrm_state_lock);
1363 return x;
1364 }
1365 EXPORT_SYMBOL(xfrm_state_lookup);
1366
1367 struct xfrm_state *
1368 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1369 u8 proto, unsigned short family)
1370 {
1371 struct xfrm_state *x;
1372
1373 spin_lock_bh(&xfrm_state_lock);
1374 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1375 spin_unlock_bh(&xfrm_state_lock);
1376 return x;
1377 }
1378 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1379
1380 struct xfrm_state *
1381 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1382 xfrm_address_t *daddr, xfrm_address_t *saddr,
1383 int create, unsigned short family)
1384 {
1385 struct xfrm_state *x;
1386
1387 spin_lock_bh(&xfrm_state_lock);
1388 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1389 spin_unlock_bh(&xfrm_state_lock);
1390
1391 return x;
1392 }
1393 EXPORT_SYMBOL(xfrm_find_acq);
1394
1395 #ifdef CONFIG_XFRM_SUB_POLICY
1396 int
1397 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1398 unsigned short family)
1399 {
1400 int err = 0;
1401 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1402 if (!afinfo)
1403 return -EAFNOSUPPORT;
1404
1405 spin_lock_bh(&xfrm_state_lock);
1406 if (afinfo->tmpl_sort)
1407 err = afinfo->tmpl_sort(dst, src, n);
1408 spin_unlock_bh(&xfrm_state_lock);
1409 xfrm_state_put_afinfo(afinfo);
1410 return err;
1411 }
1412 EXPORT_SYMBOL(xfrm_tmpl_sort);
1413
1414 int
1415 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1416 unsigned short family)
1417 {
1418 int err = 0;
1419 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1420 if (!afinfo)
1421 return -EAFNOSUPPORT;
1422
1423 spin_lock_bh(&xfrm_state_lock);
1424 if (afinfo->state_sort)
1425 err = afinfo->state_sort(dst, src, n);
1426 spin_unlock_bh(&xfrm_state_lock);
1427 xfrm_state_put_afinfo(afinfo);
1428 return err;
1429 }
1430 EXPORT_SYMBOL(xfrm_state_sort);
1431 #endif
1432
/* Silly enough, but I'm lazy to build resolution list */

/* Linear scan of the entire by-destination hash for a larval (ACQ)
 * state carrying km sequence number @seq.  Returns a held reference or
 * NULL.  xfrm_state_lock is held by the caller.
 */
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;

	for (i = 0; i <= init_net.xfrm.state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}
1453
1454 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1455 {
1456 struct xfrm_state *x;
1457
1458 spin_lock_bh(&xfrm_state_lock);
1459 x = __xfrm_find_acq_byseq(seq);
1460 spin_unlock_bh(&xfrm_state_lock);
1461 return x;
1462 }
1463 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1464
1465 u32 xfrm_get_acqseq(void)
1466 {
1467 u32 res;
1468 static u32 acqseq;
1469 static DEFINE_SPINLOCK(acqseq_lock);
1470
1471 spin_lock_bh(&acqseq_lock);
1472 res = (++acqseq ? : ++acqseq);
1473 spin_unlock_bh(&acqseq_lock);
1474 return res;
1475 }
1476 EXPORT_SYMBOL(xfrm_get_acqseq);
1477
/* Assign an SPI to @x from the inclusive [low, high] range (host byte
 * order) and hash the state into the by-SPI table.  low == high
 * requests that exact SPI; otherwise up to high-low+1 random probes are
 * made for an unused value.
 *
 * Returns 0 on success (an already-set SPI also counts), -ENOENT if
 * the state is DEAD or no free SPI was found.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	err = 0;
	if (x->id.spi)
		goto unlock;	/* already allocated */

	err = -ENOENT;

	if (minspi == maxspi) {
		/* Caller wants this exact SPI; fail if it is taken. */
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		/* Randomized probing, bounded by the size of the range. */
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* Make the state findable by its new SPI. */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, init_net.xfrm.state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1530
/* Resumable walk over all states in init_net, invoking @func on every
 * live entry whose proto matches the walk's filter.
 *
 * walk->all is a dummy list node: empty means the walk has not started
 * (or has already finished); otherwise it marks the resume position
 * inside state_all.  If @func returns non-zero the node is parked after
 * the current entry and that error is returned, so a later call resumes
 * from there.  Returns -ENOENT if a completed walk visited nothing.
 */
int xfrm_state_walk(struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *state;
	struct xfrm_state_walk *x;
	int err = 0;

	/* Already ran to completion on an earlier call. */
	if (walk->seq != 0 && list_empty(&walk->all))
		return 0;

	spin_lock_bh(&xfrm_state_lock);
	if (list_empty(&walk->all))
		x = list_first_entry(&init_net.xfrm.state_all, struct xfrm_state_walk, all);
	else
		x = list_entry(&walk->all, struct xfrm_state_walk, all);
	list_for_each_entry_from(x, &init_net.xfrm.state_all, all) {
		if (x->state == XFRM_STATE_DEAD)
			continue;
		state = container_of(x, struct xfrm_state, km);
		if (!xfrm_id_proto_match(state->id.proto, walk->proto))
			continue;
		err = func(state, walk->seq, data);
		if (err) {
			/* Park the walk node so we can resume here. */
			list_move_tail(&walk->all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		err = -ENOENT;
		goto out;
	}
	list_del_init(&walk->all);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1570
1571 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
1572 {
1573 INIT_LIST_HEAD(&walk->all);
1574 walk->proto = proto;
1575 walk->state = XFRM_STATE_DEAD;
1576 walk->seq = 0;
1577 }
1578 EXPORT_SYMBOL(xfrm_state_walk_init);
1579
1580 void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1581 {
1582 if (list_empty(&walk->all))
1583 return;
1584
1585 spin_lock_bh(&xfrm_state_lock);
1586 list_del(&walk->all);
1587 spin_lock_bh(&xfrm_state_lock);
1588 }
1589 EXPORT_SYMBOL(xfrm_state_walk_done);
1590
1591
/* Emit an XFRM_MSG_NEWAE replay-counter notification for @x, throttled
 * against the snapshot kept in x->preplay.
 */
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated on of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Suppress until the delta from the last reported
		 * counters reaches replay_maxdiff — unless a timeout
		 * notification was previously deferred, in which case
		 * deliver it now as a TIMEOUT event. */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Nothing changed since the last snapshot: defer until
		 * the next counter update. */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* Snapshot the counters we are about to report. */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1638
1639 static void xfrm_replay_timer_handler(unsigned long data)
1640 {
1641 struct xfrm_state *x = (struct xfrm_state*)data;
1642
1643 spin_lock(&x->lock);
1644
1645 if (x->km.state == XFRM_STATE_VALID) {
1646 if (xfrm_aevent_is_on())
1647 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1648 else
1649 x->xflags |= XFRM_TIME_DEFER;
1650 }
1651
1652 spin_unlock(&x->lock);
1653 }
1654
1655 int xfrm_replay_check(struct xfrm_state *x,
1656 struct sk_buff *skb, __be32 net_seq)
1657 {
1658 u32 diff;
1659 u32 seq = ntohl(net_seq);
1660
1661 if (unlikely(seq == 0))
1662 goto err;
1663
1664 if (likely(seq > x->replay.seq))
1665 return 0;
1666
1667 diff = x->replay.seq - seq;
1668 if (diff >= min_t(unsigned int, x->props.replay_window,
1669 sizeof(x->replay.bitmap) * 8)) {
1670 x->stats.replay_window++;
1671 goto err;
1672 }
1673
1674 if (x->replay.bitmap & (1U << diff)) {
1675 x->stats.replay++;
1676 goto err;
1677 }
1678 return 0;
1679
1680 err:
1681 xfrm_audit_state_replay(x, skb, net_seq);
1682 return -EINVAL;
1683 }
1684
1685 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1686 {
1687 u32 diff;
1688 u32 seq = ntohl(net_seq);
1689
1690 if (seq > x->replay.seq) {
1691 diff = seq - x->replay.seq;
1692 if (diff < x->props.replay_window)
1693 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1694 else
1695 x->replay.bitmap = 1;
1696 x->replay.seq = seq;
1697 } else {
1698 diff = x->replay.seq - seq;
1699 x->replay.bitmap |= (1U << diff);
1700 }
1701
1702 if (xfrm_aevent_is_on())
1703 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1704 }
1705
1706 static LIST_HEAD(xfrm_km_list);
1707 static DEFINE_RWLOCK(xfrm_km_lock);
1708
1709 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1710 {
1711 struct xfrm_mgr *km;
1712
1713 read_lock(&xfrm_km_lock);
1714 list_for_each_entry(km, &xfrm_km_list, list)
1715 if (km->notify_policy)
1716 km->notify_policy(xp, dir, c);
1717 read_unlock(&xfrm_km_lock);
1718 }
1719
1720 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1721 {
1722 struct xfrm_mgr *km;
1723 read_lock(&xfrm_km_lock);
1724 list_for_each_entry(km, &xfrm_km_list, list)
1725 if (km->notify)
1726 km->notify(x, c);
1727 read_unlock(&xfrm_km_lock);
1728 }
1729
1730 EXPORT_SYMBOL(km_policy_notify);
1731 EXPORT_SYMBOL(km_state_notify);
1732
1733 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1734 {
1735 struct km_event c;
1736
1737 c.data.hard = hard;
1738 c.pid = pid;
1739 c.event = XFRM_MSG_EXPIRE;
1740 km_state_notify(x, &c);
1741
1742 if (hard)
1743 wake_up(&init_net.xfrm.km_waitq);
1744 }
1745
1746 EXPORT_SYMBOL(km_state_expired);
1747 /*
1748 * We send to all registered managers regardless of failure
1749 * We are happy with one success
1750 */
1751 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1752 {
1753 int err = -EINVAL, acqret;
1754 struct xfrm_mgr *km;
1755
1756 read_lock(&xfrm_km_lock);
1757 list_for_each_entry(km, &xfrm_km_list, list) {
1758 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1759 if (!acqret)
1760 err = acqret;
1761 }
1762 read_unlock(&xfrm_km_lock);
1763 return err;
1764 }
1765 EXPORT_SYMBOL(km_query);
1766
1767 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1768 {
1769 int err = -EINVAL;
1770 struct xfrm_mgr *km;
1771
1772 read_lock(&xfrm_km_lock);
1773 list_for_each_entry(km, &xfrm_km_list, list) {
1774 if (km->new_mapping)
1775 err = km->new_mapping(x, ipaddr, sport);
1776 if (!err)
1777 break;
1778 }
1779 read_unlock(&xfrm_km_lock);
1780 return err;
1781 }
1782 EXPORT_SYMBOL(km_new_mapping);
1783
1784 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1785 {
1786 struct km_event c;
1787
1788 c.data.hard = hard;
1789 c.pid = pid;
1790 c.event = XFRM_MSG_POLEXPIRE;
1791 km_policy_notify(pol, dir, &c);
1792
1793 if (hard)
1794 wake_up(&init_net.xfrm.km_waitq);
1795 }
1796 EXPORT_SYMBOL(km_policy_expired);
1797
#ifdef CONFIG_XFRM_MIGRATE
/* Broadcast a MIGRATE request to every key manager implementing the
 * hook; one acceptance is enough for overall success.
 */
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate,
	       struct xfrm_kmaddress *k)
{
	struct xfrm_mgr *km;
	int err = -EINVAL;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (!km->migrate)
			continue;
		if (!km->migrate(sel, dir, type, m, num_migrate, k))
			err = 0;
	}
	read_unlock(&xfrm_km_lock);

	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif
1820
1821 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1822 {
1823 int err = -EINVAL;
1824 int ret;
1825 struct xfrm_mgr *km;
1826
1827 read_lock(&xfrm_km_lock);
1828 list_for_each_entry(km, &xfrm_km_list, list) {
1829 if (km->report) {
1830 ret = km->report(proto, sel, addr);
1831 if (!ret)
1832 err = ret;
1833 }
1834 }
1835 read_unlock(&xfrm_km_lock);
1836 return err;
1837 }
1838 EXPORT_SYMBOL(km_report);
1839
/* setsockopt() path for per-socket IPsec policy: copy the opaque policy
 * blob from userspace and let each registered key manager try to
 * compile it.  A non-negative compile result selects the slot
 * (direction) under which the policy is attached to @sk.
 *
 * Returns 0 on success, -EMSGSIZE for a bad optlen, -ENOMEM, -EFAULT,
 * or the last compile error from the managers (initially -EINVAL when
 * none is registered).
 */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	/* First manager that understands the blob wins (err >= 0). */
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		/* Insert hands a reference to the socket; drop ours. */
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1879
/* Register a key manager; it will start receiving xfrm notifications.
 * Always succeeds.
 */
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
1888
/* Unregister a previously registered key manager.  Always succeeds. */
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
1897
1898 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1899 {
1900 int err = 0;
1901 if (unlikely(afinfo == NULL))
1902 return -EINVAL;
1903 if (unlikely(afinfo->family >= NPROTO))
1904 return -EAFNOSUPPORT;
1905 write_lock_bh(&xfrm_state_afinfo_lock);
1906 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1907 err = -ENOBUFS;
1908 else
1909 xfrm_state_afinfo[afinfo->family] = afinfo;
1910 write_unlock_bh(&xfrm_state_afinfo_lock);
1911 return err;
1912 }
1913 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1914
1915 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1916 {
1917 int err = 0;
1918 if (unlikely(afinfo == NULL))
1919 return -EINVAL;
1920 if (unlikely(afinfo->family >= NPROTO))
1921 return -EAFNOSUPPORT;
1922 write_lock_bh(&xfrm_state_afinfo_lock);
1923 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1924 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1925 err = -EINVAL;
1926 else
1927 xfrm_state_afinfo[afinfo->family] = NULL;
1928 }
1929 write_unlock_bh(&xfrm_state_afinfo_lock);
1930 return err;
1931 }
1932 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1933
/* Look up the per-family state ops.  On success the afinfo read lock is
 * LEFT HELD and must be released with xfrm_state_put_afinfo(); on
 * failure (bad family or nothing registered) the lock is dropped and
 * NULL returned.
 */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1945
/* Release the read lock left held by a successful
 * xfrm_state_get_afinfo().
 */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1951
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Detach @x from its tunnel state: drop our reference and user count,
 * deleting the tunnel state when tunnel_users == 2 (presumably the
 * count held by us plus the owner — verify).
 */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1966
1967 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1968 {
1969 int res;
1970
1971 spin_lock_bh(&x->lock);
1972 if (x->km.state == XFRM_STATE_VALID &&
1973 x->type && x->type->get_mtu)
1974 res = x->type->get_mtu(x, mtu);
1975 else
1976 res = mtu - x->props.header_len;
1977 spin_unlock_bh(&x->lock);
1978 return res;
1979 }
1980
/* Finish initialising a state: run the family's init_flags hook,
 * resolve the inner and outer xfrm modes and the transform type, run
 * the type's init, and mark the state VALID.
 *
 * Returns 0 or a negative errno (-EAFNOSUPPORT for an unknown family,
 * -EPROTONOSUPPORT when a mode or type cannot be resolved).
 */
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *inner_mode;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;

	if (x->sel.family != AF_UNSPEC) {
		/* Concrete selector family: a single inner mode.  A
		 * family change across the SA is only allowed for
		 * tunnel-capable modes. */
		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    family != x->sel.family) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		x->inner_mode = inner_mode;
	} else {
		struct xfrm_mode *inner_mode_iaf;

		/* Wildcard selector: load tunnel-capable inner modes for
		 * both IPv4 and IPv6; the one matching props.family
		 * becomes the primary inner mode. */
		inner_mode = xfrm_get_mode(x->props.mode, AF_INET);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		inner_mode_iaf = xfrm_get_mode(x->props.mode, AF_INET6);
		if (inner_mode_iaf == NULL)
			goto error;

		if (!(inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode_iaf);
			goto error;
		}

		if (x->props.family == AF_INET) {
			x->inner_mode = inner_mode;
			x->inner_mode_iaf = inner_mode_iaf;
		} else {
			x->inner_mode = inner_mode_iaf;
			x->inner_mode_iaf = inner_mode;
		}
	}

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);
2065
/* Per-namespace init of the state subsystem: allocate the three
 * initial 8-bucket hash tables (by dst, by src, by SPI), and set up the
 * GC/resize work items and the km wait queue.  Returns 0 or -ENOMEM,
 * freeing any tables already allocated on failure.
 */
int __net_init xfrm_state_init(struct net *net)
{
	unsigned int sz;

	INIT_LIST_HEAD(&net->xfrm.state_all);

	/* Initial table size: 8 buckets; resized later by
	 * state_hash_work when the tables fill up. */
	sz = sizeof(struct hlist_head) * 8;

	net->xfrm.state_bydst = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bydst)
		goto out_bydst;
	net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bysrc)
		goto out_bysrc;
	net->xfrm.state_byspi = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_byspi)
		goto out_byspi;
	net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	net->xfrm.state_num = 0;
	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
	INIT_HLIST_HEAD(&net->xfrm.state_gc_list);
	INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task);
	init_waitqueue_head(&net->xfrm.km_waitq);
	return 0;

out_byspi:
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
out_bysrc:
	xfrm_hash_free(net->xfrm.state_bydst, sz);
out_bydst:
	return -ENOMEM;
}
2099
/* Per-namespace teardown: all states must already be gone; warn on any
 * leftovers and free the three hash tables.
 */
void xfrm_state_fini(struct net *net)
{
	unsigned int sz;

	WARN_ON(!list_empty(&net->xfrm.state_all));

	sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.state_byspi));
	xfrm_hash_free(net->xfrm.state_byspi, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bydst));
	xfrm_hash_free(net->xfrm.state_bydst, sz);
}
2114
2115 #ifdef CONFIG_AUDITSYSCALL
2116 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2117 struct audit_buffer *audit_buf)
2118 {
2119 struct xfrm_sec_ctx *ctx = x->security;
2120 u32 spi = ntohl(x->id.spi);
2121
2122 if (ctx)
2123 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2124 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2125
2126 switch(x->props.family) {
2127 case AF_INET:
2128 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2129 &x->props.saddr.a4, &x->id.daddr.a4);
2130 break;
2131 case AF_INET6:
2132 audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
2133 x->props.saddr.a6, x->id.daddr.a6);
2134 break;
2135 }
2136
2137 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2138 }
2139
2140 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2141 struct audit_buffer *audit_buf)
2142 {
2143 struct iphdr *iph4;
2144 struct ipv6hdr *iph6;
2145
2146 switch (family) {
2147 case AF_INET:
2148 iph4 = ip_hdr(skb);
2149 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2150 &iph4->saddr, &iph4->daddr);
2151 break;
2152 case AF_INET6:
2153 iph6 = ipv6_hdr(skb);
2154 audit_log_format(audit_buf,
2155 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2156 &iph6->saddr,&iph6->daddr,
2157 iph6->flow_lbl[0] & 0x0f,
2158 iph6->flow_lbl[1],
2159 iph6->flow_lbl[2]);
2160 break;
2161 }
2162 }
2163
2164 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2165 uid_t auid, u32 sessionid, u32 secid)
2166 {
2167 struct audit_buffer *audit_buf;
2168
2169 audit_buf = xfrm_audit_start("SAD-add");
2170 if (audit_buf == NULL)
2171 return;
2172 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2173 xfrm_audit_helper_sainfo(x, audit_buf);
2174 audit_log_format(audit_buf, " res=%u", result);
2175 audit_log_end(audit_buf);
2176 }
2177 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2178
2179 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2180 uid_t auid, u32 sessionid, u32 secid)
2181 {
2182 struct audit_buffer *audit_buf;
2183
2184 audit_buf = xfrm_audit_start("SAD-delete");
2185 if (audit_buf == NULL)
2186 return;
2187 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2188 xfrm_audit_helper_sainfo(x, audit_buf);
2189 audit_log_format(audit_buf, " res=%u", result);
2190 audit_log_end(audit_buf);
2191 }
2192 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2193
2194 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2195 struct sk_buff *skb)
2196 {
2197 struct audit_buffer *audit_buf;
2198 u32 spi;
2199
2200 audit_buf = xfrm_audit_start("SA-replay-overflow");
2201 if (audit_buf == NULL)
2202 return;
2203 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2204 /* don't record the sequence number because it's inherent in this kind
2205 * of audit message */
2206 spi = ntohl(x->id.spi);
2207 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2208 audit_log_end(audit_buf);
2209 }
2210 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2211
2212 static void xfrm_audit_state_replay(struct xfrm_state *x,
2213 struct sk_buff *skb, __be32 net_seq)
2214 {
2215 struct audit_buffer *audit_buf;
2216 u32 spi;
2217
2218 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2219 if (audit_buf == NULL)
2220 return;
2221 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2222 spi = ntohl(x->id.spi);
2223 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2224 spi, spi, ntohl(net_seq));
2225 audit_log_end(audit_buf);
2226 }
2227
2228 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2229 {
2230 struct audit_buffer *audit_buf;
2231
2232 audit_buf = xfrm_audit_start("SA-notfound");
2233 if (audit_buf == NULL)
2234 return;
2235 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2236 audit_log_end(audit_buf);
2237 }
2238 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2239
2240 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2241 __be32 net_spi, __be32 net_seq)
2242 {
2243 struct audit_buffer *audit_buf;
2244 u32 spi;
2245
2246 audit_buf = xfrm_audit_start("SA-notfound");
2247 if (audit_buf == NULL)
2248 return;
2249 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2250 spi = ntohl(net_spi);
2251 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2252 spi, spi, ntohl(net_seq));
2253 audit_log_end(audit_buf);
2254 }
2255 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2256
2257 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2258 struct sk_buff *skb, u8 proto)
2259 {
2260 struct audit_buffer *audit_buf;
2261 __be32 net_spi;
2262 __be32 net_seq;
2263
2264 audit_buf = xfrm_audit_start("SA-icv-failure");
2265 if (audit_buf == NULL)
2266 return;
2267 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2268 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2269 u32 spi = ntohl(net_spi);
2270 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2271 spi, spi, ntohl(net_seq));
2272 }
2273 audit_log_end(audit_buf);
2274 }
2275 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2276 #endif /* CONFIG_AUDITSYSCALL */
This page took 0.073388 seconds and 6 git commands to generate.