52d828bdf3d7244ab2cd5703d46864f00c5c0417
[deliverable/linux.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
24
25 #include "xfrm_hash.h"
26
27 struct sock *xfrm_nl;
28 EXPORT_SYMBOL(xfrm_nl);
29
30 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
31 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
32
33 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
34 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
35
36 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
37
38 /* Each xfrm_state may be linked to two tables:
39
40 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
41 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
42 destination/tunnel endpoint. (output)
43 */
44
45 static DEFINE_SPINLOCK(xfrm_state_lock);
46
47 static struct hlist_head *xfrm_state_byspi __read_mostly;
48 static unsigned int xfrm_state_hmask __read_mostly;
49 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
50 static unsigned int xfrm_state_num;
51 static unsigned int xfrm_state_genid;
52
53 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
54 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
55
56 #ifdef CONFIG_AUDITSYSCALL
57 static void xfrm_audit_state_replay(struct xfrm_state *x,
58 struct sk_buff *skb, __be32 net_seq);
59 #else
60 #define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
61 #endif /* CONFIG_AUDITSYSCALL */
62
63 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
64 xfrm_address_t *saddr,
65 u32 reqid,
66 unsigned short family)
67 {
68 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
69 }
70
71 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
72 xfrm_address_t *saddr,
73 unsigned short family)
74 {
75 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
76 }
77
78 static inline unsigned int
79 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
80 {
81 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
82 }
83
/* Rehash every state on one old bydst chain into the new dst/src/spi
 * tables sized by @nhashmask.  hlist_add_head() re-uses the embedded
 * nodes, implicitly unlinking them from the old chains; the _safe
 * iterator is required because each node is moved mid-walk.
 * Caller holds xfrm_state_lock. */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		/* States without an SPI (e.g. ACQ placeholders) are not
		 * linked in the byspi table at all. */
		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
114
115 static unsigned long xfrm_hash_new_size(void)
116 {
117 return ((xfrm_state_hmask + 1) << 1) *
118 sizeof(struct hlist_head);
119 }
120
121 static DEFINE_MUTEX(hash_resize_mutex);
122
/* Worker: grow all three state hash tables to double their size.
 * hash_resize_mutex serializes overlapping resize requests; the actual
 * table swap is done in a single xfrm_state_lock critical section so
 * lookups never observe a half-populated table.  On any allocation
 * failure the resize is abandoned silently. */
static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	/* Walking only the bydst chains is sufficient: every state is on
	 * bydst, and xfrm_hash_transfer rehashes its src/spi links too. */
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(init_net.xfrm.state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = init_net.xfrm.state_bydst;
	osrc = init_net.xfrm.state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	init_net.xfrm.state_bydst = ndst;
	init_net.xfrm.state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	/* Old tables are now empty; free them outside the spinlock. */
	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
175
176 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
177
178 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
179 EXPORT_SYMBOL(km_waitq);
180
181 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
182 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
183
184 static struct work_struct xfrm_state_gc_work;
185 static HLIST_HEAD(xfrm_state_gc_list);
186 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
187
188 int __xfrm_state_delete(struct xfrm_state *x);
189
190 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
191 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
192
/* Look up the per-family afinfo and return it with
 * xfrm_state_afinfo_lock held for writing.
 *
 * NOTE the asymmetric locking: on success the lock is LEFT HELD and
 * must be released via xfrm_state_unlock_afinfo(); on failure (bad
 * family or unregistered afinfo) the lock is released here and NULL
 * is returned. */
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_state_afinfo_lock);
	return afinfo;
}
204
/* Drop the write lock taken by a successful xfrm_state_lock_afinfo().
 * @afinfo is unused; it only documents the pairing for callers. */
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	write_unlock_bh(&xfrm_state_afinfo_lock);
}
210
211 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
212 {
213 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
214 const struct xfrm_type **typemap;
215 int err = 0;
216
217 if (unlikely(afinfo == NULL))
218 return -EAFNOSUPPORT;
219 typemap = afinfo->type_map;
220
221 if (likely(typemap[type->proto] == NULL))
222 typemap[type->proto] = type;
223 else
224 err = -EEXIST;
225 xfrm_state_unlock_afinfo(afinfo);
226 return err;
227 }
228 EXPORT_SYMBOL(xfrm_register_type);
229
230 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
231 {
232 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
233 const struct xfrm_type **typemap;
234 int err = 0;
235
236 if (unlikely(afinfo == NULL))
237 return -EAFNOSUPPORT;
238 typemap = afinfo->type_map;
239
240 if (unlikely(typemap[type->proto] != type))
241 err = -ENOENT;
242 else
243 typemap[type->proto] = NULL;
244 xfrm_state_unlock_afinfo(afinfo);
245 return err;
246 }
247 EXPORT_SYMBOL(xfrm_unregister_type);
248
/* Resolve the registered xfrm_type for (proto, family), taking a module
 * reference on it.  If not found, try exactly once to load the module
 * via the "xfrm-type-F-P" alias and retry.  Returns NULL on failure;
 * the caller releases via xfrm_put_type(). */
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type **typemap;
	const struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	/* A registered type whose module is unloading counts as absent. */
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}
275
/* Release the module reference taken by xfrm_get_type(). */
static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}
280
/* Register an encapsulation mode (transport, tunnel, BEET, ...) for
 * @family.  Pins the afinfo's owning module for as long as the mode is
 * registered.  Returns 0, -EINVAL on a bad encap id, -EAFNOSUPPORT,
 * -EEXIST if the slot is taken, or -ENOENT if the afinfo module is
 * going away. */
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (modemap[mode->encap])
		goto out;

	err = -ENOENT;
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
312
/* Unregister an encapsulation mode and drop the afinfo module
 * reference taken at registration.  Returns 0, -EINVAL on a bad encap
 * id, -EAFNOSUPPORT, or -ENOENT if this mode is not the one
 * registered. */
int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		module_put(mode->afinfo->owner);
		err = 0;
	}

	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);
338
/* Resolve the registered xfrm_mode for (encap, family), taking a
 * module reference.  On a miss, attempt one modprobe of the
 * "xfrm-mode-F-E" alias and retry.  Returns NULL on failure; release
 * with xfrm_put_mode(). */
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	/* Treat a mode whose module is unloading as not present. */
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}
366
/* Release the module reference taken by xfrm_get_mode(). */
static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}
371
/* Final teardown of a dead state: quiesce both timers, free all
 * deep-copied members, drop mode/type module references, release the
 * security context, then free the state itself.  Runs from the GC
 * workqueue, so del_timer_sync() is safe here. */
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->inner_mode_iaf)
		xfrm_put_mode(x->inner_mode_iaf);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		/* Type-specific cleanup before the module ref is dropped. */
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
394
/* GC work function: atomically steal the pending gc list, destroy each
 * state outside the gc lock, then wake km_waitq waiters (e.g. flush
 * callers waiting for destruction to finish). */
static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_move_list(&xfrm_state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}
410
411 static inline unsigned long make_jiffies(long secs)
412 {
413 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
414 return MAX_SCHEDULE_TIMEOUT-1;
415 else
416 return secs*HZ;
417 }
418
/* Per-state lifetime timer, runs in softirq context with x->lock.
 * Computes the nearest hard/soft add/use deadline: hard expiry kills
 * the state (or marks an SPI-less ACQ expired and polls again in 2s),
 * soft expiry notifies key managers once (km.dying latches the
 * warning), and the timer re-arms for the next deadline. */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* A never-used state counts "now" as first use. */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	/* Soft expiry was already signalled; just keep the timer armed. */
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		/* Larval state: mark expired and poll until it is reaped. */
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current),
				audit_get_sessionid(current), 0);

out:
	spin_unlock(&x->lock);
}
495
496 static void xfrm_replay_timer_handler(unsigned long data);
497
/* Allocate and minimally initialize an xfrm_state for @net.
 * Returns NULL on allocation failure.  The caller owns the initial
 * reference (refcnt == 1); both timers are set up but not armed, and
 * the state is not linked into any table yet. */
struct xfrm_state *xfrm_state_alloc(struct net *net)
{
	struct xfrm_state *x;

	/* GFP_ATOMIC: callers may hold spinlocks (e.g. xfrm_state_find). */
	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		write_pnet(&x->xs_net, net);
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->km.all);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
		setup_timer(&x->rtimer, xfrm_replay_timer_handler,
				(unsigned long)x);
		x->curlft.add_time = get_seconds();
		/* Byte/packet lifetimes default to unlimited. */
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		x->inner_mode = NULL;
		x->inner_mode_iaf = NULL;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
529
/* Called when the last reference to a (dead) state is dropped: queue
 * it for the GC worker rather than destroying inline, since teardown
 * needs del_timer_sync() which cannot run in atomic context. */
void __xfrm_state_destroy(struct xfrm_state *x)
{
	WARN_ON(x->km.state != XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->gclist, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
540
/* Mark @x dead and unlink it from all tables.  Caller holds x->lock
 * with BHs disabled (process-context callers use spin_lock_bh; the
 * timer handler already runs in softirq), which is why plain
 * spin_lock on xfrm_state_lock suffices here.  Returns 0, or -ESRCH
 * if the state was already dead. */
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		list_del(&x->km.all);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
567
568 int xfrm_state_delete(struct xfrm_state *x)
569 {
570 int err;
571
572 spin_lock_bh(&x->lock);
573 err = __xfrm_state_delete(x);
574 spin_unlock_bh(&x->lock);
575
576 return err;
577 }
578 EXPORT_SYMBOL(xfrm_state_delete);
579
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Pre-flight for xfrm_state_flush(): ask the LSM whether every state
 * matching @proto may be deleted.  On the first refusal, audit the
 * denial and return the LSM error so the flush is aborted before any
 * state is touched.  Called with xfrm_state_lock held. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
/* No LSM support compiled in: every flush is permitted. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
611
/* Delete every state whose protocol matches @proto (IPSEC_PROTO_ANY
 * flushes all), auditing each deletion.  xfrm_state_lock is dropped
 * around each xfrm_state_delete() (which takes x->lock and may call
 * out), so the bucket walk restarts from the chain head after every
 * deletion — correct, though O(n^2) on long chains. */
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
			/* Kernel-internal (tunnel) states are skipped. */
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				/* Hold x so it survives the unlock below. */
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
651
652 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
653 {
654 spin_lock_bh(&xfrm_state_lock);
655 si->sadcnt = xfrm_state_num;
656 si->sadhcnt = xfrm_state_hmask;
657 si->sadhmcnt = xfrm_state_hashmax;
658 spin_unlock_bh(&xfrm_state_lock);
659 }
660 EXPORT_SYMBOL(xfrm_sad_getinfo);
661
662 static int
663 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
664 struct xfrm_tmpl *tmpl,
665 xfrm_address_t *daddr, xfrm_address_t *saddr,
666 unsigned short family)
667 {
668 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
669 if (!afinfo)
670 return -1;
671 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
672 xfrm_state_put_afinfo(afinfo);
673 return 0;
674 }
675
/* SPI-based lookup: find the state matching (daddr, spi, proto,
 * family) in the byspi table and return it with a reference held, or
 * NULL.  Caller holds xfrm_state_lock. */
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto)
			continue;

		/* Family-specific destination address comparison. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
707
/* Address-pair lookup (used for SPI-less protocols): find the state
 * matching (daddr, saddr, proto, family) in the bysrc table and return
 * it with a reference held, or NULL.  Caller holds xfrm_state_lock. */
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto)
			continue;

		/* Both endpoint addresses must match, per family. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
742
743 static inline struct xfrm_state *
744 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
745 {
746 if (use_spi)
747 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
748 x->id.proto, family);
749 else
750 return __xfrm_state_lookup_byaddr(&x->id.daddr,
751 &x->props.saddr,
752 x->id.proto, family);
753 }
754
755 static void xfrm_hash_grow_check(int have_hash_collision)
756 {
757 if (have_hash_collision &&
758 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
759 xfrm_state_num > xfrm_state_hmask)
760 schedule_work(&xfrm_hash_work);
761 }
762
/* Output-path SA resolution: find (or begin acquiring) the state for
 * flow @fl under template @tmpl and policy @pol.
 *
 * Among VALID candidates whose selector and LSM context match, the
 * "best" is a live (non-dying) state, ties broken by newest add_time.
 * If nothing matches and no acquire is in flight, a larval ACQ state
 * is created, a km_query() is sent to key managers, and the ACQ is
 * inserted with an expiry of sysctl_xfrm_acq_expires seconds.
 *
 * Returns a referenced state, or NULL with *err set (-EAGAIN when an
 * acquire is already pending, -EEXIST/-ENOMEM/-ESRCH otherwise). */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h;
	struct hlist_node *entry;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* A fully-specified template whose SPI already exists in
		 * the byspi table is a conflict, not a candidate. */
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc(&init_net);
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			list_add(&x->km.all, &init_net.xfrm.state_all);
			hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, init_net.xfrm.state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			/* No key manager answered: tear the larva down. */
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	/* Drop surplus references outside the lock. */
	if (to_put)
		xfrm_state_put(to_put);
	return x;
}
879
880 struct xfrm_state *
881 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
882 unsigned short family, u8 mode, u8 proto, u32 reqid)
883 {
884 unsigned int h;
885 struct xfrm_state *rx = NULL, *x = NULL;
886 struct hlist_node *entry;
887
888 spin_lock(&xfrm_state_lock);
889 h = xfrm_dst_hash(daddr, saddr, reqid, family);
890 hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
891 if (x->props.family == family &&
892 x->props.reqid == reqid &&
893 !(x->props.flags & XFRM_STATE_WILDRECV) &&
894 xfrm_state_addr_check(x, daddr, saddr, family) &&
895 mode == x->props.mode &&
896 proto == x->id.proto &&
897 x->km.state == XFRM_STATE_VALID) {
898 rx = x;
899 break;
900 }
901 }
902
903 if (rx)
904 xfrm_state_hold(rx);
905 spin_unlock(&xfrm_state_lock);
906
907
908 return rx;
909 }
910 EXPORT_SYMBOL(xfrm_stateonly_find);
911
/* Link @x into the global list and all three hash tables, stamp it
 * with a fresh genid, and arm its timers (lifetime timer fires in 1s
 * for an initial check).  Caller holds xfrm_state_lock. */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	list_add(&x->km.all, &init_net.xfrm.state_all);

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, init_net.xfrm.state_bysrc+h);

	/* Only states with an SPI live in the byspi table. */
	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
944
/* xfrm_state_lock is held */
/* Bump the genid of every existing state sharing @xnew's
 * (family, reqid, daddr, saddr) tuple, so cached bundles built on the
 * old states are invalidated when the new state is inserted. */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
		if (x->props.family	== family &&
		    x->props.reqid	== reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
963
/* Insert @x unconditionally: invalidate bundles of same-tuple states
 * first, then link the new state, all in one lock section. */
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
972
/* xfrm_state_lock is held */
/* Find an existing larval (ACQ, SPI-less) state matching the given
 * tuple; if none and @create is set, allocate one: a temporary
 * host-to-host selector is built from the addresses, the lifetime
 * timer is armed for sysctl_xfrm_acq_expires seconds, and the state
 * is linked into the dst/src tables (not byspi — it has no SPI).
 * Returns a referenced state or NULL. */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0 ||
		    x->id.proto	    != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc(&init_net);
	if (likely(x)) {
		/* Temporary selector: exact-host match on both ends. */
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		/* Extra reference for the caller; the allocation ref is
		 * consumed by the tables/timer linkage. */
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		list_add(&x->km.all, &init_net.xfrm.state_all);
		hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, init_net.xfrm.state_bysrc+h);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
1059
1060 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
1061
/* Add a fully-specified state @x to the SAD.
 * Fails with -EEXIST if an identical state (same SPI triple, or same
 * address pair for SPI-less protocols) is already present.  Any larval
 * ACQ state this SA resolves — matched by the km sequence number or by
 * the acquire tuple — is deleted after the insert so waiters move to
 * the real SA.  Returns 0 or a negative errno. */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		to_put = x1;
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		/* The seq match must agree on proto and destination,
		 * otherwise it is a stale/unrelated acquire. */
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			to_put = x1;
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	/* Kill the resolved ACQ state outside the lock. */
	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	if (to_put)
		xfrm_state_put(to_put);

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
1115
1116 #ifdef CONFIG_XFRM_MIGRATE
/* Deep-copy @orig into a fresh state for migration: identity,
 * selector, lifetimes, properties, algorithms, encap and care-of
 * address are duplicated, then xfrm_init_state() re-resolves
 * type/mode for the copy.  On failure *errp (if non-NULL) receives
 * -ENOMEM or the xfrm_init_state() error and NULL is returned.
 *
 * NOTE(review): the error path frees only the copied members and the
 * state itself with kfree(); if xfrm_init_state() acquired type/mode
 * module references before failing, those may not be released here —
 * compare with xfrm_state_gc_destroy() and confirm. */
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc(&init_net);
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

 error:
	if (errp)
		*errp = err;
	if (x) {
		/* kfree(NULL) is a no-op for members never duplicated. */
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	kfree(x);
	return NULL;
}
1192
/* xfrm_state_lock is held */
/* Locate the state to be migrated, matching @m's old endpoints, mode
 * and proto.  With a reqid the bydst table is searched (reqid is part
 * of that hash); without one the bysrc table is used.  Returns a
 * referenced state or NULL.  (The inner "m->reqid &&" re-check is
 * redundant inside the if (m->reqid) branch but harmless.) */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
1237
1238 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1239 struct xfrm_migrate *m)
1240 {
1241 struct xfrm_state *xc;
1242 int err;
1243
1244 xc = xfrm_state_clone(x, &err);
1245 if (!xc)
1246 return NULL;
1247
1248 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1249 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1250
1251 /* add state */
1252 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1253 /* a care is needed when the destination address of the
1254 state is to be updated as it is a part of triplet */
1255 xfrm_state_insert(xc);
1256 } else {
1257 if ((err = xfrm_state_add(xc)) < 0)
1258 goto error;
1259 }
1260
1261 return xc;
1262 error:
1263 kfree(xc);
1264 return NULL;
1265 }
1266 EXPORT_SYMBOL(xfrm_state_migrate);
1267 #endif
1268
/* Update an existing SA with the parameters carried in @x.
 *
 * Locates the current state x1 matching @x.  If x1 is a larval (ACQ)
 * entry, @x replaces it outright: @x is inserted and x1 is deleted.
 * Otherwise x1 is updated in place (encap, care-of address, selector
 * when no SPI is involved, lifetimes) under x1->lock.
 *
 * Returns 0 on success, -ESRCH when no matching state exists, -EEXIST
 * for kernel-owned (xfrm_state_kern) states, -EINVAL when x1 is no
 * longer VALID at update time.
 *
 * Note the two-phase locking: table work under xfrm_state_lock first,
 * then per-state work under x1->lock with the table lock dropped.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		/* Reference from __xfrm_state_locate must still be
		 * dropped, but only after the table lock is released. */
		to_put = x1;
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* Larval entry: promote @x to the real SA.  x = NULL
		 * flags the "replace" path below. */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (to_put)
		xfrm_state_put(to_put);

	if (err)
		return err;

	if (!x) {
		/* @x was inserted above; retire the larval x1. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	/* In-place update of the live state. */
	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-arm the lifetime timer against the new limits. */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1337
1338 int xfrm_state_check_expire(struct xfrm_state *x)
1339 {
1340 if (!x->curlft.use_time)
1341 x->curlft.use_time = get_seconds();
1342
1343 if (x->km.state != XFRM_STATE_VALID)
1344 return -EINVAL;
1345
1346 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1347 x->curlft.packets >= x->lft.hard_packet_limit) {
1348 x->km.state = XFRM_STATE_EXPIRED;
1349 mod_timer(&x->timer, jiffies);
1350 return -EINVAL;
1351 }
1352
1353 if (!x->km.dying &&
1354 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1355 x->curlft.packets >= x->lft.soft_packet_limit)) {
1356 x->km.dying = 1;
1357 km_state_expired(x, 0, 0);
1358 }
1359 return 0;
1360 }
1361 EXPORT_SYMBOL(xfrm_state_check_expire);
1362
1363 struct xfrm_state *
1364 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1365 unsigned short family)
1366 {
1367 struct xfrm_state *x;
1368
1369 spin_lock_bh(&xfrm_state_lock);
1370 x = __xfrm_state_lookup(daddr, spi, proto, family);
1371 spin_unlock_bh(&xfrm_state_lock);
1372 return x;
1373 }
1374 EXPORT_SYMBOL(xfrm_state_lookup);
1375
1376 struct xfrm_state *
1377 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1378 u8 proto, unsigned short family)
1379 {
1380 struct xfrm_state *x;
1381
1382 spin_lock_bh(&xfrm_state_lock);
1383 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1384 spin_unlock_bh(&xfrm_state_lock);
1385 return x;
1386 }
1387 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1388
1389 struct xfrm_state *
1390 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1391 xfrm_address_t *daddr, xfrm_address_t *saddr,
1392 int create, unsigned short family)
1393 {
1394 struct xfrm_state *x;
1395
1396 spin_lock_bh(&xfrm_state_lock);
1397 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1398 spin_unlock_bh(&xfrm_state_lock);
1399
1400 return x;
1401 }
1402 EXPORT_SYMBOL(xfrm_find_acq);
1403
1404 #ifdef CONFIG_XFRM_SUB_POLICY
1405 int
1406 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1407 unsigned short family)
1408 {
1409 int err = 0;
1410 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1411 if (!afinfo)
1412 return -EAFNOSUPPORT;
1413
1414 spin_lock_bh(&xfrm_state_lock);
1415 if (afinfo->tmpl_sort)
1416 err = afinfo->tmpl_sort(dst, src, n);
1417 spin_unlock_bh(&xfrm_state_lock);
1418 xfrm_state_put_afinfo(afinfo);
1419 return err;
1420 }
1421 EXPORT_SYMBOL(xfrm_tmpl_sort);
1422
1423 int
1424 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1425 unsigned short family)
1426 {
1427 int err = 0;
1428 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1429 if (!afinfo)
1430 return -EAFNOSUPPORT;
1431
1432 spin_lock_bh(&xfrm_state_lock);
1433 if (afinfo->state_sort)
1434 err = afinfo->state_sort(dst, src, n);
1435 spin_unlock_bh(&xfrm_state_lock);
1436 xfrm_state_put_afinfo(afinfo);
1437 return err;
1438 }
1439 EXPORT_SYMBOL(xfrm_state_sort);
1440 #endif
1441
1442 /* Silly enough, but I'm lazy to build resolution list */
1443
1444 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1445 {
1446 int i;
1447
1448 for (i = 0; i <= xfrm_state_hmask; i++) {
1449 struct hlist_node *entry;
1450 struct xfrm_state *x;
1451
1452 hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
1453 if (x->km.seq == seq &&
1454 x->km.state == XFRM_STATE_ACQ) {
1455 xfrm_state_hold(x);
1456 return x;
1457 }
1458 }
1459 }
1460 return NULL;
1461 }
1462
1463 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1464 {
1465 struct xfrm_state *x;
1466
1467 spin_lock_bh(&xfrm_state_lock);
1468 x = __xfrm_find_acq_byseq(seq);
1469 spin_unlock_bh(&xfrm_state_lock);
1470 return x;
1471 }
1472 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1473
1474 u32 xfrm_get_acqseq(void)
1475 {
1476 u32 res;
1477 static u32 acqseq;
1478 static DEFINE_SPINLOCK(acqseq_lock);
1479
1480 spin_lock_bh(&acqseq_lock);
1481 res = (++acqseq ? : ++acqseq);
1482 spin_unlock_bh(&acqseq_lock);
1483 return res;
1484 }
1485 EXPORT_SYMBOL(xfrm_get_acqseq);
1486
/* Assign an SPI to @x from the range [@low, @high] (host byte order).
 *
 * With low == high the single requested SPI is tried; otherwise up to
 * high-low+1 random probes are made inside the range.  A candidate is
 * rejected when an SA with the same (daddr, spi, proto) already
 * exists.  On success the state is linked into the by-SPI hash.
 *
 * Returns 0 on success (including when @x already had an SPI), and
 * -ENOENT when the state is dead or no free SPI was found.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	/* Already assigned: nothing to do. */
	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		/* Exactly one SPI requested; take it only if free. */
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		/* Random probing; h merely bounds the attempt count. */
		u32 spi = 0;
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* Publish in the by-SPI hash so lookups can find it. */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1539
/* Resumable walk over all states in init_net.
 *
 * Invokes @func on every live state matching walk->proto.  The walker
 * object is itself threaded into the state_all list (its ->state is
 * DEAD so real walkers skip it) to remember the resume position when
 * @func aborts the dump, e.g. because a netlink skb filled up.
 *
 * Returns 0 when the walk completed, -ENOENT when nothing matched on
 * a fresh walk, or the non-zero value returned by @func.
 */
int xfrm_state_walk(struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *state;
	struct xfrm_state_walk *x;
	int err = 0;

	/* A finished walk (seq set, entry unlinked) stays finished. */
	if (walk->seq != 0 && list_empty(&walk->all))
		return 0;

	spin_lock_bh(&xfrm_state_lock);
	if (list_empty(&walk->all))
		x = list_first_entry(&init_net.xfrm.state_all, struct xfrm_state_walk, all);
	else
		x = list_entry(&walk->all, struct xfrm_state_walk, all);
	list_for_each_entry_from(x, &init_net.xfrm.state_all, all) {
		/* Skips dead states and other walkers' marker nodes. */
		if (x->state == XFRM_STATE_DEAD)
			continue;
		state = container_of(x, struct xfrm_state, km);
		if (!xfrm_id_proto_match(state->id.proto, walk->proto))
			continue;
		err = func(state, walk->seq, data);
		if (err) {
			/* Park the marker after x to resume from here. */
			list_move_tail(&walk->all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		err = -ENOENT;
		goto out;
	}
	/* Walk complete: unlink the marker. */
	list_del_init(&walk->all);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1579
1580 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
1581 {
1582 INIT_LIST_HEAD(&walk->all);
1583 walk->proto = proto;
1584 walk->state = XFRM_STATE_DEAD;
1585 walk->seq = 0;
1586 }
1587 EXPORT_SYMBOL(xfrm_state_walk_init);
1588
1589 void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1590 {
1591 if (list_empty(&walk->all))
1592 return;
1593
1594 spin_lock_bh(&xfrm_state_lock);
1595 list_del(&walk->all);
1596 spin_lock_bh(&xfrm_state_lock);
1597 }
1598 EXPORT_SYMBOL(xfrm_state_walk_done);
1599
1600
/* Decide whether a replay-counter change warrants an XFRM_MSG_NEWAE
 * aevent to user space, and emit it if so. */
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 * 1. we updated on of the sequence numbers, and the seqno difference
	 * is at least x->replay_maxdiff, in this case we also update the
	 * timeout of our timer function
	 * 2. if x->replay_maxage has elapsed since last update,
	 * and there were changes
	 *
	 * The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Below the maxdiff threshold: defer to the aging
		 * timer, unless a deferred notification is pending. */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Nothing changed since the last report: just latch
		 * the defer flag and wait for the next update. */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* Snapshot what we are about to report, then notify KMs. */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1647
1648 static void xfrm_replay_timer_handler(unsigned long data)
1649 {
1650 struct xfrm_state *x = (struct xfrm_state*)data;
1651
1652 spin_lock(&x->lock);
1653
1654 if (x->km.state == XFRM_STATE_VALID) {
1655 if (xfrm_aevent_is_on())
1656 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1657 else
1658 x->xflags |= XFRM_TIME_DEFER;
1659 }
1660
1661 spin_unlock(&x->lock);
1662 }
1663
/* Validate inbound sequence number @net_seq against the anti-replay
 * window of @x (sliding-bitmap scheme, capped at 32 bits here).
 *
 * Returns 0 when the packet is acceptable; -EINVAL — after emitting
 * an audit record — when the number is zero, falls left of the
 * window, or was already seen.
 *
 * NOTE(review): presumably called with x->lock held, as the window
 * state is read unsynchronized here — confirm at call sites.
 */
int xfrm_replay_check(struct xfrm_state *x,
		      struct sk_buff *skb, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	/* Sequence number 0 is never valid on the wire. */
	if (unlikely(seq == 0))
		goto err;

	/* Ahead of the window's right edge: always fresh. */
	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= min_t(unsigned int, x->props.replay_window,
			  sizeof(x->replay.bitmap) * 8)) {
		/* Too old: fell off the left edge of the window. */
		x->stats.replay_window++;
		goto err;
	}

	if (x->replay.bitmap & (1U << diff)) {
		/* Within the window but already received. */
		x->stats.replay++;
		goto err;
	}
	return 0;

err:
	xfrm_audit_state_replay(x, skb, net_seq);
	return -EINVAL;
}
1693
/* Record @net_seq as received: slide the window forward for a new
 * high-water mark, or set the corresponding bitmap bit for an
 * in-window packet.
 *
 * NOTE(review): in the else-branch, `1U << diff` assumes diff < 32,
 * which holds only if xfrm_replay_check() accepted this packet first
 * (it bounds diff by the window, itself capped at 32) — confirm all
 * callers check before advancing.
 */
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		/* New right edge; shift history, mark this packet. */
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
1714
1715 static LIST_HEAD(xfrm_km_list);
1716 static DEFINE_RWLOCK(xfrm_km_lock);
1717
1718 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1719 {
1720 struct xfrm_mgr *km;
1721
1722 read_lock(&xfrm_km_lock);
1723 list_for_each_entry(km, &xfrm_km_list, list)
1724 if (km->notify_policy)
1725 km->notify_policy(xp, dir, c);
1726 read_unlock(&xfrm_km_lock);
1727 }
1728
1729 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1730 {
1731 struct xfrm_mgr *km;
1732 read_lock(&xfrm_km_lock);
1733 list_for_each_entry(km, &xfrm_km_list, list)
1734 if (km->notify)
1735 km->notify(x, c);
1736 read_unlock(&xfrm_km_lock);
1737 }
1738
1739 EXPORT_SYMBOL(km_policy_notify);
1740 EXPORT_SYMBOL(km_state_notify);
1741
1742 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1743 {
1744 struct km_event c;
1745
1746 c.data.hard = hard;
1747 c.pid = pid;
1748 c.event = XFRM_MSG_EXPIRE;
1749 km_state_notify(x, &c);
1750
1751 if (hard)
1752 wake_up(&km_waitq);
1753 }
1754
1755 EXPORT_SYMBOL(km_state_expired);
1756 /*
1757 * We send to all registered managers regardless of failure
1758 * We are happy with one success
1759 */
1760 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1761 {
1762 int err = -EINVAL, acqret;
1763 struct xfrm_mgr *km;
1764
1765 read_lock(&xfrm_km_lock);
1766 list_for_each_entry(km, &xfrm_km_list, list) {
1767 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1768 if (!acqret)
1769 err = acqret;
1770 }
1771 read_unlock(&xfrm_km_lock);
1772 return err;
1773 }
1774 EXPORT_SYMBOL(km_query);
1775
1776 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1777 {
1778 int err = -EINVAL;
1779 struct xfrm_mgr *km;
1780
1781 read_lock(&xfrm_km_lock);
1782 list_for_each_entry(km, &xfrm_km_list, list) {
1783 if (km->new_mapping)
1784 err = km->new_mapping(x, ipaddr, sport);
1785 if (!err)
1786 break;
1787 }
1788 read_unlock(&xfrm_km_lock);
1789 return err;
1790 }
1791 EXPORT_SYMBOL(km_new_mapping);
1792
1793 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1794 {
1795 struct km_event c;
1796
1797 c.data.hard = hard;
1798 c.pid = pid;
1799 c.event = XFRM_MSG_POLEXPIRE;
1800 km_policy_notify(pol, dir, &c);
1801
1802 if (hard)
1803 wake_up(&km_waitq);
1804 }
1805 EXPORT_SYMBOL(km_policy_expired);
1806
#ifdef CONFIG_XFRM_MIGRATE
/* Broadcast a migrate request to every manager implementing it;
 * success of any single manager yields overall success. */
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate,
	       struct xfrm_kmaddress *k)
{
	int status = -EINVAL;
	struct xfrm_mgr *mgr;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(mgr, &xfrm_km_list, list) {
		if (!mgr->migrate)
			continue;
		if (!mgr->migrate(sel, dir, type, m, num_migrate, k))
			status = 0;
	}
	read_unlock(&xfrm_km_lock);
	return status;
}
EXPORT_SYMBOL(km_migrate);
#endif
1829
1830 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1831 {
1832 int err = -EINVAL;
1833 int ret;
1834 struct xfrm_mgr *km;
1835
1836 read_lock(&xfrm_km_lock);
1837 list_for_each_entry(km, &xfrm_km_list, list) {
1838 if (km->report) {
1839 ret = km->report(proto, sel, addr);
1840 if (!ret)
1841 err = ret;
1842 }
1843 }
1844 read_unlock(&xfrm_km_lock);
1845 return err;
1846 }
1847 EXPORT_SYMBOL(km_report);
1848
/* setsockopt() backend: build a per-socket policy from a key-manager
 * specific blob (@optval, @optlen) and attach it to @sk.
 *
 * Each registered KM is offered the blob via ->compile_policy(); the
 * first to return err >= 0 wins, and err then carries the policy
 * direction passed to xfrm_sk_policy_insert().
 *
 * Returns 0 on success, -EMSGSIZE for a bad length, -ENOMEM, -EFAULT,
 * or the last compile_policy() error.
 */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	/* Offer the blob to each KM until one can parse it. */
	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		/* err holds the direction here, not an error code. */
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1888
1889 int xfrm_register_km(struct xfrm_mgr *km)
1890 {
1891 write_lock_bh(&xfrm_km_lock);
1892 list_add_tail(&km->list, &xfrm_km_list);
1893 write_unlock_bh(&xfrm_km_lock);
1894 return 0;
1895 }
1896 EXPORT_SYMBOL(xfrm_register_km);
1897
1898 int xfrm_unregister_km(struct xfrm_mgr *km)
1899 {
1900 write_lock_bh(&xfrm_km_lock);
1901 list_del(&km->list);
1902 write_unlock_bh(&xfrm_km_lock);
1903 return 0;
1904 }
1905 EXPORT_SYMBOL(xfrm_unregister_km);
1906
1907 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1908 {
1909 int err = 0;
1910 if (unlikely(afinfo == NULL))
1911 return -EINVAL;
1912 if (unlikely(afinfo->family >= NPROTO))
1913 return -EAFNOSUPPORT;
1914 write_lock_bh(&xfrm_state_afinfo_lock);
1915 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1916 err = -ENOBUFS;
1917 else
1918 xfrm_state_afinfo[afinfo->family] = afinfo;
1919 write_unlock_bh(&xfrm_state_afinfo_lock);
1920 return err;
1921 }
1922 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1923
1924 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1925 {
1926 int err = 0;
1927 if (unlikely(afinfo == NULL))
1928 return -EINVAL;
1929 if (unlikely(afinfo->family >= NPROTO))
1930 return -EAFNOSUPPORT;
1931 write_lock_bh(&xfrm_state_afinfo_lock);
1932 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1933 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1934 err = -EINVAL;
1935 else
1936 xfrm_state_afinfo[afinfo->family] = NULL;
1937 }
1938 write_unlock_bh(&xfrm_state_afinfo_lock);
1939 return err;
1940 }
1941 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1942
/* Return the afinfo registered for @family with
 * xfrm_state_afinfo_lock read-held.  On success the lock is
 * intentionally LEFT HELD; the caller must release it through
 * xfrm_state_put_afinfo().  Returns NULL (lock released) for unknown
 * or unregistered families. */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1954
/* Release the read lock taken by a successful xfrm_state_get_afinfo().
 * @afinfo is unused; the parameter only documents the pairing. */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1960
1961 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1962 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1963 {
1964 if (x->tunnel) {
1965 struct xfrm_state *t = x->tunnel;
1966
1967 if (atomic_read(&t->tunnel_users) == 2)
1968 xfrm_state_delete(t);
1969 atomic_dec(&t->tunnel_users);
1970 xfrm_state_put(t);
1971 x->tunnel = NULL;
1972 }
1973 }
1974 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1975
1976 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1977 {
1978 int res;
1979
1980 spin_lock_bh(&x->lock);
1981 if (x->km.state == XFRM_STATE_VALID &&
1982 x->type && x->type->get_mtu)
1983 res = x->type->get_mtu(x, mtu);
1984 else
1985 res = mtu - x->props.header_len;
1986 spin_unlock_bh(&x->lock);
1987 return res;
1988 }
1989
1990 int xfrm_init_state(struct xfrm_state *x)
1991 {
1992 struct xfrm_state_afinfo *afinfo;
1993 struct xfrm_mode *inner_mode;
1994 int family = x->props.family;
1995 int err;
1996
1997 err = -EAFNOSUPPORT;
1998 afinfo = xfrm_state_get_afinfo(family);
1999 if (!afinfo)
2000 goto error;
2001
2002 err = 0;
2003 if (afinfo->init_flags)
2004 err = afinfo->init_flags(x);
2005
2006 xfrm_state_put_afinfo(afinfo);
2007
2008 if (err)
2009 goto error;
2010
2011 err = -EPROTONOSUPPORT;
2012
2013 if (x->sel.family != AF_UNSPEC) {
2014 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2015 if (inner_mode == NULL)
2016 goto error;
2017
2018 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2019 family != x->sel.family) {
2020 xfrm_put_mode(inner_mode);
2021 goto error;
2022 }
2023
2024 x->inner_mode = inner_mode;
2025 } else {
2026 struct xfrm_mode *inner_mode_iaf;
2027
2028 inner_mode = xfrm_get_mode(x->props.mode, AF_INET);
2029 if (inner_mode == NULL)
2030 goto error;
2031
2032 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
2033 xfrm_put_mode(inner_mode);
2034 goto error;
2035 }
2036
2037 inner_mode_iaf = xfrm_get_mode(x->props.mode, AF_INET6);
2038 if (inner_mode_iaf == NULL)
2039 goto error;
2040
2041 if (!(inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)) {
2042 xfrm_put_mode(inner_mode_iaf);
2043 goto error;
2044 }
2045
2046 if (x->props.family == AF_INET) {
2047 x->inner_mode = inner_mode;
2048 x->inner_mode_iaf = inner_mode_iaf;
2049 } else {
2050 x->inner_mode = inner_mode_iaf;
2051 x->inner_mode_iaf = inner_mode;
2052 }
2053 }
2054
2055 x->type = xfrm_get_type(x->id.proto, family);
2056 if (x->type == NULL)
2057 goto error;
2058
2059 err = x->type->init_state(x);
2060 if (err)
2061 goto error;
2062
2063 x->outer_mode = xfrm_get_mode(x->props.mode, family);
2064 if (x->outer_mode == NULL)
2065 goto error;
2066
2067 x->km.state = XFRM_STATE_VALID;
2068
2069 error:
2070 return err;
2071 }
2072
2073 EXPORT_SYMBOL(xfrm_init_state);
2074
2075 int __net_init xfrm_state_init(struct net *net)
2076 {
2077 unsigned int sz;
2078
2079 INIT_LIST_HEAD(&net->xfrm.state_all);
2080
2081 sz = sizeof(struct hlist_head) * 8;
2082
2083 net->xfrm.state_bydst = xfrm_hash_alloc(sz);
2084 if (!net->xfrm.state_bydst)
2085 goto out_bydst;
2086 net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
2087 if (!net->xfrm.state_bysrc)
2088 goto out_bysrc;
2089 xfrm_state_byspi = xfrm_hash_alloc(sz);
2090 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2091
2092 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
2093 return 0;
2094
2095 out_bysrc:
2096 xfrm_hash_free(net->xfrm.state_bydst, sz);
2097 out_bydst:
2098 return -ENOMEM;
2099 }
2100
/* Per-namespace teardown: free the by-src and by-dst hash tables,
 * which must already be empty.
 *
 * NOTE(review): xfrm_state_byspi is still a file-global (not yet
 * per-namespace) and is deliberately not freed here; sz is recomputed
 * from the global xfrm_state_hmask — confirm it matches the size used
 * in xfrm_state_init(). */
void xfrm_state_fini(struct net *net)
{
	unsigned int sz;

	WARN_ON(!list_empty(&net->xfrm.state_all));

	sz = (xfrm_state_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bydst));
	xfrm_hash_free(net->xfrm.state_bydst, sz);
}
2113
2114 #ifdef CONFIG_AUDITSYSCALL
2115 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2116 struct audit_buffer *audit_buf)
2117 {
2118 struct xfrm_sec_ctx *ctx = x->security;
2119 u32 spi = ntohl(x->id.spi);
2120
2121 if (ctx)
2122 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2123 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2124
2125 switch(x->props.family) {
2126 case AF_INET:
2127 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2128 &x->props.saddr.a4, &x->id.daddr.a4);
2129 break;
2130 case AF_INET6:
2131 audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
2132 x->props.saddr.a6, x->id.daddr.a6);
2133 break;
2134 }
2135
2136 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2137 }
2138
2139 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2140 struct audit_buffer *audit_buf)
2141 {
2142 struct iphdr *iph4;
2143 struct ipv6hdr *iph6;
2144
2145 switch (family) {
2146 case AF_INET:
2147 iph4 = ip_hdr(skb);
2148 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2149 &iph4->saddr, &iph4->daddr);
2150 break;
2151 case AF_INET6:
2152 iph6 = ipv6_hdr(skb);
2153 audit_log_format(audit_buf,
2154 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2155 &iph6->saddr,&iph6->daddr,
2156 iph6->flow_lbl[0] & 0x0f,
2157 iph6->flow_lbl[1],
2158 iph6->flow_lbl[2]);
2159 break;
2160 }
2161 }
2162
2163 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2164 uid_t auid, u32 sessionid, u32 secid)
2165 {
2166 struct audit_buffer *audit_buf;
2167
2168 audit_buf = xfrm_audit_start("SAD-add");
2169 if (audit_buf == NULL)
2170 return;
2171 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2172 xfrm_audit_helper_sainfo(x, audit_buf);
2173 audit_log_format(audit_buf, " res=%u", result);
2174 audit_log_end(audit_buf);
2175 }
2176 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2177
2178 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2179 uid_t auid, u32 sessionid, u32 secid)
2180 {
2181 struct audit_buffer *audit_buf;
2182
2183 audit_buf = xfrm_audit_start("SAD-delete");
2184 if (audit_buf == NULL)
2185 return;
2186 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2187 xfrm_audit_helper_sainfo(x, audit_buf);
2188 audit_log_format(audit_buf, " res=%u", result);
2189 audit_log_end(audit_buf);
2190 }
2191 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2192
2193 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2194 struct sk_buff *skb)
2195 {
2196 struct audit_buffer *audit_buf;
2197 u32 spi;
2198
2199 audit_buf = xfrm_audit_start("SA-replay-overflow");
2200 if (audit_buf == NULL)
2201 return;
2202 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2203 /* don't record the sequence number because it's inherent in this kind
2204 * of audit message */
2205 spi = ntohl(x->id.spi);
2206 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2207 audit_log_end(audit_buf);
2208 }
2209 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2210
2211 static void xfrm_audit_state_replay(struct xfrm_state *x,
2212 struct sk_buff *skb, __be32 net_seq)
2213 {
2214 struct audit_buffer *audit_buf;
2215 u32 spi;
2216
2217 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2218 if (audit_buf == NULL)
2219 return;
2220 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2221 spi = ntohl(x->id.spi);
2222 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2223 spi, spi, ntohl(net_seq));
2224 audit_log_end(audit_buf);
2225 }
2226
2227 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2228 {
2229 struct audit_buffer *audit_buf;
2230
2231 audit_buf = xfrm_audit_start("SA-notfound");
2232 if (audit_buf == NULL)
2233 return;
2234 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2235 audit_log_end(audit_buf);
2236 }
2237 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2238
2239 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2240 __be32 net_spi, __be32 net_seq)
2241 {
2242 struct audit_buffer *audit_buf;
2243 u32 spi;
2244
2245 audit_buf = xfrm_audit_start("SA-notfound");
2246 if (audit_buf == NULL)
2247 return;
2248 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2249 spi = ntohl(net_spi);
2250 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2251 spi, spi, ntohl(net_seq));
2252 audit_log_end(audit_buf);
2253 }
2254 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2255
2256 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2257 struct sk_buff *skb, u8 proto)
2258 {
2259 struct audit_buffer *audit_buf;
2260 __be32 net_spi;
2261 __be32 net_seq;
2262
2263 audit_buf = xfrm_audit_start("SA-icv-failure");
2264 if (audit_buf == NULL)
2265 return;
2266 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2267 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2268 u32 spi = ntohl(net_spi);
2269 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2270 spi, spi, ntohl(net_seq));
2271 }
2272 audit_log_end(audit_buf);
2273 }
2274 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2275 #endif /* CONFIG_AUDITSYSCALL */
This page took 0.075693 seconds and 4 git commands to generate.