abbe2702c4007e42e5ad7b53986adb5776328867
[deliverable/linux.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
24
25 #include "xfrm_hash.h"
26
/* Netlink socket used to deliver xfrm events to key managers. */
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

/* Async-event rate-limit knobs, tunable via sysctl. */
u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Lifetime, in seconds, of a larval (ACQUIRE) state before it expires. */
u32 sysctl_xfrm_acq_expires __read_mostly = 30;

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

/* Protects the hash tables, xfrm_state_all and the counters below. */
static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static LIST_HEAD(xfrm_state_all);	/* every installed state, for walkers */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;	/* bucket count - 1 */
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;	/* number of installed states */
static unsigned int xfrm_state_genid;	/* bumped on insert; invalidates bundles */

/* Counter indicating ongoing walk, protected by xfrm_state_lock. */
static unsigned long xfrm_state_walk_ongoing;
/* Counter indicating walk completion, protected by xfrm_cfg_mutex. */
static unsigned long xfrm_state_walk_completed;

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_state_replay(struct xfrm_state *x,
				    struct sk_buff *skb, __be32 net_seq);
#else
#define xfrm_audit_state_replay(x, s, sq)	do { ; } while (0)
#endif /* CONFIG_AUDITSYSCALL */
76
77 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
78 xfrm_address_t *saddr,
79 u32 reqid,
80 unsigned short family)
81 {
82 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
83 }
84
85 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
86 xfrm_address_t *saddr,
87 unsigned short family)
88 {
89 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
90 }
91
92 static inline unsigned int
93 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
94 {
95 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
96 }
97
/* Re-bucket every state on @list into the new dst/src/spi tables whose
 * size is implied by @nhashmask.  Caller holds xfrm_state_lock.
 */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	/* _safe variant needed: hlist_add_head() below rewrites the
	 * bydst node's links while we are still iterating @list. */
	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		/* States without an SPI (larval) are not in the SPI table. */
		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
128
129 static unsigned long xfrm_hash_new_size(void)
130 {
131 return ((xfrm_state_hmask + 1) << 1) *
132 sizeof(struct hlist_head);
133 }
134
static DEFINE_MUTEX(hash_resize_mutex);

/* Worker: double the size of the three state hash tables.
 * The new tables are allocated outside xfrm_state_lock, swapped in
 * under the lock, and the old ones are freed after it is dropped.
 */
static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	/* Walking only the bydst table suffices: every state is linked
	 * into all three tables and transfer re-buckets all of them. */
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
189
static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

/* Woken whenever the SAD changes; key managers may sleep on it. */
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

/* Protects xfrm_state_afinfo[]. */
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

/* Deferred destruction: __xfrm_state_destroy() queues states onto
 * xfrm_state_gc_list; the gc work item moves them to the leftovers
 * list until every walk that might still see them has completed.
 */
static struct work_struct xfrm_state_gc_work;
static LIST_HEAD(xfrm_state_gc_leftovers);
static LIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
207
/* Take the afinfo write lock and return the entry for @family.
 * On success the lock is LEFT HELD; pair with xfrm_state_unlock_afinfo().
 * Returns NULL (with the lock released) for out-of-range or
 * unregistered families.
 */
static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_state_afinfo_lock);
	return afinfo;
}
219
/* Release the lock held since a successful xfrm_state_lock_afinfo(). */
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	write_unlock_bh(&xfrm_state_afinfo_lock);
}
225
226 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
227 {
228 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
229 const struct xfrm_type **typemap;
230 int err = 0;
231
232 if (unlikely(afinfo == NULL))
233 return -EAFNOSUPPORT;
234 typemap = afinfo->type_map;
235
236 if (likely(typemap[type->proto] == NULL))
237 typemap[type->proto] = type;
238 else
239 err = -EEXIST;
240 xfrm_state_unlock_afinfo(afinfo);
241 return err;
242 }
243 EXPORT_SYMBOL(xfrm_register_type);
244
245 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
246 {
247 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
248 const struct xfrm_type **typemap;
249 int err = 0;
250
251 if (unlikely(afinfo == NULL))
252 return -EAFNOSUPPORT;
253 typemap = afinfo->type_map;
254
255 if (unlikely(typemap[type->proto] != type))
256 err = -ENOENT;
257 else
258 typemap[type->proto] = NULL;
259 xfrm_state_unlock_afinfo(afinfo);
260 return err;
261 }
262 EXPORT_SYMBOL(xfrm_unregister_type);
263
/* Look up the transform type handler for (@proto, @family), taking a
 * module reference on it.  On a miss, try once to load the module via
 * the "xfrm-type-AF-PROTO" alias and retry.  Returns NULL on failure;
 * release with xfrm_put_type().
 */
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	const struct xfrm_type **typemap;
	const struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	/* Entry exists but its module is unloading: treat as absent. */
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return type;
}
290
/* Drop the module reference taken by xfrm_get_type(). */
static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}
295
/* Register encapsulation mode handler @mode for @family, pinning the
 * owning af module so the mode's afinfo pointer stays valid.
 * Returns 0, -EINVAL for a bad encap id, -EAFNOSUPPORT for an unknown
 * family, -EEXIST if the slot is taken, or -ENOENT if the af module
 * is going away.
 */
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_state_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (modemap[mode->encap])
		goto out;

	err = -ENOENT;
	/* Hold the af module for as long as the mode is registered. */
	if (!try_module_get(afinfo->owner))
		goto out;

	mode->afinfo = afinfo;
	modemap[mode->encap] = mode;
	err = 0;

out:
	xfrm_state_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
327
328 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
329 {
330 struct xfrm_state_afinfo *afinfo;
331 struct xfrm_mode **modemap;
332 int err;
333
334 if (unlikely(mode->encap >= XFRM_MODE_MAX))
335 return -EINVAL;
336
337 afinfo = xfrm_state_lock_afinfo(family);
338 if (unlikely(afinfo == NULL))
339 return -EAFNOSUPPORT;
340
341 err = -ENOENT;
342 modemap = afinfo->mode_map;
343 if (likely(modemap[mode->encap] == mode)) {
344 modemap[mode->encap] = NULL;
345 module_put(mode->afinfo->owner);
346 err = 0;
347 }
348
349 xfrm_state_unlock_afinfo(afinfo);
350 return err;
351 }
352 EXPORT_SYMBOL(xfrm_unregister_mode);
353
/* Look up the mode handler for (@encap, @family), taking a module
 * reference.  On a miss, try once to load "xfrm-mode-AF-ENCAP" and
 * retry.  Returns NULL on failure; release with xfrm_put_mode().
 */
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	/* Entry exists but its module is unloading: treat as absent. */
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_state_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_state_put_afinfo(afinfo);
	return mode;
}
381
/* Drop the module reference taken by xfrm_get_mode(). */
static void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}
386
/* Final teardown of a state.  Runs only from the gc work item, after
 * no walker can reference @x any more.  Timers are stopped first
 * (synchronously) so their handlers cannot run on freed memory.
 */
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->inner_mode)
		xfrm_put_mode(x->inner_mode);
	if (x->inner_mode_iaf)
		xfrm_put_mode(x->inner_mode_iaf);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
	if (x->type) {
		/* Type-specific teardown before dropping the module ref. */
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
409
/* GC work item: destroy queued states whose deletion predates every
 * completed walk.  x->lastused holds the walk generation recorded at
 * deletion time (see __xfrm_state_delete), so entries newer than the
 * last completed walk stay on the leftovers list for a later pass.
 */
static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x, *tmp;
	unsigned long completed;

	mutex_lock(&xfrm_cfg_mutex);
	spin_lock_bh(&xfrm_state_gc_lock);
	list_splice_tail_init(&xfrm_state_gc_list, &xfrm_state_gc_leftovers);
	spin_unlock_bh(&xfrm_state_gc_lock);

	/* xfrm_state_walk_completed is protected by xfrm_cfg_mutex. */
	completed = xfrm_state_walk_completed;
	mutex_unlock(&xfrm_cfg_mutex);

	list_for_each_entry_safe(x, tmp, &xfrm_state_gc_leftovers, gclist) {
		/* List is in deletion order; stop at the first entry that
		 * a still-pending walk might reference. */
		if ((long)(x->lastused - completed) > 0)
			break;
		list_del(&x->gclist);
		xfrm_state_gc_destroy(x);
	}

	wake_up(&km_waitq);
}
432
433 static inline unsigned long make_jiffies(long secs)
434 {
435 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
436 return MAX_SCHEDULE_TIMEOUT-1;
437 else
438 return secs*HZ;
439 }
440
/* Per-state lifetime timer.  Computes the nearest soft/hard expiry
 * deadline, fires a soft-expiry notification (km_state_expired with
 * hard=0) once, deletes the state on hard expiry, and re-arms itself
 * for the next deadline.  Runs in timer context with x->lock taken.
 */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;	/* seconds until the next deadline */
	int warn = 0;		/* a soft limit was reached */
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* use_time == 0 means "never used yet"; count from now. */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	/* Soft limits warn only once; skip them once dying is set. */
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	/* A larval (ACQ) state without an SPI lingers briefly as
	 * EXPIRED so key managers can notice before it is deleted. */
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current),
				audit_get_sessionid(current), 0);

out:
	spin_unlock(&x->lock);
}
517
518 static void xfrm_replay_timer_handler(unsigned long data);
519
520 struct xfrm_state *xfrm_state_alloc(void)
521 {
522 struct xfrm_state *x;
523
524 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
525
526 if (x) {
527 atomic_set(&x->refcnt, 1);
528 atomic_set(&x->tunnel_users, 0);
529 INIT_LIST_HEAD(&x->all);
530 INIT_HLIST_NODE(&x->bydst);
531 INIT_HLIST_NODE(&x->bysrc);
532 INIT_HLIST_NODE(&x->byspi);
533 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
534 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
535 (unsigned long)x);
536 x->curlft.add_time = get_seconds();
537 x->lft.soft_byte_limit = XFRM_INF;
538 x->lft.soft_packet_limit = XFRM_INF;
539 x->lft.hard_byte_limit = XFRM_INF;
540 x->lft.hard_packet_limit = XFRM_INF;
541 x->replay_maxage = 0;
542 x->replay_maxdiff = 0;
543 x->inner_mode = NULL;
544 x->inner_mode_iaf = NULL;
545 spin_lock_init(&x->lock);
546 }
547 return x;
548 }
549 EXPORT_SYMBOL(xfrm_state_alloc);
550
/* Queue a dead state for deferred destruction by the gc work item.
 * Called when the last reference is dropped; the state must already
 * have been unhashed and marked dead by __xfrm_state_delete().
 */
void __xfrm_state_destroy(struct xfrm_state *x)
{
	WARN_ON(x->km.state != XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	list_add_tail(&x->gclist, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
561
/* Mark @x dead and unhash it from all tables.  Caller holds x->lock.
 * Returns 0 if this call performed the deletion, -ESRCH if the state
 * was already dead.
 */
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		/* Record the walk generation; gc defers destruction until
		 * all walks started before this point have completed. */
		x->lastused = xfrm_state_walk_ongoing;
		list_del_rcu(&x->all);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
589
/* Delete @x, taking its lock first.  See __xfrm_state_delete(). */
int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
601
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* Flush pre-check: ask the LSM whether every state matching @proto
 * may be deleted.  Called with xfrm_state_lock held.  The first
 * refusal is audited and its error returned, aborting the flush
 * before anything is deleted.
 */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
/* No LSM compiled in: the flush is always permitted. */
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
633
/* Delete every non-kernel SA whose protocol matches @proto.  Each
 * deletion drops xfrm_state_lock (xfrm_state_delete takes x->lock and
 * may call out to key managers), so the bucket is rescanned from the
 * head afterwards; the state is held across the unlocked window.
 * Returns 0, or an LSM denial error from the pre-check.
 */
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
673
674 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
675 {
676 spin_lock_bh(&xfrm_state_lock);
677 si->sadcnt = xfrm_state_num;
678 si->sadhcnt = xfrm_state_hmask;
679 si->sadhmcnt = xfrm_state_hashmax;
680 spin_unlock_bh(&xfrm_state_lock);
681 }
682 EXPORT_SYMBOL(xfrm_sad_getinfo);
683
/* Initialize the temporary selector of larval state @x from the flow
 * and template via the per-family callback.  Returns 0, or -1 when
 * the family has no registered afinfo.
 */
static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}
697
/* Find a state by (daddr, spi, proto) in the by-SPI table.  Caller
 * holds xfrm_state_lock.  Returns the state with a reference held,
 * or NULL if no match.
 */
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto)
			continue;

		/* Family decides how the destination address compares. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
729
/* Find a state by (daddr, saddr, proto) in the by-source table; used
 * for states identified without an SPI.  Caller holds xfrm_state_lock.
 * Returns the state with a reference held, or NULL.
 */
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto)
			continue;

		/* Both endpoint addresses must match for this lookup. */
		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)
					     x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)
					     x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
764
765 static inline struct xfrm_state *
766 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
767 {
768 if (use_spi)
769 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
770 x->id.proto, family);
771 else
772 return __xfrm_state_lookup_byaddr(&x->id.daddr,
773 &x->props.saddr,
774 x->id.proto, family);
775 }
776
777 static void xfrm_hash_grow_check(int have_hash_collision)
778 {
779 if (have_hash_collision &&
780 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
781 xfrm_state_num > xfrm_state_hmask)
782 schedule_work(&xfrm_hash_work);
783 }
784
/* Output-path SA resolution: find the best valid state matching the
 * template @tmpl and policy @pol for flow @fl, or start acquiring one
 * via the key manager.  Returns a referenced state, or NULL with *err
 * set: -EAGAIN (acquire already pending), -EEXIST (template SPI taken),
 * -ENOMEM, or -ESRCH (km_query failed / only dead states matched).
 */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h;
	struct hlist_node *entry;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	/* Deferred put: never drop a reference under xfrm_state_lock. */
	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);
	h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				/* Prefer non-dying states; among equals,
				 * prefer the most recently added. */
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* Refuse to acquire if the requested SPI is in use. */
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			/* Install as larval; expires unless resolved. */
			x->km.state = XFRM_STATE_ACQ;
			list_add_tail(&x->all, &xfrm_state_all);
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	if (to_put)
		xfrm_state_put(to_put);
	return x;
}
901
902 struct xfrm_state *
903 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
904 unsigned short family, u8 mode, u8 proto, u32 reqid)
905 {
906 unsigned int h;
907 struct xfrm_state *rx = NULL, *x = NULL;
908 struct hlist_node *entry;
909
910 spin_lock(&xfrm_state_lock);
911 h = xfrm_dst_hash(daddr, saddr, reqid, family);
912 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
913 if (x->props.family == family &&
914 x->props.reqid == reqid &&
915 !(x->props.flags & XFRM_STATE_WILDRECV) &&
916 xfrm_state_addr_check(x, daddr, saddr, family) &&
917 mode == x->props.mode &&
918 proto == x->id.proto &&
919 x->km.state == XFRM_STATE_VALID) {
920 rx = x;
921 break;
922 }
923 }
924
925 if (rx)
926 xfrm_state_hold(rx);
927 spin_unlock(&xfrm_state_lock);
928
929
930 return rx;
931 }
932 EXPORT_SYMBOL(xfrm_stateonly_find);
933
/* Link @x into the all-states list and the three hash tables, arm its
 * timers and bump the counters.  Caller holds xfrm_state_lock.
 */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	/* A fresh genid invalidates bundles cached on older states. */
	x->genid = ++xfrm_state_genid;

	list_add_tail(&x->all, &xfrm_state_all);

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	/* Larval states without an SPI skip the SPI table. */
	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	/* Run the lifetime timer soon to pick up configured limits. */
	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
966
/* xfrm_state_lock is held */
/* Bump the genid of every state sharing (reqid, daddr, saddr, family)
 * with @xnew, so cached bundles built on the older states are
 * invalidated and traffic migrates to the new state.
 */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
985
/* Unconditionally insert @x into the SAD, invalidating bundles of any
 * states it supersedes.
 */
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
994
/* xfrm_state_lock is held */
/* Find a larval (ACQ, SPI-less) state matching the tuple; when none
 * exists and @create is set, install a new larval state with a
 * temporary host selector and the acquire expiry timer armed.
 * Returns a referenced state, or NULL.
 */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid != reqid ||
		    x->props.mode != mode ||
		    x->props.family != family ||
		    x->km.state != XFRM_STATE_ACQ ||
		    x->id.spi != 0 ||
		    x->id.proto != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		/* Temporary selector covers exactly the single host pair. */
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		}

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
		/* Extra reference is the one returned to the caller. */
		xfrm_state_hold(x);
		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
		add_timer(&x->timer);
		list_add_tail(&x->all, &xfrm_state_all);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(daddr, saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

		xfrm_state_num++;

		xfrm_hash_grow_check(x->bydst.next != NULL);
	}

	return x;
}
1081
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

/* Install a fully-configured state.  Fails with -EEXIST when an
 * equivalent state is already present.  Any matching larval (ACQ)
 * state — located via the km sequence number or by tuple — is deleted
 * after the insert, so the new state replaces it seamlessly.
 */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	/* Deferred put: never drop a reference under xfrm_state_lock. */
	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		to_put = x1;
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		/* Discard a sequence match that refers to a different SA. */
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			to_put = x1;
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	/* Deletion takes x1->lock; must happen outside xfrm_state_lock. */
	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	if (to_put)
		xfrm_state_put(to_put);

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
1137
#ifdef CONFIG_XFRM_MIGRATE
/* Duplicate @orig for migration: copies identity, selector and
 * lifetime config, deep-copies the algorithm descriptors, encap and
 * care-of address, then runs xfrm_init_state() on the copy.  Returns
 * the new state, or NULL with *errp set (when non-NULL) on failure.
 *
 * NOTE(review): the error path only kfree()s the copied sub-objects
 * and @x itself; if xfrm_init_state() can fail after taking type/mode
 * module references or allocating a security context, those would not
 * be released here — confirm xfrm_init_state cleans up after itself
 * on failure.
 */
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc();
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

 error:
	if (errp)
		*errp = err;
	if (x) {
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	/* kfree(NULL) is a no-op, so this is safe when alloc failed. */
	kfree(x);
	return NULL;
}
1214
/* xfrm_state_lock is held */
/* Find the installed SA matching migration request @m on its OLD
 * address pair.  A non-zero reqid selects the by-dst hash, otherwise
 * the by-src hash is scanned.  Returns a held state, or NULL.
 */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
1259
1260 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1261 struct xfrm_migrate *m)
1262 {
1263 struct xfrm_state *xc;
1264 int err;
1265
1266 xc = xfrm_state_clone(x, &err);
1267 if (!xc)
1268 return NULL;
1269
1270 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1271 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1272
1273 /* add state */
1274 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1275 /* a care is needed when the destination address of the
1276 state is to be updated as it is a part of triplet */
1277 xfrm_state_insert(xc);
1278 } else {
1279 if ((err = xfrm_state_add(xc)) < 0)
1280 goto error;
1281 }
1282
1283 return xc;
1284 error:
1285 kfree(xc);
1286 return NULL;
1287 }
1288 EXPORT_SYMBOL(xfrm_state_migrate);
1289 #endif
1290
/* Replace/refresh an installed SA with the parameters carried in @x.
 *
 * If the matching installed state is a larval (ACQ) entry, @x is
 * inserted in its place and the larval entry is deleted.  Otherwise
 * the installed state's encap, care-of address, selector and lifetime
 * configuration are refreshed from @x under the state's own lock.
 *
 * Returns 0 on success, -ESRCH if no matching state exists, -EEXIST
 * for kernel-internal states, -EINVAL if the installed state is no
 * longer valid.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		/* Kernel-owned state must not be replaced from here. */
		to_put = x1;
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* Larval entry: install @x as the real state.  x is
		 * nulled to signal the replacement path below. */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	/* Deferred put: never drop a reference under xfrm_state_lock. */
	if (to_put)
		xfrm_state_put(to_put);

	if (err)
		return err;

	if (!x) {
		/* @x replaced the larval entry; remove the old one. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-arm the lifetime timer against the new limits. */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1359
1360 int xfrm_state_check_expire(struct xfrm_state *x)
1361 {
1362 if (!x->curlft.use_time)
1363 x->curlft.use_time = get_seconds();
1364
1365 if (x->km.state != XFRM_STATE_VALID)
1366 return -EINVAL;
1367
1368 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1369 x->curlft.packets >= x->lft.hard_packet_limit) {
1370 x->km.state = XFRM_STATE_EXPIRED;
1371 mod_timer(&x->timer, jiffies);
1372 return -EINVAL;
1373 }
1374
1375 if (!x->km.dying &&
1376 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1377 x->curlft.packets >= x->lft.soft_packet_limit)) {
1378 x->km.dying = 1;
1379 km_state_expired(x, 0, 0);
1380 }
1381 return 0;
1382 }
1383 EXPORT_SYMBOL(xfrm_state_check_expire);
1384
1385 struct xfrm_state *
1386 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1387 unsigned short family)
1388 {
1389 struct xfrm_state *x;
1390
1391 spin_lock_bh(&xfrm_state_lock);
1392 x = __xfrm_state_lookup(daddr, spi, proto, family);
1393 spin_unlock_bh(&xfrm_state_lock);
1394 return x;
1395 }
1396 EXPORT_SYMBOL(xfrm_state_lookup);
1397
1398 struct xfrm_state *
1399 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1400 u8 proto, unsigned short family)
1401 {
1402 struct xfrm_state *x;
1403
1404 spin_lock_bh(&xfrm_state_lock);
1405 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1406 spin_unlock_bh(&xfrm_state_lock);
1407 return x;
1408 }
1409 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1410
1411 struct xfrm_state *
1412 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1413 xfrm_address_t *daddr, xfrm_address_t *saddr,
1414 int create, unsigned short family)
1415 {
1416 struct xfrm_state *x;
1417
1418 spin_lock_bh(&xfrm_state_lock);
1419 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1420 spin_unlock_bh(&xfrm_state_lock);
1421
1422 return x;
1423 }
1424 EXPORT_SYMBOL(xfrm_find_acq);
1425
1426 #ifdef CONFIG_XFRM_SUB_POLICY
1427 int
1428 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1429 unsigned short family)
1430 {
1431 int err = 0;
1432 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1433 if (!afinfo)
1434 return -EAFNOSUPPORT;
1435
1436 spin_lock_bh(&xfrm_state_lock);
1437 if (afinfo->tmpl_sort)
1438 err = afinfo->tmpl_sort(dst, src, n);
1439 spin_unlock_bh(&xfrm_state_lock);
1440 xfrm_state_put_afinfo(afinfo);
1441 return err;
1442 }
1443 EXPORT_SYMBOL(xfrm_tmpl_sort);
1444
1445 int
1446 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1447 unsigned short family)
1448 {
1449 int err = 0;
1450 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1451 if (!afinfo)
1452 return -EAFNOSUPPORT;
1453
1454 spin_lock_bh(&xfrm_state_lock);
1455 if (afinfo->state_sort)
1456 err = afinfo->state_sort(dst, src, n);
1457 spin_unlock_bh(&xfrm_state_lock);
1458 xfrm_state_put_afinfo(afinfo);
1459 return err;
1460 }
1461 EXPORT_SYMBOL(xfrm_state_sort);
1462 #endif
1463
1464 /* Silly enough, but I'm lazy to build resolution list */
1465
1466 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1467 {
1468 int i;
1469
1470 for (i = 0; i <= xfrm_state_hmask; i++) {
1471 struct hlist_node *entry;
1472 struct xfrm_state *x;
1473
1474 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1475 if (x->km.seq == seq &&
1476 x->km.state == XFRM_STATE_ACQ) {
1477 xfrm_state_hold(x);
1478 return x;
1479 }
1480 }
1481 }
1482 return NULL;
1483 }
1484
1485 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1486 {
1487 struct xfrm_state *x;
1488
1489 spin_lock_bh(&xfrm_state_lock);
1490 x = __xfrm_find_acq_byseq(seq);
1491 spin_unlock_bh(&xfrm_state_lock);
1492 return x;
1493 }
1494 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1495
1496 u32 xfrm_get_acqseq(void)
1497 {
1498 u32 res;
1499 static u32 acqseq;
1500 static DEFINE_SPINLOCK(acqseq_lock);
1501
1502 spin_lock_bh(&acqseq_lock);
1503 res = (++acqseq ? : ++acqseq);
1504 spin_unlock_bh(&acqseq_lock);
1505 return res;
1506 }
1507 EXPORT_SYMBOL(xfrm_get_acqseq);
1508
/* Assign an SPI to larval state @x: the exact value when low == high,
 * otherwise a random probe within [low, high].  On success the state
 * is linked into the by-SPI hash.
 *
 * Returns 0 on success (including when the state already has an SPI),
 * -ENOENT when no free SPI was found or the state is dead.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		/* Caller requested one specific SPI: fail if taken. */
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		/* Random probing, at most high-low+1 attempts. */
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* Publish the state in the by-SPI hash. */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1561
/* Iterate over all states whose protocol matches walk->proto and call
 * @func on each.  The walk is resumable: when @func returns non-zero,
 * a reference to the last visited state is stashed in walk->state and
 * the walk stops; the next call picks up from there.
 *
 * Returns 0 on completion, -ENOENT if nothing matched, or the error
 * from @func.
 */
int xfrm_state_walk(struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *old, *x, *last = NULL;
	int err = 0;

	/* A finished walk (count != 0 but no resume point) is a no-op. */
	if (walk->state == NULL && walk->count != 0)
		return 0;

	old = x = walk->state;
	walk->state = NULL;
	spin_lock_bh(&xfrm_state_lock);
	if (x == NULL)
		x = list_first_entry(&xfrm_state_all, struct xfrm_state, all);
	list_for_each_entry_from(x, &xfrm_state_all, all) {
		if (x->km.state == XFRM_STATE_DEAD)
			continue;
		if (!xfrm_id_proto_match(x->id.proto, walk->proto))
			continue;
		/* @func runs one entry behind so the final entry can be
		 * flagged with count == 0 after the loop. */
		if (last) {
			err = func(last, walk->count, data);
			if (err) {
				xfrm_state_hold(last);
				walk->state = last;
				xfrm_state_walk_ongoing++;
				goto out;
			}
		}
		last = x;
		walk->count++;
	}
	if (walk->count == 0) {
		err = -ENOENT;
		goto out;
	}
	if (last)
		err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	if (old != NULL) {
		/* Drop the resume reference and let GC reap states that
		 * were kept alive only by walkers. */
		xfrm_state_put(old);
		xfrm_state_walk_completed++;
		if (!list_empty(&xfrm_state_gc_leftovers))
			schedule_work(&xfrm_state_gc_work);
	}
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1611
1612 void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1613 {
1614 if (walk->state != NULL) {
1615 xfrm_state_put(walk->state);
1616 walk->state = NULL;
1617 xfrm_state_walk_completed++;
1618 if (!list_empty(&xfrm_state_gc_leftovers))
1619 schedule_work(&xfrm_state_gc_work);
1620 }
1621 }
1622 EXPORT_SYMBOL(xfrm_state_walk_done);
1623
1624
/* Emit an XFRM_MSG_NEWAE notification for @x when warranted.
 * we send notify messages in case
 *  1. we updated on of the sequence numbers, and the seqno difference
 *     is at least x->replay_maxdiff, in this case we also update the
 *     timeout of our timer function
 *  2. if x->replay_maxage has elapsed since last update,
 *     and there were changes
 *
 *  The state structure must be locked!
 */
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Below the diff threshold: suppress, unless a timer
		 * event was deferred earlier (XFRM_TIME_DEFER). */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Nothing changed since the last notification: defer
		 * until the next update. */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* Remember what we reported, then notify key managers. */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1671
1672 static void xfrm_replay_timer_handler(unsigned long data)
1673 {
1674 struct xfrm_state *x = (struct xfrm_state*)data;
1675
1676 spin_lock(&x->lock);
1677
1678 if (x->km.state == XFRM_STATE_VALID) {
1679 if (xfrm_aevent_is_on())
1680 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1681 else
1682 x->xflags |= XFRM_TIME_DEFER;
1683 }
1684
1685 spin_unlock(&x->lock);
1686 }
1687
1688 int xfrm_replay_check(struct xfrm_state *x,
1689 struct sk_buff *skb, __be32 net_seq)
1690 {
1691 u32 diff;
1692 u32 seq = ntohl(net_seq);
1693
1694 if (unlikely(seq == 0))
1695 goto err;
1696
1697 if (likely(seq > x->replay.seq))
1698 return 0;
1699
1700 diff = x->replay.seq - seq;
1701 if (diff >= min_t(unsigned int, x->props.replay_window,
1702 sizeof(x->replay.bitmap) * 8)) {
1703 x->stats.replay_window++;
1704 goto err;
1705 }
1706
1707 if (x->replay.bitmap & (1U << diff)) {
1708 x->stats.replay++;
1709 goto err;
1710 }
1711 return 0;
1712
1713 err:
1714 xfrm_audit_state_replay(x, skb, net_seq);
1715 return -EINVAL;
1716 }
1717
/* Record reception of sequence number @net_seq in the replay window
 * of @x, sliding the window forward when the sequence advances.
 * Caller must hold x->lock.  NOTE(review): presumably only called
 * after xfrm_replay_check() accepted the packet — an unchecked diff
 * >= bitmap width in the else-branch would shift past the bitmap;
 * verify at call sites.
 */
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		/* Window advances: shift the bitmap and mark this seq. */
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		/* Inside the window: just mark the slot. */
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
1738
1739 static LIST_HEAD(xfrm_km_list);
1740 static DEFINE_RWLOCK(xfrm_km_lock);
1741
1742 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1743 {
1744 struct xfrm_mgr *km;
1745
1746 read_lock(&xfrm_km_lock);
1747 list_for_each_entry(km, &xfrm_km_list, list)
1748 if (km->notify_policy)
1749 km->notify_policy(xp, dir, c);
1750 read_unlock(&xfrm_km_lock);
1751 }
1752
1753 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1754 {
1755 struct xfrm_mgr *km;
1756 read_lock(&xfrm_km_lock);
1757 list_for_each_entry(km, &xfrm_km_list, list)
1758 if (km->notify)
1759 km->notify(x, c);
1760 read_unlock(&xfrm_km_lock);
1761 }
1762
1763 EXPORT_SYMBOL(km_policy_notify);
1764 EXPORT_SYMBOL(km_state_notify);
1765
1766 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1767 {
1768 struct km_event c;
1769
1770 c.data.hard = hard;
1771 c.pid = pid;
1772 c.event = XFRM_MSG_EXPIRE;
1773 km_state_notify(x, &c);
1774
1775 if (hard)
1776 wake_up(&km_waitq);
1777 }
1778
1779 EXPORT_SYMBOL(km_state_expired);
1780 /*
1781 * We send to all registered managers regardless of failure
1782 * We are happy with one success
1783 */
1784 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1785 {
1786 int err = -EINVAL, acqret;
1787 struct xfrm_mgr *km;
1788
1789 read_lock(&xfrm_km_lock);
1790 list_for_each_entry(km, &xfrm_km_list, list) {
1791 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1792 if (!acqret)
1793 err = acqret;
1794 }
1795 read_unlock(&xfrm_km_lock);
1796 return err;
1797 }
1798 EXPORT_SYMBOL(km_query);
1799
1800 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1801 {
1802 int err = -EINVAL;
1803 struct xfrm_mgr *km;
1804
1805 read_lock(&xfrm_km_lock);
1806 list_for_each_entry(km, &xfrm_km_list, list) {
1807 if (km->new_mapping)
1808 err = km->new_mapping(x, ipaddr, sport);
1809 if (!err)
1810 break;
1811 }
1812 read_unlock(&xfrm_km_lock);
1813 return err;
1814 }
1815 EXPORT_SYMBOL(km_new_mapping);
1816
1817 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1818 {
1819 struct km_event c;
1820
1821 c.data.hard = hard;
1822 c.pid = pid;
1823 c.event = XFRM_MSG_POLEXPIRE;
1824 km_policy_notify(pol, dir, &c);
1825
1826 if (hard)
1827 wake_up(&km_waitq);
1828 }
1829 EXPORT_SYMBOL(km_policy_expired);
1830
1831 #ifdef CONFIG_XFRM_MIGRATE
1832 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1833 struct xfrm_migrate *m, int num_migrate)
1834 {
1835 int err = -EINVAL;
1836 int ret;
1837 struct xfrm_mgr *km;
1838
1839 read_lock(&xfrm_km_lock);
1840 list_for_each_entry(km, &xfrm_km_list, list) {
1841 if (km->migrate) {
1842 ret = km->migrate(sel, dir, type, m, num_migrate);
1843 if (!ret)
1844 err = ret;
1845 }
1846 }
1847 read_unlock(&xfrm_km_lock);
1848 return err;
1849 }
1850 EXPORT_SYMBOL(km_migrate);
1851 #endif
1852
1853 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1854 {
1855 int err = -EINVAL;
1856 int ret;
1857 struct xfrm_mgr *km;
1858
1859 read_lock(&xfrm_km_lock);
1860 list_for_each_entry(km, &xfrm_km_list, list) {
1861 if (km->report) {
1862 ret = km->report(proto, sel, addr);
1863 if (!ret)
1864 err = ret;
1865 }
1866 }
1867 read_unlock(&xfrm_km_lock);
1868 return err;
1869 }
1870 EXPORT_SYMBOL(km_report);
1871
/* Per-socket IPsec policy setsockopt backend: hand the raw optval
 * blob to each registered key manager until one compiles it into an
 * xfrm_policy, then install that policy on @sk.
 *
 * Note: km->compile_policy returns the policy direction through @err
 * on success (err >= 0), a negative errno otherwise.
 */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		/* err carries the policy direction here. */
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1911
/* Register key manager @km at the tail of the notification list.
 * Always returns 0.
 */
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
1920
/* Remove key manager @km from the notification list.
 * Always returns 0.
 */
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
1929
1930 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1931 {
1932 int err = 0;
1933 if (unlikely(afinfo == NULL))
1934 return -EINVAL;
1935 if (unlikely(afinfo->family >= NPROTO))
1936 return -EAFNOSUPPORT;
1937 write_lock_bh(&xfrm_state_afinfo_lock);
1938 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1939 err = -ENOBUFS;
1940 else
1941 xfrm_state_afinfo[afinfo->family] = afinfo;
1942 write_unlock_bh(&xfrm_state_afinfo_lock);
1943 return err;
1944 }
1945 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1946
1947 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1948 {
1949 int err = 0;
1950 if (unlikely(afinfo == NULL))
1951 return -EINVAL;
1952 if (unlikely(afinfo->family >= NPROTO))
1953 return -EAFNOSUPPORT;
1954 write_lock_bh(&xfrm_state_afinfo_lock);
1955 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1956 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1957 err = -EINVAL;
1958 else
1959 xfrm_state_afinfo[afinfo->family] = NULL;
1960 }
1961 write_unlock_bh(&xfrm_state_afinfo_lock);
1962 return err;
1963 }
1964 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1965
/* Look up the per-family state ops.  On success the afinfo read lock
 * is left HELD; the caller must release it via xfrm_state_put_afinfo().
 * Returns NULL (with the lock released) for unknown or unregistered
 * families.
 */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1977
/* Release the read lock taken by a successful xfrm_state_get_afinfo();
 * @afinfo itself is unused, it only documents the pairing.
 */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1983
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Detach and drop the inner tunnel state referenced by @x.  The
 * tunnel state is deleted when only its creator's reference and ours
 * remain (tunnel_users == 2).
 */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1998
1999 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
2000 {
2001 int res;
2002
2003 spin_lock_bh(&x->lock);
2004 if (x->km.state == XFRM_STATE_VALID &&
2005 x->type && x->type->get_mtu)
2006 res = x->type->get_mtu(x, mtu);
2007 else
2008 res = mtu - x->props.header_len;
2009 spin_unlock_bh(&x->lock);
2010 return res;
2011 }
2012
/* Finish constructing state @x: run the per-family init hook, resolve
 * inner/outer mode objects and the transform type for x->id.proto,
 * call the type's init, and mark the state VALID.
 *
 * With a wildcard selector family (AF_UNSPEC) both IPv4 and IPv6
 * inner modes are resolved and stored, keyed off x->props.family.
 *
 * Returns 0 on success or a negative errno; on failure the state is
 * left not-VALID and partially initialized (callers dispose of it
 * through the normal destructor path).
 */
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *inner_mode;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;

	if (x->sel.family != AF_UNSPEC) {
		/* Concrete selector family: one inner mode.  A family
		 * mismatch is only allowed for tunnel modes. */
		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    family != x->sel.family) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		x->inner_mode = inner_mode;
	} else {
		/* Wildcard selector: need tunnel-capable inner modes
		 * for both address families. */
		struct xfrm_mode *inner_mode_iaf;

		inner_mode = xfrm_get_mode(x->props.mode, AF_INET);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		inner_mode_iaf = xfrm_get_mode(x->props.mode, AF_INET6);
		if (inner_mode_iaf == NULL)
			goto error;

		if (!(inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode_iaf);
			goto error;
		}

		/* inner_mode matches props.family; the other family
		 * goes into inner_mode_iaf. */
		if (x->props.family == AF_INET) {
			x->inner_mode = inner_mode;
			x->inner_mode_iaf = inner_mode_iaf;
		} else {
			x->inner_mode = inner_mode_iaf;
			x->inner_mode_iaf = inner_mode;
		}
	}

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);
2097
/* Boot-time initialization of the SA hash tables and the state GC
 * work item.  Starts with 8 buckets per table (hmask = 7); the tables
 * are presumably resized elsewhere as the SA count grows — confirm
 * against the hash-resize code outside this chunk.
 */
void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}
2113
2114 #ifdef CONFIG_AUDITSYSCALL
/* Append the SA's identifying data (security context if present,
 * src/dst addresses by family, and SPI) to @audit_buf.
 */
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
				     struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = x->security;
	u32 spi = ntohl(x->id.spi);

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch(x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf,
				 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
				 NIPQUAD(x->props.saddr.a4),
				 NIPQUAD(x->id.daddr.a4));
		break;
	case AF_INET6:
		audit_log_format(audit_buf,
				 " src=" NIP6_FMT " dst=" NIP6_FMT,
				 NIP6(*(struct in6_addr *)x->props.saddr.a6),
				 NIP6(*(struct in6_addr *)x->id.daddr.a6));
		break;
	}

	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}
2142
/* Append the packet's addressing (and, for IPv6, flow label) from
 * @skb's network header to @audit_buf.
 */
static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
				      struct audit_buffer *audit_buf)
{
	struct iphdr *iph4;
	struct ipv6hdr *iph6;

	switch (family) {
	case AF_INET:
		iph4 = ip_hdr(skb);
		audit_log_format(audit_buf,
				 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
				 NIPQUAD(iph4->saddr),
				 NIPQUAD(iph4->daddr));
		break;
	case AF_INET6:
		iph6 = ipv6_hdr(skb);
		audit_log_format(audit_buf,
				 " src=" NIP6_FMT " dst=" NIP6_FMT
				 " flowlbl=0x%x%02x%02x",
				 NIP6(iph6->saddr),
				 NIP6(iph6->daddr),
				 iph6->flow_lbl[0] & 0x0f,
				 iph6->flow_lbl[1],
				 iph6->flow_lbl[2]);
		break;
	}
}
2170
2171 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2172 uid_t auid, u32 sessionid, u32 secid)
2173 {
2174 struct audit_buffer *audit_buf;
2175
2176 audit_buf = xfrm_audit_start("SAD-add");
2177 if (audit_buf == NULL)
2178 return;
2179 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2180 xfrm_audit_helper_sainfo(x, audit_buf);
2181 audit_log_format(audit_buf, " res=%u", result);
2182 audit_log_end(audit_buf);
2183 }
2184 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2185
2186 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2187 uid_t auid, u32 sessionid, u32 secid)
2188 {
2189 struct audit_buffer *audit_buf;
2190
2191 audit_buf = xfrm_audit_start("SAD-delete");
2192 if (audit_buf == NULL)
2193 return;
2194 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2195 xfrm_audit_helper_sainfo(x, audit_buf);
2196 audit_log_format(audit_buf, " res=%u", result);
2197 audit_log_end(audit_buf);
2198 }
2199 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2200
2201 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2202 struct sk_buff *skb)
2203 {
2204 struct audit_buffer *audit_buf;
2205 u32 spi;
2206
2207 audit_buf = xfrm_audit_start("SA-replay-overflow");
2208 if (audit_buf == NULL)
2209 return;
2210 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2211 /* don't record the sequence number because it's inherent in this kind
2212 * of audit message */
2213 spi = ntohl(x->id.spi);
2214 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2215 audit_log_end(audit_buf);
2216 }
2217 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2218
2219 static void xfrm_audit_state_replay(struct xfrm_state *x,
2220 struct sk_buff *skb, __be32 net_seq)
2221 {
2222 struct audit_buffer *audit_buf;
2223 u32 spi;
2224
2225 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2226 if (audit_buf == NULL)
2227 return;
2228 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2229 spi = ntohl(x->id.spi);
2230 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2231 spi, spi, ntohl(net_seq));
2232 audit_log_end(audit_buf);
2233 }
2234
2235 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2236 {
2237 struct audit_buffer *audit_buf;
2238
2239 audit_buf = xfrm_audit_start("SA-notfound");
2240 if (audit_buf == NULL)
2241 return;
2242 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2243 audit_log_end(audit_buf);
2244 }
2245 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2246
2247 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2248 __be32 net_spi, __be32 net_seq)
2249 {
2250 struct audit_buffer *audit_buf;
2251 u32 spi;
2252
2253 audit_buf = xfrm_audit_start("SA-notfound");
2254 if (audit_buf == NULL)
2255 return;
2256 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2257 spi = ntohl(net_spi);
2258 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2259 spi, spi, ntohl(net_seq));
2260 audit_log_end(audit_buf);
2261 }
2262 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2263
2264 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2265 struct sk_buff *skb, u8 proto)
2266 {
2267 struct audit_buffer *audit_buf;
2268 __be32 net_spi;
2269 __be32 net_seq;
2270
2271 audit_buf = xfrm_audit_start("SA-icv-failure");
2272 if (audit_buf == NULL)
2273 return;
2274 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2275 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2276 u32 spi = ntohl(net_spi);
2277 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2278 spi, spi, ntohl(net_seq));
2279 }
2280 audit_log_end(audit_buf);
2281 }
2282 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2283 #endif /* CONFIG_AUDITSYSCALL */
This page took 0.075531 seconds and 4 git commands to generate.