aaafcee02fc596b39d8556e0ed793225fe57b733
[deliverable/linux.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <asm/uaccess.h>
24
25 #include "xfrm_hash.h"
26
/* Netlink socket used by the key-manager interface (xfrm_user). */
27 struct sock *xfrm_nl;
28 EXPORT_SYMBOL(xfrm_nl);
29
/* Sysctl defaults for async-event rate limiting and ACQ lifetime. */
30 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
31 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
32
33 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
34 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
35
36 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
37
38 /* Each xfrm_state may be linked to two tables:
39
40 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
41 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
42 destination/tunnel endpoint. (output)
43 */
44
/* Protects all of the hash tables, counters and xfrm_state_all below. */
45 static DEFINE_SPINLOCK(xfrm_state_lock);
46
47 /* Hash table to find appropriate SA towards given target (endpoint
48 * of tunnel or destination of transport mode) allowed by selector.
49 *
50 * Main use is finding SA after policy selected tunnel or transport mode.
51 * Also, it can be used by ah/esp icmp error handler to find offending SA.
52 */
53 static LIST_HEAD(xfrm_state_all);
54 static struct hlist_head *xfrm_state_bydst __read_mostly;
55 static struct hlist_head *xfrm_state_bysrc __read_mostly;
56 static struct hlist_head *xfrm_state_byspi __read_mostly;
57 static unsigned int xfrm_state_hmask __read_mostly;
58 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
59 static unsigned int xfrm_state_num;
60 static unsigned int xfrm_state_genid;
61
62 /* Counter indicating ongoing walk, protected by xfrm_state_lock. */
63 static unsigned long xfrm_state_walk_ongoing;
64 /* Counter indicating walk completion, protected by xfrm_cfg_mutex. */
65 static unsigned long xfrm_state_walk_completed;
66
67 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
68 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
69
/* Without audit support the replay-audit hook compiles away to nothing. */
70 #ifdef CONFIG_AUDITSYSCALL
71 static void xfrm_audit_state_replay(struct xfrm_state *x,
72 struct sk_buff *skb, __be32 net_seq);
73 #else
74 #define xfrm_audit_state_replay(x, s, sq) do { ; } while (0)
75 #endif /* CONFIG_AUDITSYSCALL */
76
76
77 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
78 xfrm_address_t *saddr,
79 u32 reqid,
80 unsigned short family)
81 {
82 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
83 }
84
85 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
86 xfrm_address_t *saddr,
87 unsigned short family)
88 {
89 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
90 }
91
92 static inline unsigned int
93 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
94 {
95 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
96 }
97
/*
 * Move every state on one old bydst chain into the three new tables,
 * rehashing with the new mask.  Caller holds xfrm_state_lock (see
 * xfrm_hash_resize); the _safe iterator is required because each
 * hlist_add_head() unlinks the node from the old chain.
 */
98 static void xfrm_hash_transfer(struct hlist_head *list,
99 struct hlist_head *ndsttable,
100 struct hlist_head *nsrctable,
101 struct hlist_head *nspitable,
102 unsigned int nhashmask)
103 {
104 struct hlist_node *entry, *tmp;
105 struct xfrm_state *x;
106
107 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
108 unsigned int h;
109
110 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
111 x->props.reqid, x->props.family,
112 nhashmask);
113 hlist_add_head(&x->bydst, ndsttable+h);
114
115 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
116 x->props.family,
117 nhashmask);
118 hlist_add_head(&x->bysrc, nsrctable+h);
119
/* Only states with an assigned SPI live in the by-SPI table. */
120 if (x->id.spi) {
121 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
122 x->id.proto, x->props.family,
123 nhashmask);
124 hlist_add_head(&x->byspi, nspitable+h);
125 }
126 }
127 }
128
129 static unsigned long xfrm_hash_new_size(void)
130 {
131 return ((xfrm_state_hmask + 1) << 1) *
132 sizeof(struct hlist_head);
133 }
134
135 static DEFINE_MUTEX(hash_resize_mutex);
136
/*
 * Workqueue handler that doubles the size of the three state hash
 * tables.  Allocation happens outside any spinlock; the actual
 * transfer and table swap are done under xfrm_state_lock so lookups
 * never see a half-populated table.  hash_resize_mutex serializes
 * concurrent resize requests.
 */
137 static void xfrm_hash_resize(struct work_struct *__unused)
138 {
139 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
140 unsigned long nsize, osize;
141 unsigned int nhashmask, ohashmask;
142 int i;
143
144 mutex_lock(&hash_resize_mutex);
145
146 nsize = xfrm_hash_new_size();
147 ndst = xfrm_hash_alloc(nsize);
148 if (!ndst)
149 goto out_unlock;
150 nsrc = xfrm_hash_alloc(nsize);
151 if (!nsrc) {
152 xfrm_hash_free(ndst, nsize);
153 goto out_unlock;
154 }
155 nspi = xfrm_hash_alloc(nsize);
156 if (!nspi) {
157 xfrm_hash_free(ndst, nsize);
158 xfrm_hash_free(nsrc, nsize);
159 goto out_unlock;
160 }
161
162 spin_lock_bh(&xfrm_state_lock);
163
/* Every state hangs off some bydst chain, so walking the old bydst
 * table rehashes all three link fields of every state. */
164 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
165 for (i = xfrm_state_hmask; i >= 0; i--)
166 xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
167 nhashmask);
168
/* Swap in the new tables atomically w.r.t. xfrm_state_lock holders. */
169 odst = xfrm_state_bydst;
170 osrc = xfrm_state_bysrc;
171 ospi = xfrm_state_byspi;
172 ohashmask = xfrm_state_hmask;
173
174 xfrm_state_bydst = ndst;
175 xfrm_state_bysrc = nsrc;
176 xfrm_state_byspi = nspi;
177 xfrm_state_hmask = nhashmask;
178
179 spin_unlock_bh(&xfrm_state_lock);
180
/* Old tables are empty now (transfer unlinked everything); free them
 * outside the lock. */
181 osize = (ohashmask + 1) * sizeof(struct hlist_head);
182 xfrm_hash_free(odst, osize);
183 xfrm_hash_free(osrc, osize);
184 xfrm_hash_free(ospi, osize);
185
186 out_unlock:
187 mutex_unlock(&hash_resize_mutex);
188 }
189
190 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
191
/* Key managers sleep here waiting for state changes (see wake_up calls). */
192 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
193 EXPORT_SYMBOL(km_waitq);
194
/* Protects the per-family afinfo registration table below. */
195 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
196 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
197
/* Deferred destruction: dead states queue on gc_list; entries whose
 * concurrent walks have not finished park on gc_leftovers. */
198 static struct work_struct xfrm_state_gc_work;
199 static LIST_HEAD(xfrm_state_gc_leftovers);
200 static LIST_HEAD(xfrm_state_gc_list);
201 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
202
203 int __xfrm_state_delete(struct xfrm_state *x);
204
205 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
206 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
207
/*
 * Look up the afinfo for @family and, on success, return it with
 * xfrm_state_afinfo_lock held for writing (released later via
 * xfrm_state_unlock_afinfo).  On failure — family out of range or not
 * registered — NULL is returned and the lock is NOT held.
 */
208 static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family)
209 {
210 struct xfrm_state_afinfo *afinfo;
211 if (unlikely(family >= NPROTO))
212 return NULL;
213 write_lock_bh(&xfrm_state_afinfo_lock);
214 afinfo = xfrm_state_afinfo[family];
/* Drop the lock ourselves when there is nothing to hand back. */
215 if (unlikely(!afinfo))
216 write_unlock_bh(&xfrm_state_afinfo_lock);
217 return afinfo;
218 }
219
/* Release the write lock acquired by xfrm_state_lock_afinfo(). */
220 static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
221 __releases(xfrm_state_afinfo_lock)
222 {
223 write_unlock_bh(&xfrm_state_afinfo_lock);
224 }
225
/*
 * Register an xfrm transform type (AH/ESP/IPcomp/...) for @family.
 * Returns 0, -EAFNOSUPPORT if the family has no afinfo, or -EEXIST if
 * a type is already registered for this protocol number.
 */
226 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
227 {
228 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
229 const struct xfrm_type **typemap;
230 int err = 0;
231
232 if (unlikely(afinfo == NULL))
233 return -EAFNOSUPPORT;
234 typemap = afinfo->type_map;
235
236 if (likely(typemap[type->proto] == NULL))
237 typemap[type->proto] = type;
238 else
239 err = -EEXIST;
240 xfrm_state_unlock_afinfo(afinfo);
241 return err;
242 }
243 EXPORT_SYMBOL(xfrm_register_type);
244
/*
 * Remove a previously registered transform type.  Returns 0, or
 * -ENOENT if @type is not the one currently registered for its
 * protocol number (-EAFNOSUPPORT if the family has no afinfo).
 */
245 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
246 {
247 struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family);
248 const struct xfrm_type **typemap;
249 int err = 0;
250
251 if (unlikely(afinfo == NULL))
252 return -EAFNOSUPPORT;
253 typemap = afinfo->type_map;
254
255 if (unlikely(typemap[type->proto] != type))
256 err = -ENOENT;
257 else
258 typemap[type->proto] = NULL;
259 xfrm_state_unlock_afinfo(afinfo);
260 return err;
261 }
262 EXPORT_SYMBOL(xfrm_unregister_type);
263
/*
 * Look up the transform type for (@proto, @family), taking a module
 * reference on it.  If the type is absent, try once to load the
 * providing module ("xfrm-type-<family>-<proto>") and retry.  Returns
 * NULL when the type is unavailable; otherwise the caller must drop
 * the reference with xfrm_put_type().
 */
264 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
265 {
266 struct xfrm_state_afinfo *afinfo;
267 const struct xfrm_type **typemap;
268 const struct xfrm_type *type;
269 int modload_attempted = 0;
270
271 retry:
272 afinfo = xfrm_state_get_afinfo(family);
273 if (unlikely(afinfo == NULL))
274 return NULL;
275 typemap = afinfo->type_map;
276
277 type = typemap[proto];
/* Treat a type whose module is unloading as absent. */
278 if (unlikely(type && !try_module_get(type->owner)))
279 type = NULL;
280 if (!type && !modload_attempted) {
281 xfrm_state_put_afinfo(afinfo);
282 request_module("xfrm-type-%d-%d", family, proto);
283 modload_attempted = 1;
284 goto retry;
285 }
286
287 xfrm_state_put_afinfo(afinfo);
288 return type;
289 }
290
/* Drop the module reference taken by xfrm_get_type(). */
291 static void xfrm_put_type(const struct xfrm_type *type)
292 {
293 module_put(type->owner);
294 }
295
/*
 * Register an encapsulation mode (transport/tunnel/BEET/...) for
 * @family.  Pins the afinfo owner's module while the mode stays
 * registered.  Returns 0, -EINVAL for a bad encap id, -EAFNOSUPPORT,
 * -EEXIST if the slot is taken, or -ENOENT if the afinfo module is
 * going away.
 */
296 int xfrm_register_mode(struct xfrm_mode *mode, int family)
297 {
298 struct xfrm_state_afinfo *afinfo;
299 struct xfrm_mode **modemap;
300 int err;
301
302 if (unlikely(mode->encap >= XFRM_MODE_MAX))
303 return -EINVAL;
304
305 afinfo = xfrm_state_lock_afinfo(family);
306 if (unlikely(afinfo == NULL))
307 return -EAFNOSUPPORT;
308
309 err = -EEXIST;
310 modemap = afinfo->mode_map;
311 if (modemap[mode->encap])
312 goto out;
313
314 err = -ENOENT;
315 if (!try_module_get(afinfo->owner))
316 goto out;
317
318 mode->afinfo = afinfo;
319 modemap[mode->encap] = mode;
320 err = 0;
321
322 out:
323 xfrm_state_unlock_afinfo(afinfo);
324 return err;
325 }
326 EXPORT_SYMBOL(xfrm_register_mode);
327
/*
 * Remove a registered encapsulation mode, releasing the afinfo module
 * reference taken at registration.  Returns 0, -EINVAL for a bad
 * encap id, -EAFNOSUPPORT, or -ENOENT if @mode is not the registered
 * one.
 */
328 int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
329 {
330 struct xfrm_state_afinfo *afinfo;
331 struct xfrm_mode **modemap;
332 int err;
333
334 if (unlikely(mode->encap >= XFRM_MODE_MAX))
335 return -EINVAL;
336
337 afinfo = xfrm_state_lock_afinfo(family);
338 if (unlikely(afinfo == NULL))
339 return -EAFNOSUPPORT;
340
341 err = -ENOENT;
342 modemap = afinfo->mode_map;
343 if (likely(modemap[mode->encap] == mode)) {
344 modemap[mode->encap] = NULL;
345 module_put(mode->afinfo->owner);
346 err = 0;
347 }
348
349 xfrm_state_unlock_afinfo(afinfo);
350 return err;
351 }
352 EXPORT_SYMBOL(xfrm_unregister_mode);
353
/*
 * Look up the mode object for (@encap, @family) with a module
 * reference held, attempting one module load
 * ("xfrm-mode-<family>-<encap>") if absent.  Mirrors xfrm_get_type();
 * caller releases with xfrm_put_mode().  Returns NULL on failure.
 */
354 static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
355 {
356 struct xfrm_state_afinfo *afinfo;
357 struct xfrm_mode *mode;
358 int modload_attempted = 0;
359
360 if (unlikely(encap >= XFRM_MODE_MAX))
361 return NULL;
362
363 retry:
364 afinfo = xfrm_state_get_afinfo(family);
365 if (unlikely(afinfo == NULL))
366 return NULL;
367
368 mode = afinfo->mode_map[encap];
/* A mode whose module is unloading counts as absent. */
369 if (unlikely(mode && !try_module_get(mode->owner)))
370 mode = NULL;
371 if (!mode && !modload_attempted) {
372 xfrm_state_put_afinfo(afinfo);
373 request_module("xfrm-mode-%d-%d", family, encap);
374 modload_attempted = 1;
375 goto retry;
376 }
377
378 xfrm_state_put_afinfo(afinfo);
379 return mode;
380 }
381
/* Drop the module reference taken by xfrm_get_mode(). */
382 static void xfrm_put_mode(struct xfrm_mode *mode)
383 {
384 module_put(mode->owner);
385 }
386
/*
 * Final teardown of a dead state: stop its timers, free all owned
 * allocations, drop mode/type module references, release the security
 * context, and free the state itself.  Runs from the GC work item
 * (process context — del_timer_sync may sleep-wait on timer handlers).
 */
387 static void xfrm_state_gc_destroy(struct xfrm_state *x)
388 {
389 del_timer_sync(&x->timer);
390 del_timer_sync(&x->rtimer);
391 kfree(x->aalg);
392 kfree(x->ealg);
393 kfree(x->calg);
394 kfree(x->encap);
395 kfree(x->coaddr);
396 if (x->inner_mode)
397 xfrm_put_mode(x->inner_mode);
398 if (x->inner_mode_iaf)
399 xfrm_put_mode(x->inner_mode_iaf);
400 if (x->outer_mode)
401 xfrm_put_mode(x->outer_mode);
402 if (x->type) {
/* Type destructor runs before the module ref is dropped. */
403 x->type->destructor(x);
404 xfrm_put_type(x->type);
405 }
406 security_xfrm_state_free(x);
407 kfree(x);
408 }
409
410 static void xfrm_state_gc_task(struct work_struct *data)
411 {
412 struct xfrm_state *x, *tmp;
413 unsigned long completed;
414
415 mutex_lock(&xfrm_cfg_mutex);
416 spin_lock_bh(&xfrm_state_gc_lock);
417 list_splice_tail_init(&xfrm_state_gc_list, &xfrm_state_gc_leftovers);
418 spin_unlock_bh(&xfrm_state_gc_lock);
419
420 completed = xfrm_state_walk_completed;
421 mutex_unlock(&xfrm_cfg_mutex);
422
423 list_for_each_entry_safe(x, tmp, &xfrm_state_gc_leftovers, gclist) {
424 if ((long)(x->lastused - completed) > 0)
425 break;
426 xfrm_state_gc_destroy(x);
427 }
428
429 wake_up(&km_waitq);
430 }
431
432 static inline unsigned long make_jiffies(long secs)
433 {
434 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
435 return MAX_SCHEDULE_TIMEOUT-1;
436 else
437 return secs*HZ;
438 }
439
/*
 * Per-state lifetime timer.  Checks hard add/use expiry (state is
 * expired and deleted), soft add/use expiry (key manager is warned,
 * x->km.dying set), and re-arms itself for the nearest future
 * deadline.  Runs with x->lock held; plain spin_lock (no _bh) —
 * presumably because timer callbacks already run in softirq context,
 * TODO confirm against the rest of the file's locking.
 */
440 static void xfrm_timer_handler(unsigned long data)
441 {
442 struct xfrm_state *x = (struct xfrm_state*)data;
443 unsigned long now = get_seconds();
444 long next = LONG_MAX;
445 int warn = 0;
446 int err = 0;
447
448 spin_lock(&x->lock);
449 if (x->km.state == XFRM_STATE_DEAD)
450 goto out;
451 if (x->km.state == XFRM_STATE_EXPIRED)
452 goto expired;
/* Hard limits: crossing one expires the state immediately. */
453 if (x->lft.hard_add_expires_seconds) {
454 long tmo = x->lft.hard_add_expires_seconds +
455 x->curlft.add_time - now;
456 if (tmo <= 0)
457 goto expired;
458 if (tmo < next)
459 next = tmo;
460 }
461 if (x->lft.hard_use_expires_seconds) {
462 long tmo = x->lft.hard_use_expires_seconds +
463 (x->curlft.use_time ? : now) - now;
464 if (tmo <= 0)
465 goto expired;
466 if (tmo < next)
467 next = tmo;
468 }
/* Soft expiry already signalled earlier; only hard limits remain. */
469 if (x->km.dying)
470 goto resched;
/* Soft limits: crossing one only warns the key manager. */
471 if (x->lft.soft_add_expires_seconds) {
472 long tmo = x->lft.soft_add_expires_seconds +
473 x->curlft.add_time - now;
474 if (tmo <= 0)
475 warn = 1;
476 else if (tmo < next)
477 next = tmo;
478 }
479 if (x->lft.soft_use_expires_seconds) {
480 long tmo = x->lft.soft_use_expires_seconds +
481 (x->curlft.use_time ? : now) - now;
482 if (tmo <= 0)
483 warn = 1;
484 else if (tmo < next)
485 next = tmo;
486 }
487
488 x->km.dying = warn;
489 if (warn)
490 km_state_expired(x, 0, 0);
491 resched:
492 if (next != LONG_MAX)
493 mod_timer(&x->timer, jiffies + make_jiffies(next));
494
495 goto out;
496
497 expired:
/* An ACQ state with no SPI just flips to EXPIRED and polls again. */
498 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
499 x->km.state = XFRM_STATE_EXPIRED;
500 wake_up(&km_waitq);
501 next = 2;
502 goto resched;
503 }
504
505 err = __xfrm_state_delete(x);
506 if (!err && x->id.spi)
507 km_state_expired(x, 1, 0);
508
509 xfrm_audit_state_delete(x, err ? 0 : 1,
510 audit_get_loginuid(current),
511 audit_get_sessionid(current), 0);
512
513 out:
514 spin_unlock(&x->lock);
515 }
516
517 static void xfrm_replay_timer_handler(unsigned long data);
518
/*
 * Allocate and initialize a new xfrm_state with refcount 1, infinite
 * byte/packet lifetimes, armed-but-not-started timers, and empty hash
 * linkage.  GFP_ATOMIC because callers may hold spinlocks (e.g.
 * xfrm_state_find under xfrm_state_lock).  Returns NULL on OOM.
 */
519 struct xfrm_state *xfrm_state_alloc(void)
520 {
521 struct xfrm_state *x;
522
523 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
524
525 if (x) {
526 atomic_set(&x->refcnt, 1);
527 atomic_set(&x->tunnel_users, 0);
528 INIT_LIST_HEAD(&x->all);
529 INIT_HLIST_NODE(&x->bydst);
530 INIT_HLIST_NODE(&x->bysrc);
531 INIT_HLIST_NODE(&x->byspi);
532 setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
533 setup_timer(&x->rtimer, xfrm_replay_timer_handler,
534 (unsigned long)x);
535 x->curlft.add_time = get_seconds();
/* XFRM_INF == no byte/packet lifetime limit by default. */
536 x->lft.soft_byte_limit = XFRM_INF;
537 x->lft.soft_packet_limit = XFRM_INF;
538 x->lft.hard_byte_limit = XFRM_INF;
539 x->lft.hard_packet_limit = XFRM_INF;
540 x->replay_maxage = 0;
541 x->replay_maxdiff = 0;
542 x->inner_mode = NULL;
543 x->inner_mode_iaf = NULL;
544 spin_lock_init(&x->lock);
545 }
546 return x;
547 }
548 EXPORT_SYMBOL(xfrm_state_alloc);
549
/*
 * Called when the last reference to a DEAD state is dropped: queue it
 * for deferred destruction on the GC work item rather than freeing it
 * here (teardown needs process context, see xfrm_state_gc_destroy).
 */
550 void __xfrm_state_destroy(struct xfrm_state *x)
551 {
552 WARN_ON(x->km.state != XFRM_STATE_DEAD);
553
554 spin_lock_bh(&xfrm_state_gc_lock);
555 list_add_tail(&x->gclist, &xfrm_state_gc_list);
556 spin_unlock_bh(&xfrm_state_gc_lock);
557 schedule_work(&xfrm_state_gc_work);
558 }
559 EXPORT_SYMBOL(__xfrm_state_destroy);
560
/*
 * Mark @x dead and unlink it from the global list and all hash
 * tables, then drop the creation reference.  Caller holds x->lock
 * (see xfrm_state_delete); xfrm_state_lock is taken here for the
 * table manipulation.  Returns 0, or -ESRCH if already dead.
 */
561 int __xfrm_state_delete(struct xfrm_state *x)
562 {
563 int err = -ESRCH;
564
565 if (x->km.state != XFRM_STATE_DEAD) {
566 x->km.state = XFRM_STATE_DEAD;
567 spin_lock(&xfrm_state_lock);
/* Record the walk generation so GC waits for in-flight walks. */
568 x->lastused = xfrm_state_walk_ongoing;
569 list_del_rcu(&x->all);
570 hlist_del(&x->bydst);
571 hlist_del(&x->bysrc);
572 if (x->id.spi)
573 hlist_del(&x->byspi);
574 xfrm_state_num--;
575 spin_unlock(&xfrm_state_lock);
576
577 /* All xfrm_state objects are created by xfrm_state_alloc.
578 * The xfrm_state_alloc call gives a reference, and that
579 * is what we are dropping here.
580 */
581 xfrm_state_put(x);
582 err = 0;
583 }
584
585 return err;
586 }
587 EXPORT_SYMBOL(__xfrm_state_delete);
588
589 int xfrm_state_delete(struct xfrm_state *x)
590 {
591 int err;
592
593 spin_lock_bh(&x->lock);
594 err = __xfrm_state_delete(x);
595 spin_unlock_bh(&x->lock);
596
597 return err;
598 }
599 EXPORT_SYMBOL(xfrm_state_delete);
600
/*
 * Pre-flight for xfrm_state_flush(): with LSM support, verify the
 * caller may delete every state matching @proto before any is
 * touched; the first denial is audited and its error returned.
 * Without LSM support this is a no-op.  Caller holds xfrm_state_lock.
 */
601 #ifdef CONFIG_SECURITY_NETWORK_XFRM
602 static inline int
603 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
604 {
605 int i, err = 0;
606
607 for (i = 0; i <= xfrm_state_hmask; i++) {
608 struct hlist_node *entry;
609 struct xfrm_state *x;
610
611 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
612 if (xfrm_id_proto_match(x->id.proto, proto) &&
613 (err = security_xfrm_state_delete(x)) != 0) {
614 xfrm_audit_state_delete(x, 0,
615 audit_info->loginuid,
616 audit_info->sessionid,
617 audit_info->secid);
618 return err;
619 }
620 }
621 }
622
623 return err;
624 }
625 #else
626 static inline int
627 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
628 {
629 return 0;
630 }
631 #endif
632
/*
 * Delete every non-kernel-owned state whose protocol matches @proto,
 * auditing each deletion.  xfrm_state_lock must be dropped around
 * xfrm_state_delete() (which takes x->lock and may sleep-adjacent
 * paths), so the chain is restarted from its head after each delete —
 * a temporary reference keeps @x valid across the unlocked window.
 */
633 int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
634 {
635 int i, err = 0;
636
637 spin_lock_bh(&xfrm_state_lock);
638 err = xfrm_state_flush_secctx_check(proto, audit_info);
639 if (err)
640 goto out;
641
642 for (i = 0; i <= xfrm_state_hmask; i++) {
643 struct hlist_node *entry;
644 struct xfrm_state *x;
645 restart:
646 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
647 if (!xfrm_state_kern(x) &&
648 xfrm_id_proto_match(x->id.proto, proto)) {
649 xfrm_state_hold(x);
650 spin_unlock_bh(&xfrm_state_lock);
651
652 err = xfrm_state_delete(x);
653 xfrm_audit_state_delete(x, err ? 0 : 1,
654 audit_info->loginuid,
655 audit_info->sessionid,
656 audit_info->secid);
657 xfrm_state_put(x);
658
/* Chain may have changed while unlocked; restart it. */
659 spin_lock_bh(&xfrm_state_lock);
660 goto restart;
661 }
662 }
663 }
664 err = 0;
665
666 out:
667 spin_unlock_bh(&xfrm_state_lock);
668 wake_up(&km_waitq);
669 return err;
670 }
671 EXPORT_SYMBOL(xfrm_state_flush);
672
673 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
674 {
675 spin_lock_bh(&xfrm_state_lock);
676 si->sadcnt = xfrm_state_num;
677 si->sadhcnt = xfrm_state_hmask;
678 si->sadhmcnt = xfrm_state_hashmax;
679 spin_unlock_bh(&xfrm_state_lock);
680 }
681 EXPORT_SYMBOL(xfrm_sad_getinfo);
682
683 static int
684 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
685 struct xfrm_tmpl *tmpl,
686 xfrm_address_t *daddr, xfrm_address_t *saddr,
687 unsigned short family)
688 {
689 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
690 if (!afinfo)
691 return -1;
692 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
693 xfrm_state_put_afinfo(afinfo);
694 return 0;
695 }
696
/*
 * SPI-based lookup: find the state matching (daddr, spi, proto,
 * family) in the by-SPI table and return it with a reference held, or
 * NULL.  Caller holds xfrm_state_lock.
 */
697 static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
698 {
699 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
700 struct xfrm_state *x;
701 struct hlist_node *entry;
702
703 hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
704 if (x->props.family != family ||
705 x->id.spi != spi ||
706 x->id.proto != proto)
707 continue;
708
/* Address comparison is family-specific. */
709 switch (family) {
710 case AF_INET:
711 if (x->id.daddr.a4 != daddr->a4)
712 continue;
713 break;
714 case AF_INET6:
715 if (!ipv6_addr_equal((struct in6_addr *)daddr,
716 (struct in6_addr *)
717 x->id.daddr.a6))
718 continue;
719 break;
720 }
721
722 xfrm_state_hold(x);
723 return x;
724 }
725
726 return NULL;
727 }
728
/*
 * Address-pair lookup for protocols without an SPI: find the state
 * matching (daddr, saddr, proto, family) in the by-source table and
 * return it with a reference held, or NULL.  Caller holds
 * xfrm_state_lock.
 */
729 static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
730 {
731 unsigned int h = xfrm_src_hash(daddr, saddr, family);
732 struct xfrm_state *x;
733 struct hlist_node *entry;
734
735 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
736 if (x->props.family != family ||
737 x->id.proto != proto)
738 continue;
739
740 switch (family) {
741 case AF_INET:
742 if (x->id.daddr.a4 != daddr->a4 ||
743 x->props.saddr.a4 != saddr->a4)
744 continue;
745 break;
746 case AF_INET6:
747 if (!ipv6_addr_equal((struct in6_addr *)daddr,
748 (struct in6_addr *)
749 x->id.daddr.a6) ||
750 !ipv6_addr_equal((struct in6_addr *)saddr,
751 (struct in6_addr *)
752 x->props.saddr.a6))
753 continue;
754 break;
755 }
756
757 xfrm_state_hold(x);
758 return x;
759 }
760
761 return NULL;
762 }
763
764 static inline struct xfrm_state *
765 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
766 {
767 if (use_spi)
768 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
769 x->id.proto, family);
770 else
771 return __xfrm_state_lookup_byaddr(&x->id.daddr,
772 &x->props.saddr,
773 x->id.proto, family);
774 }
775
776 static void xfrm_hash_grow_check(int have_hash_collision)
777 {
778 if (have_hash_collision &&
779 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
780 xfrm_state_num > xfrm_state_hmask)
781 schedule_work(&xfrm_hash_work);
782 }
783
/*
 * Main output-path SA resolution: find the best VALID state matching
 * the template/policy/flow, preferring non-dying states and then the
 * most recently added.  If nothing matches and no acquire is pending,
 * create an ACQ placeholder state, notify key managers via
 * km_query(), and arm its acquire-expiry timer.  On failure *err is
 * set (-EAGAIN while an acquire is in progress); on success a held
 * reference is returned.
 */
784 struct xfrm_state *
785 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
786 struct flowi *fl, struct xfrm_tmpl *tmpl,
787 struct xfrm_policy *pol, int *err,
788 unsigned short family)
789 {
790 unsigned int h;
791 struct hlist_node *entry;
792 struct xfrm_state *x, *x0, *to_put;
793 int acquire_in_progress = 0;
794 int error = 0;
795 struct xfrm_state *best = NULL;
796
/* to_put collects a state to release after dropping the lock. */
797 to_put = NULL;
798
799 spin_lock_bh(&xfrm_state_lock);
800 h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
801 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
802 if (x->props.family == family &&
803 x->props.reqid == tmpl->reqid &&
804 !(x->props.flags & XFRM_STATE_WILDRECV) &&
805 xfrm_state_addr_check(x, daddr, saddr, family) &&
806 tmpl->mode == x->props.mode &&
807 tmpl->id.proto == x->id.proto &&
808 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
809 /* Resolution logic:
810 1. There is a valid state with matching selector.
811 Done.
812 2. Valid state with inappropriate selector. Skip.
813
814 Entering area of "sysdeps".
815
816 3. If state is not valid, selector is temporary,
817 it selects only session which triggered
818 previous resolution. Key manager will do
819 something to install a state with proper
820 selector.
821 */
822 if (x->km.state == XFRM_STATE_VALID) {
823 if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
824 !security_xfrm_state_pol_flow_match(x, pol, fl))
825 continue;
/* Prefer non-dying states, then the freshest add_time. */
826 if (!best ||
827 best->km.dying > x->km.dying ||
828 (best->km.dying == x->km.dying &&
829 best->curlft.add_time < x->curlft.add_time))
830 best = x;
831 } else if (x->km.state == XFRM_STATE_ACQ) {
832 acquire_in_progress = 1;
833 } else if (x->km.state == XFRM_STATE_ERROR ||
834 x->km.state == XFRM_STATE_EXPIRED) {
835 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
836 security_xfrm_state_pol_flow_match(x, pol, fl))
837 error = -ESRCH;
838 }
839 }
840 }
841
842 x = best;
843 if (!x && !error && !acquire_in_progress) {
/* A same-SPI state under a different reqid/mode is a conflict. */
844 if (tmpl->id.spi &&
845 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
846 tmpl->id.proto, family)) != NULL) {
847 to_put = x0;
848 error = -EEXIST;
849 goto out;
850 }
851 x = xfrm_state_alloc();
852 if (x == NULL) {
853 error = -ENOMEM;
854 goto out;
855 }
856 /* Initialize temporary selector matching only
857 * to current session. */
858 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
859
860 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
861 if (error) {
862 x->km.state = XFRM_STATE_DEAD;
863 to_put = x;
864 x = NULL;
865 goto out;
866 }
867
/* Key manager accepted the query: insert the ACQ placeholder and
 * let it expire after sysctl_xfrm_acq_expires seconds if never
 * completed. */
868 if (km_query(x, tmpl, pol) == 0) {
869 x->km.state = XFRM_STATE_ACQ;
870 list_add_tail(&x->all, &xfrm_state_all);
871 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
872 h = xfrm_src_hash(daddr, saddr, family);
873 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
874 if (x->id.spi) {
875 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
876 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
877 }
878 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
879 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
880 add_timer(&x->timer);
881 xfrm_state_num++;
882 xfrm_hash_grow_check(x->bydst.next != NULL);
883 } else {
884 x->km.state = XFRM_STATE_DEAD;
885 to_put = x;
886 x = NULL;
887 error = -ESRCH;
888 }
889 }
890 out:
891 if (x)
892 xfrm_state_hold(x);
893 else
894 *err = acquire_in_progress ? -EAGAIN : error;
895 spin_unlock_bh(&xfrm_state_lock);
/* Release outside the lock: put may trigger deferred destruction. */
896 if (to_put)
897 xfrm_state_put(to_put);
898 return x;
899 }
900
/*
 * Simplified lookup that only returns an existing VALID state for the
 * exact (addresses, family, mode, proto, reqid) tuple — no selector
 * or policy checks and no ACQ creation.  Returns a held reference or
 * NULL.
 */
901 struct xfrm_state *
902 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
903 unsigned short family, u8 mode, u8 proto, u32 reqid)
904 {
905 unsigned int h;
906 struct xfrm_state *rx = NULL, *x = NULL;
907 struct hlist_node *entry;
908
909 spin_lock(&xfrm_state_lock);
910 h = xfrm_dst_hash(daddr, saddr, reqid, family);
911 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
912 if (x->props.family == family &&
913 x->props.reqid == reqid &&
914 !(x->props.flags & XFRM_STATE_WILDRECV) &&
915 xfrm_state_addr_check(x, daddr, saddr, family) &&
916 mode == x->props.mode &&
917 proto == x->id.proto &&
918 x->km.state == XFRM_STATE_VALID) {
919 rx = x;
920 break;
921 }
922 }
923
924 if (rx)
925 xfrm_state_hold(rx);
926 spin_unlock(&xfrm_state_lock);
927
928
929 return rx;
930 }
931 EXPORT_SYMBOL(xfrm_stateonly_find);
932
/*
 * Link @x into the global list and all applicable hash tables, stamp
 * it with a fresh generation id, and arm its lifetime/replay timers.
 * Caller holds xfrm_state_lock.
 */
933 static void __xfrm_state_insert(struct xfrm_state *x)
934 {
935 unsigned int h;
936
937 x->genid = ++xfrm_state_genid;
938
939 list_add_tail(&x->all, &xfrm_state_all);
940
941 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
942 x->props.reqid, x->props.family);
943 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
944
945 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
946 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
947
948 if (x->id.spi) {
949 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
950 x->props.family);
951
952 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
953 }
954
/* Fire the lifetime timer soon to pick up the initial deadlines. */
955 mod_timer(&x->timer, jiffies + HZ);
956 if (x->replay_maxage)
957 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
958
959 wake_up(&km_waitq);
960
961 xfrm_state_num++;
962
963 xfrm_hash_grow_check(x->bydst.next != NULL);
964 }
965
966 /* xfrm_state_lock is held */
/*
 * Bump the generation id of every state sharing @xnew's
 * (daddr, saddr, reqid, family) so cached bundles built on them are
 * invalidated and re-resolved against the new state.
 */
967 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
968 {
969 unsigned short family = xnew->props.family;
970 u32 reqid = xnew->props.reqid;
971 struct xfrm_state *x;
972 struct hlist_node *entry;
973 unsigned int h;
974
975 h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
976 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
977 if (x->props.family == family &&
978 x->props.reqid == reqid &&
979 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
980 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
981 x->genid = xfrm_state_genid;
982 }
983 }
984
985 void xfrm_state_insert(struct xfrm_state *x)
986 {
987 spin_lock_bh(&xfrm_state_lock);
988 __xfrm_state_bump_genids(x);
989 __xfrm_state_insert(x);
990 spin_unlock_bh(&xfrm_state_lock);
991 }
992 EXPORT_SYMBOL(xfrm_state_insert);
993
994 /* xfrm_state_lock is held */
/*
 * Find an ACQ (acquire-in-progress, SPI==0) state for the given tuple
 * and return it with a reference held.  When none exists and @create
 * is set, allocate one with a host-only temporary selector, insert it
 * into the bydst/bysrc tables (no SPI yet, so no byspi entry), and
 * arm its acquire-expiry timer.  Returns NULL on lookup miss with
 * !create, or on allocation failure.
 */
995 static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
996 {
997 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
998 struct hlist_node *entry;
999 struct xfrm_state *x;
1000
1001 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1002 if (x->props.reqid != reqid ||
1003 x->props.mode != mode ||
1004 x->props.family != family ||
1005 x->km.state != XFRM_STATE_ACQ ||
1006 x->id.spi != 0 ||
1007 x->id.proto != proto)
1008 continue;
1009
1010 switch (family) {
1011 case AF_INET:
1012 if (x->id.daddr.a4 != daddr->a4 ||
1013 x->props.saddr.a4 != saddr->a4)
1014 continue;
1015 break;
1016 case AF_INET6:
1017 if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
1018 (struct in6_addr *)daddr) ||
1019 !ipv6_addr_equal((struct in6_addr *)
1020 x->props.saddr.a6,
1021 (struct in6_addr *)saddr))
1022 continue;
1023 break;
1024 }
1025
1026 xfrm_state_hold(x);
1027 return x;
1028 }
1029
1030 if (!create)
1031 return NULL;
1032
1033 x = xfrm_state_alloc();
1034 if (likely(x)) {
/* Temporary selector: exact host-to-host match (/32 or /128). */
1035 switch (family) {
1036 case AF_INET:
1037 x->sel.daddr.a4 = daddr->a4;
1038 x->sel.saddr.a4 = saddr->a4;
1039 x->sel.prefixlen_d = 32;
1040 x->sel.prefixlen_s = 32;
1041 x->props.saddr.a4 = saddr->a4;
1042 x->id.daddr.a4 = daddr->a4;
1043 break;
1044
1045 case AF_INET6:
1046 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
1047 (struct in6_addr *)daddr);
1048 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
1049 (struct in6_addr *)saddr);
1050 x->sel.prefixlen_d = 128;
1051 x->sel.prefixlen_s = 128;
1052 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
1053 (struct in6_addr *)saddr);
1054 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
1055 (struct in6_addr *)daddr);
1056 break;
1057 }
1058
1059 x->km.state = XFRM_STATE_ACQ;
1060 x->id.proto = proto;
1061 x->props.family = family;
1062 x->props.mode = mode;
1063 x->props.reqid = reqid;
1064 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
/* Extra hold: one reference for the caller, one for the tables. */
1065 xfrm_state_hold(x);
1066 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
1067 add_timer(&x->timer);
1068 list_add_tail(&x->all, &xfrm_state_all);
1069 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
1070 h = xfrm_src_hash(daddr, saddr, family);
1071 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
1072
1073 xfrm_state_num++;
1074
1075 xfrm_hash_grow_check(x->bydst.next != NULL);
1076 }
1077
1078 return x;
1079 }
1080
1081 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
1082
/*
 * Add a fully specified state to the SAD.  Fails with -EEXIST if an
 * equivalent state is already present.  Any matching ACQ placeholder
 * (located by km.seq or by tuple) is deleted after the new state is
 * inserted, so waiters transfer over to the real SA.
 */
1083 int xfrm_state_add(struct xfrm_state *x)
1084 {
1085 struct xfrm_state *x1, *to_put;
1086 int family;
1087 int err;
1088 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1089
1090 family = x->props.family;
1091
1092 to_put = NULL;
1093
1094 spin_lock_bh(&xfrm_state_lock);
1095
1096 x1 = __xfrm_state_locate(x, use_spi, family);
1097 if (x1) {
1098 to_put = x1;
1099 x1 = NULL;
1100 err = -EEXIST;
1101 goto out;
1102 }
1103
/* Try to pair the new SA with the ACQ that requested it; ignore a
 * sequence match that points at a different proto/destination. */
1104 if (use_spi && x->km.seq) {
1105 x1 = __xfrm_find_acq_byseq(x->km.seq);
1106 if (x1 && ((x1->id.proto != x->id.proto) ||
1107 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1108 to_put = x1;
1109 x1 = NULL;
1110 }
1111 }
1112
1113 if (use_spi && !x1)
1114 x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
1115 x->id.proto,
1116 &x->id.daddr, &x->props.saddr, 0);
1117
1118 __xfrm_state_bump_genids(x);
1119 __xfrm_state_insert(x);
1120 err = 0;
1121
1122 out:
1123 spin_unlock_bh(&xfrm_state_lock);
1124
/* The superseded ACQ state is deleted outside the SAD lock. */
1125 if (x1) {
1126 xfrm_state_delete(x1);
1127 xfrm_state_put(x1);
1128 }
1129
1130 if (to_put)
1131 xfrm_state_put(to_put);
1132
1133 return err;
1134 }
1135 EXPORT_SYMBOL(xfrm_state_add);
1136
1137 #ifdef CONFIG_XFRM_MIGRATE
/*
 * Deep-copy @orig into a fresh state for migration: identity,
 * selector, lifetimes, per-family props, all algorithm descriptors,
 * NAT-T encapsulation and care-of address, then re-run
 * xfrm_init_state() on the copy.  On any failure, *errp is set,
 * everything allocated so far is freed, and NULL is returned.
 */
1138 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
1139 {
1140 int err = -ENOMEM;
1141 struct xfrm_state *x = xfrm_state_alloc();
1142 if (!x)
1143 goto error;
1144
1145 memcpy(&x->id, &orig->id, sizeof(x->id));
1146 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1147 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1148 x->props.mode = orig->props.mode;
1149 x->props.replay_window = orig->props.replay_window;
1150 x->props.reqid = orig->props.reqid;
1151 x->props.family = orig->props.family;
1152 x->props.saddr = orig->props.saddr;
1153
1154 if (orig->aalg) {
1155 x->aalg = xfrm_algo_clone(orig->aalg);
1156 if (!x->aalg)
1157 goto error;
1158 }
1159 x->props.aalgo = orig->props.aalgo;
1160
1161 if (orig->ealg) {
1162 x->ealg = xfrm_algo_clone(orig->ealg);
1163 if (!x->ealg)
1164 goto error;
1165 }
1166 x->props.ealgo = orig->props.ealgo;
1167
1168 if (orig->calg) {
1169 x->calg = xfrm_algo_clone(orig->calg);
1170 if (!x->calg)
1171 goto error;
1172 }
1173 x->props.calgo = orig->props.calgo;
1174
1175 if (orig->encap) {
1176 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
1177 if (!x->encap)
1178 goto error;
1179 }
1180
1181 if (orig->coaddr) {
1182 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1183 GFP_KERNEL);
1184 if (!x->coaddr)
1185 goto error;
1186 }
1187
1188 err = xfrm_init_state(x);
1189 if (err)
1190 goto error;
1191
1192 x->props.flags = orig->props.flags;
1193
1194 x->curlft.add_time = orig->curlft.add_time;
1195 x->km.state = orig->km.state;
1196 x->km.seq = orig->km.seq;
1197
1198 return x;
1199
1200 error:
1201 if (errp)
1202 *errp = err;
/* kfree(NULL) is a no-op, so unallocated members are safe here. */
1203 if (x) {
1204 kfree(x->aalg);
1205 kfree(x->ealg);
1206 kfree(x->calg);
1207 kfree(x->encap);
1208 kfree(x->coaddr);
1209 }
1210 kfree(x);
1211 return NULL;
1212 }
1213
1214 /* xfrm_state_lock is held */
1215 struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
1216 {
1217 unsigned int h;
1218 struct xfrm_state *x;
1219 struct hlist_node *entry;
1220
1221 if (m->reqid) {
1222 h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
1223 m->reqid, m->old_family);
1224 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1225 if (x->props.mode != m->mode ||
1226 x->id.proto != m->proto)
1227 continue;
1228 if (m->reqid && x->props.reqid != m->reqid)
1229 continue;
1230 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1231 m->old_family) ||
1232 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1233 m->old_family))
1234 continue;
1235 xfrm_state_hold(x);
1236 return x;
1237 }
1238 } else {
1239 h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
1240 m->old_family);
1241 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
1242 if (x->props.mode != m->mode ||
1243 x->id.proto != m->proto)
1244 continue;
1245 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1246 m->old_family) ||
1247 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1248 m->old_family))
1249 continue;
1250 xfrm_state_hold(x);
1251 return x;
1252 }
1253 }
1254
1255 return NULL;
1256 }
1257 EXPORT_SYMBOL(xfrm_migrate_state_find);
1258
1259 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1260 struct xfrm_migrate *m)
1261 {
1262 struct xfrm_state *xc;
1263 int err;
1264
1265 xc = xfrm_state_clone(x, &err);
1266 if (!xc)
1267 return NULL;
1268
1269 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1270 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1271
1272 /* add state */
1273 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1274 /* a care is needed when the destination address of the
1275 state is to be updated as it is a part of triplet */
1276 xfrm_state_insert(xc);
1277 } else {
1278 if ((err = xfrm_state_add(xc)) < 0)
1279 goto error;
1280 }
1281
1282 return xc;
1283 error:
1284 kfree(xc);
1285 return NULL;
1286 }
1287 EXPORT_SYMBOL(xfrm_state_migrate);
1288 #endif
1289
/* Update an installed SA in place from the caller-supplied template @x.
 *
 * Locates the installed state matching @x.  A kernel-owned state is
 * refused with -EEXIST; a still-pending ACQ placeholder is replaced by
 * inserting @x itself and deleting the placeholder.  Otherwise the
 * mutable parts of the installed state (encap template, care-of
 * address, selector, lifetimes) are refreshed under its own lock.
 * Returns 0 on success, -ESRCH when no match exists, -EINVAL when the
 * match is no longer VALID.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	to_put = NULL;

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		/* Kernel-internal (tunnel) states may not be replaced. */
		to_put = x1;
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* Match is only an acquire placeholder: install @x and
		 * remember (via x == NULL below) to drop the placeholder. */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	/* Reference drops happen outside the table lock. */
	if (to_put)
		xfrm_state_put(to_put);

	if (err)
		return err;

	if (!x) {
		/* @x replaced the ACQ entry; delete the placeholder. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-arm the state timer so the new lifetimes take effect. */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
1358
1359 int xfrm_state_check_expire(struct xfrm_state *x)
1360 {
1361 if (!x->curlft.use_time)
1362 x->curlft.use_time = get_seconds();
1363
1364 if (x->km.state != XFRM_STATE_VALID)
1365 return -EINVAL;
1366
1367 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1368 x->curlft.packets >= x->lft.hard_packet_limit) {
1369 x->km.state = XFRM_STATE_EXPIRED;
1370 mod_timer(&x->timer, jiffies);
1371 return -EINVAL;
1372 }
1373
1374 if (!x->km.dying &&
1375 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1376 x->curlft.packets >= x->lft.soft_packet_limit)) {
1377 x->km.dying = 1;
1378 km_state_expired(x, 0, 0);
1379 }
1380 return 0;
1381 }
1382 EXPORT_SYMBOL(xfrm_state_check_expire);
1383
1384 struct xfrm_state *
1385 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1386 unsigned short family)
1387 {
1388 struct xfrm_state *x;
1389
1390 spin_lock_bh(&xfrm_state_lock);
1391 x = __xfrm_state_lookup(daddr, spi, proto, family);
1392 spin_unlock_bh(&xfrm_state_lock);
1393 return x;
1394 }
1395 EXPORT_SYMBOL(xfrm_state_lookup);
1396
1397 struct xfrm_state *
1398 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1399 u8 proto, unsigned short family)
1400 {
1401 struct xfrm_state *x;
1402
1403 spin_lock_bh(&xfrm_state_lock);
1404 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1405 spin_unlock_bh(&xfrm_state_lock);
1406 return x;
1407 }
1408 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1409
1410 struct xfrm_state *
1411 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1412 xfrm_address_t *daddr, xfrm_address_t *saddr,
1413 int create, unsigned short family)
1414 {
1415 struct xfrm_state *x;
1416
1417 spin_lock_bh(&xfrm_state_lock);
1418 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1419 spin_unlock_bh(&xfrm_state_lock);
1420
1421 return x;
1422 }
1423 EXPORT_SYMBOL(xfrm_find_acq);
1424
1425 #ifdef CONFIG_XFRM_SUB_POLICY
1426 int
1427 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1428 unsigned short family)
1429 {
1430 int err = 0;
1431 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1432 if (!afinfo)
1433 return -EAFNOSUPPORT;
1434
1435 spin_lock_bh(&xfrm_state_lock);
1436 if (afinfo->tmpl_sort)
1437 err = afinfo->tmpl_sort(dst, src, n);
1438 spin_unlock_bh(&xfrm_state_lock);
1439 xfrm_state_put_afinfo(afinfo);
1440 return err;
1441 }
1442 EXPORT_SYMBOL(xfrm_tmpl_sort);
1443
1444 int
1445 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1446 unsigned short family)
1447 {
1448 int err = 0;
1449 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1450 if (!afinfo)
1451 return -EAFNOSUPPORT;
1452
1453 spin_lock_bh(&xfrm_state_lock);
1454 if (afinfo->state_sort)
1455 err = afinfo->state_sort(dst, src, n);
1456 spin_unlock_bh(&xfrm_state_lock);
1457 xfrm_state_put_afinfo(afinfo);
1458 return err;
1459 }
1460 EXPORT_SYMBOL(xfrm_state_sort);
1461 #endif
1462
1463 /* Silly enough, but I'm lazy to build resolution list */
1464
1465 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1466 {
1467 int i;
1468
1469 for (i = 0; i <= xfrm_state_hmask; i++) {
1470 struct hlist_node *entry;
1471 struct xfrm_state *x;
1472
1473 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1474 if (x->km.seq == seq &&
1475 x->km.state == XFRM_STATE_ACQ) {
1476 xfrm_state_hold(x);
1477 return x;
1478 }
1479 }
1480 }
1481 return NULL;
1482 }
1483
1484 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1485 {
1486 struct xfrm_state *x;
1487
1488 spin_lock_bh(&xfrm_state_lock);
1489 x = __xfrm_find_acq_byseq(seq);
1490 spin_unlock_bh(&xfrm_state_lock);
1491 return x;
1492 }
1493 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1494
1495 u32 xfrm_get_acqseq(void)
1496 {
1497 u32 res;
1498 static u32 acqseq;
1499 static DEFINE_SPINLOCK(acqseq_lock);
1500
1501 spin_lock_bh(&acqseq_lock);
1502 res = (++acqseq ? : ++acqseq);
1503 spin_unlock_bh(&acqseq_lock);
1504 return res;
1505 }
1506 EXPORT_SYMBOL(xfrm_get_acqseq);
1507
/* Allocate an SPI for @x from the inclusive host-order range [low, high].
 *
 * With low == high the single requested value is used if free; otherwise
 * up to high-low+1 random probes are made.  On success the state is
 * linked into the byspi hash so lookups can find it.  Returns 0 on
 * success (including when the state already has an SPI) and -ENOENT
 * when no free SPI was found or the state is already dead.
 */
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
{
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto unlock;

	/* Already has an SPI: nothing to do. */
	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		/* Caller requested one specific SPI; fail if it is taken. */
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			goto unlock;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		/* Random probing, bounded by the range size so the loop
		 * terminates even when the range is saturated. */
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		/* Publish the state in the byspi hash. */
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);

		err = 0;
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1560
/* Iterate over all states, calling @func for every live state matching
 * walk->proto.  The walk is resumable: when @func returns non-zero the
 * current position is pinned in @walk (with a reference held) so a
 * later call continues from there.  Returns 0 on completion, -ENOENT
 * if nothing matched, or the non-zero value returned by @func.
 */
int xfrm_state_walk(struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *old, *x, *last = NULL;
	int err = 0;

	/* A finished walk (no saved position but entries already counted)
	 * is a no-op. */
	if (walk->state == NULL && walk->count != 0)
		return 0;

	old = x = walk->state;
	walk->state = NULL;
	spin_lock_bh(&xfrm_state_lock);
	if (x == NULL)
		x = list_first_entry(&xfrm_state_all, struct xfrm_state, all);
	list_for_each_entry_from(x, &xfrm_state_all, all) {
		if (x->km.state == XFRM_STATE_DEAD)
			continue;
		if (!xfrm_id_proto_match(x->id.proto, walk->proto))
			continue;
		/* @func runs one entry behind the cursor so that the final
		 * entry can be delivered with count == 0 below. */
		if (last) {
			err = func(last, walk->count, data);
			if (err) {
				/* Interrupted: pin the resume position. */
				xfrm_state_hold(last);
				walk->state = last;
				xfrm_state_walk_ongoing++;
				goto out;
			}
		}
		last = x;
		walk->count++;
	}
	if (walk->count == 0) {
		err = -ENOENT;
		goto out;
	}
	if (last)
		err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	if (old != NULL) {
		xfrm_state_put(old);
		xfrm_state_walk_completed++;
		/* Reap states whose destruction was deferred while walks
		 * were in flight. */
		if (!list_empty(&xfrm_state_gc_leftovers))
			schedule_work(&xfrm_state_gc_work);
	}
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1610
1611 void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1612 {
1613 if (walk->state != NULL) {
1614 xfrm_state_put(walk->state);
1615 walk->state = NULL;
1616 xfrm_state_walk_completed++;
1617 if (!list_empty(&xfrm_state_gc_leftovers))
1618 schedule_work(&xfrm_state_gc_work);
1619 }
1620 }
1621 EXPORT_SYMBOL(xfrm_state_walk_done);
1622
1623
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 * 1. we updated one of the sequence numbers, and the seqno difference
	 * is at least x->replay_maxdiff, in this case we also update the
	 * timeout of our timer function
	 * 2. if x->replay_maxage has elapsed since last update,
	 * and there were changes
	 *
	 * The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Suppress the event while both seqno deltas are below
		 * replay_maxdiff — unless a timer-driven notify was deferred
		 * earlier (XFRM_TIME_DEFER), in which case deliver it now
		 * as a timeout event. */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Nothing changed since the last notify: defer again. */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* Snapshot what we report so future deltas are relative to it. */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
1670
1671 static void xfrm_replay_timer_handler(unsigned long data)
1672 {
1673 struct xfrm_state *x = (struct xfrm_state*)data;
1674
1675 spin_lock(&x->lock);
1676
1677 if (x->km.state == XFRM_STATE_VALID) {
1678 if (xfrm_aevent_is_on())
1679 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1680 else
1681 x->xflags |= XFRM_TIME_DEFER;
1682 }
1683
1684 spin_unlock(&x->lock);
1685 }
1686
1687 int xfrm_replay_check(struct xfrm_state *x,
1688 struct sk_buff *skb, __be32 net_seq)
1689 {
1690 u32 diff;
1691 u32 seq = ntohl(net_seq);
1692
1693 if (unlikely(seq == 0))
1694 goto err;
1695
1696 if (likely(seq > x->replay.seq))
1697 return 0;
1698
1699 diff = x->replay.seq - seq;
1700 if (diff >= min_t(unsigned int, x->props.replay_window,
1701 sizeof(x->replay.bitmap) * 8)) {
1702 x->stats.replay_window++;
1703 goto err;
1704 }
1705
1706 if (x->replay.bitmap & (1U << diff)) {
1707 x->stats.replay++;
1708 goto err;
1709 }
1710 return 0;
1711
1712 err:
1713 xfrm_audit_state_replay(x, skb, net_seq);
1714 return -EINVAL;
1715 }
1716
1717 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1718 {
1719 u32 diff;
1720 u32 seq = ntohl(net_seq);
1721
1722 if (seq > x->replay.seq) {
1723 diff = seq - x->replay.seq;
1724 if (diff < x->props.replay_window)
1725 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1726 else
1727 x->replay.bitmap = 1;
1728 x->replay.seq = seq;
1729 } else {
1730 diff = x->replay.seq - seq;
1731 x->replay.bitmap |= (1U << diff);
1732 }
1733
1734 if (xfrm_aevent_is_on())
1735 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1736 }
1737
1738 static LIST_HEAD(xfrm_km_list);
1739 static DEFINE_RWLOCK(xfrm_km_lock);
1740
1741 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1742 {
1743 struct xfrm_mgr *km;
1744
1745 read_lock(&xfrm_km_lock);
1746 list_for_each_entry(km, &xfrm_km_list, list)
1747 if (km->notify_policy)
1748 km->notify_policy(xp, dir, c);
1749 read_unlock(&xfrm_km_lock);
1750 }
1751
1752 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1753 {
1754 struct xfrm_mgr *km;
1755 read_lock(&xfrm_km_lock);
1756 list_for_each_entry(km, &xfrm_km_list, list)
1757 if (km->notify)
1758 km->notify(x, c);
1759 read_unlock(&xfrm_km_lock);
1760 }
1761
1762 EXPORT_SYMBOL(km_policy_notify);
1763 EXPORT_SYMBOL(km_state_notify);
1764
1765 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1766 {
1767 struct km_event c;
1768
1769 c.data.hard = hard;
1770 c.pid = pid;
1771 c.event = XFRM_MSG_EXPIRE;
1772 km_state_notify(x, &c);
1773
1774 if (hard)
1775 wake_up(&km_waitq);
1776 }
1777
1778 EXPORT_SYMBOL(km_state_expired);
1779 /*
1780 * We send to all registered managers regardless of failure
1781 * We are happy with one success
1782 */
1783 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1784 {
1785 int err = -EINVAL, acqret;
1786 struct xfrm_mgr *km;
1787
1788 read_lock(&xfrm_km_lock);
1789 list_for_each_entry(km, &xfrm_km_list, list) {
1790 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1791 if (!acqret)
1792 err = acqret;
1793 }
1794 read_unlock(&xfrm_km_lock);
1795 return err;
1796 }
1797 EXPORT_SYMBOL(km_query);
1798
1799 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1800 {
1801 int err = -EINVAL;
1802 struct xfrm_mgr *km;
1803
1804 read_lock(&xfrm_km_lock);
1805 list_for_each_entry(km, &xfrm_km_list, list) {
1806 if (km->new_mapping)
1807 err = km->new_mapping(x, ipaddr, sport);
1808 if (!err)
1809 break;
1810 }
1811 read_unlock(&xfrm_km_lock);
1812 return err;
1813 }
1814 EXPORT_SYMBOL(km_new_mapping);
1815
1816 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1817 {
1818 struct km_event c;
1819
1820 c.data.hard = hard;
1821 c.pid = pid;
1822 c.event = XFRM_MSG_POLEXPIRE;
1823 km_policy_notify(pol, dir, &c);
1824
1825 if (hard)
1826 wake_up(&km_waitq);
1827 }
1828 EXPORT_SYMBOL(km_policy_expired);
1829
1830 #ifdef CONFIG_XFRM_MIGRATE
1831 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1832 struct xfrm_migrate *m, int num_migrate)
1833 {
1834 int err = -EINVAL;
1835 int ret;
1836 struct xfrm_mgr *km;
1837
1838 read_lock(&xfrm_km_lock);
1839 list_for_each_entry(km, &xfrm_km_list, list) {
1840 if (km->migrate) {
1841 ret = km->migrate(sel, dir, type, m, num_migrate);
1842 if (!ret)
1843 err = ret;
1844 }
1845 }
1846 read_unlock(&xfrm_km_lock);
1847 return err;
1848 }
1849 EXPORT_SYMBOL(km_migrate);
1850 #endif
1851
1852 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1853 {
1854 int err = -EINVAL;
1855 int ret;
1856 struct xfrm_mgr *km;
1857
1858 read_lock(&xfrm_km_lock);
1859 list_for_each_entry(km, &xfrm_km_list, list) {
1860 if (km->report) {
1861 ret = km->report(proto, sel, addr);
1862 if (!ret)
1863 err = ret;
1864 }
1865 }
1866 read_unlock(&xfrm_km_lock);
1867 return err;
1868 }
1869 EXPORT_SYMBOL(km_report);
1870
/* setsockopt() helper: compile a userland-supplied per-socket policy
 * blob via the registered key managers and attach it to @sk.
 *
 * @optlen is bounded by PAGE_SIZE.  The first key manager whose
 * compile_policy() succeeds (err >= 0) wins.  Returns 0 on success or
 * a negative errno.
 */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		/* On success err carries the policy direction, which is
		 * passed straight through to the insert below. */
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1910
/* Hook a key manager into the global notification list; never fails. */
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
1919
/* Remove a key manager from the global notification list; never fails. */
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
1928
1929 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1930 {
1931 int err = 0;
1932 if (unlikely(afinfo == NULL))
1933 return -EINVAL;
1934 if (unlikely(afinfo->family >= NPROTO))
1935 return -EAFNOSUPPORT;
1936 write_lock_bh(&xfrm_state_afinfo_lock);
1937 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1938 err = -ENOBUFS;
1939 else
1940 xfrm_state_afinfo[afinfo->family] = afinfo;
1941 write_unlock_bh(&xfrm_state_afinfo_lock);
1942 return err;
1943 }
1944 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1945
1946 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1947 {
1948 int err = 0;
1949 if (unlikely(afinfo == NULL))
1950 return -EINVAL;
1951 if (unlikely(afinfo->family >= NPROTO))
1952 return -EAFNOSUPPORT;
1953 write_lock_bh(&xfrm_state_afinfo_lock);
1954 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1955 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1956 err = -EINVAL;
1957 else
1958 xfrm_state_afinfo[afinfo->family] = NULL;
1959 }
1960 write_unlock_bh(&xfrm_state_afinfo_lock);
1961 return err;
1962 }
1963 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1964
1965 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1966 {
1967 struct xfrm_state_afinfo *afinfo;
1968 if (unlikely(family >= NPROTO))
1969 return NULL;
1970 read_lock(&xfrm_state_afinfo_lock);
1971 afinfo = xfrm_state_afinfo[family];
1972 if (unlikely(!afinfo))
1973 read_unlock(&xfrm_state_afinfo_lock);
1974 return afinfo;
1975 }
1976
/* Release the read lock taken by a successful xfrm_state_get_afinfo(). */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
	__releases(xfrm_state_afinfo_lock)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1982
1983 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1984 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1985 {
1986 if (x->tunnel) {
1987 struct xfrm_state *t = x->tunnel;
1988
1989 if (atomic_read(&t->tunnel_users) == 2)
1990 xfrm_state_delete(t);
1991 atomic_dec(&t->tunnel_users);
1992 xfrm_state_put(t);
1993 x->tunnel = NULL;
1994 }
1995 }
1996 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1997
1998 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1999 {
2000 int res;
2001
2002 spin_lock_bh(&x->lock);
2003 if (x->km.state == XFRM_STATE_VALID &&
2004 x->type && x->type->get_mtu)
2005 res = x->type->get_mtu(x, mtu);
2006 else
2007 res = mtu - x->props.header_len;
2008 spin_unlock_bh(&x->lock);
2009 return res;
2010 }
2011
/* Finish construction of a state: apply per-family init flags, resolve
 * the inner and outer modes and the transform type for
 * x->props.family, then run the type's init_state().  On success
 * x->km.state becomes XFRM_STATE_VALID.  On failure a negative errno
 * is returned; references already attached to @x are released when the
 * state is destroyed.
 */
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_mode *inner_mode;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;

	if (x->sel.family != AF_UNSPEC) {
		/* The selector pins the inner address family. */
		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
		if (inner_mode == NULL)
			goto error;

		/* Only tunnel modes may change the address family. */
		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    family != x->sel.family) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		x->inner_mode = inner_mode;
	} else {
		struct xfrm_mode *inner_mode_iaf;

		/* Wildcard selector: keep both IPv4 and IPv6 inner modes
		 * so either inner family can be handled; only tunnel modes
		 * are acceptable here. */
		inner_mode = xfrm_get_mode(x->props.mode, AF_INET);
		if (inner_mode == NULL)
			goto error;

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode);
			goto error;
		}

		inner_mode_iaf = xfrm_get_mode(x->props.mode, AF_INET6);
		if (inner_mode_iaf == NULL)
			goto error;

		if (!(inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)) {
			xfrm_put_mode(inner_mode_iaf);
			goto error;
		}

		/* inner_mode matches props.family; the other family goes
		 * into inner_mode_iaf. */
		if (x->props.family == AF_INET) {
			x->inner_mode = inner_mode;
			x->inner_mode_iaf = inner_mode_iaf;
		} else {
			x->inner_mode = inner_mode_iaf;
			x->inner_mode_iaf = inner_mode;
		}
	}

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->outer_mode = xfrm_get_mode(x->props.mode, family);
	if (x->outer_mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}

EXPORT_SYMBOL(xfrm_init_state);
2096
2097 void __init xfrm_state_init(void)
2098 {
2099 unsigned int sz;
2100
2101 sz = sizeof(struct hlist_head) * 8;
2102
2103 xfrm_state_bydst = xfrm_hash_alloc(sz);
2104 xfrm_state_bysrc = xfrm_hash_alloc(sz);
2105 xfrm_state_byspi = xfrm_hash_alloc(sz);
2106 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
2107 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
2108 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2109
2110 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
2111 }
2112
2113 #ifdef CONFIG_AUDITSYSCALL
2114 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2115 struct audit_buffer *audit_buf)
2116 {
2117 struct xfrm_sec_ctx *ctx = x->security;
2118 u32 spi = ntohl(x->id.spi);
2119
2120 if (ctx)
2121 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2122 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2123
2124 switch(x->props.family) {
2125 case AF_INET:
2126 audit_log_format(audit_buf,
2127 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2128 NIPQUAD(x->props.saddr.a4),
2129 NIPQUAD(x->id.daddr.a4));
2130 break;
2131 case AF_INET6:
2132 audit_log_format(audit_buf,
2133 " src=" NIP6_FMT " dst=" NIP6_FMT,
2134 NIP6(*(struct in6_addr *)x->props.saddr.a6),
2135 NIP6(*(struct in6_addr *)x->id.daddr.a6));
2136 break;
2137 }
2138
2139 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2140 }
2141
2142 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2143 struct audit_buffer *audit_buf)
2144 {
2145 struct iphdr *iph4;
2146 struct ipv6hdr *iph6;
2147
2148 switch (family) {
2149 case AF_INET:
2150 iph4 = ip_hdr(skb);
2151 audit_log_format(audit_buf,
2152 " src=" NIPQUAD_FMT " dst=" NIPQUAD_FMT,
2153 NIPQUAD(iph4->saddr),
2154 NIPQUAD(iph4->daddr));
2155 break;
2156 case AF_INET6:
2157 iph6 = ipv6_hdr(skb);
2158 audit_log_format(audit_buf,
2159 " src=" NIP6_FMT " dst=" NIP6_FMT
2160 " flowlbl=0x%x%02x%02x",
2161 NIP6(iph6->saddr),
2162 NIP6(iph6->daddr),
2163 iph6->flow_lbl[0] & 0x0f,
2164 iph6->flow_lbl[1],
2165 iph6->flow_lbl[2]);
2166 break;
2167 }
2168 }
2169
2170 void xfrm_audit_state_add(struct xfrm_state *x, int result,
2171 uid_t auid, u32 sessionid, u32 secid)
2172 {
2173 struct audit_buffer *audit_buf;
2174
2175 audit_buf = xfrm_audit_start("SAD-add");
2176 if (audit_buf == NULL)
2177 return;
2178 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2179 xfrm_audit_helper_sainfo(x, audit_buf);
2180 audit_log_format(audit_buf, " res=%u", result);
2181 audit_log_end(audit_buf);
2182 }
2183 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2184
2185 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
2186 uid_t auid, u32 sessionid, u32 secid)
2187 {
2188 struct audit_buffer *audit_buf;
2189
2190 audit_buf = xfrm_audit_start("SAD-delete");
2191 if (audit_buf == NULL)
2192 return;
2193 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2194 xfrm_audit_helper_sainfo(x, audit_buf);
2195 audit_log_format(audit_buf, " res=%u", result);
2196 audit_log_end(audit_buf);
2197 }
2198 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2199
2200 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2201 struct sk_buff *skb)
2202 {
2203 struct audit_buffer *audit_buf;
2204 u32 spi;
2205
2206 audit_buf = xfrm_audit_start("SA-replay-overflow");
2207 if (audit_buf == NULL)
2208 return;
2209 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2210 /* don't record the sequence number because it's inherent in this kind
2211 * of audit message */
2212 spi = ntohl(x->id.spi);
2213 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2214 audit_log_end(audit_buf);
2215 }
2216 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2217
2218 static void xfrm_audit_state_replay(struct xfrm_state *x,
2219 struct sk_buff *skb, __be32 net_seq)
2220 {
2221 struct audit_buffer *audit_buf;
2222 u32 spi;
2223
2224 audit_buf = xfrm_audit_start("SA-replayed-pkt");
2225 if (audit_buf == NULL)
2226 return;
2227 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2228 spi = ntohl(x->id.spi);
2229 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2230 spi, spi, ntohl(net_seq));
2231 audit_log_end(audit_buf);
2232 }
2233
2234 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2235 {
2236 struct audit_buffer *audit_buf;
2237
2238 audit_buf = xfrm_audit_start("SA-notfound");
2239 if (audit_buf == NULL)
2240 return;
2241 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2242 audit_log_end(audit_buf);
2243 }
2244 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2245
2246 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2247 __be32 net_spi, __be32 net_seq)
2248 {
2249 struct audit_buffer *audit_buf;
2250 u32 spi;
2251
2252 audit_buf = xfrm_audit_start("SA-notfound");
2253 if (audit_buf == NULL)
2254 return;
2255 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2256 spi = ntohl(net_spi);
2257 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2258 spi, spi, ntohl(net_seq));
2259 audit_log_end(audit_buf);
2260 }
2261 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2262
2263 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2264 struct sk_buff *skb, u8 proto)
2265 {
2266 struct audit_buffer *audit_buf;
2267 __be32 net_spi;
2268 __be32 net_seq;
2269
2270 audit_buf = xfrm_audit_start("SA-icv-failure");
2271 if (audit_buf == NULL)
2272 return;
2273 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2274 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2275 u32 spi = ntohl(net_spi);
2276 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2277 spi, spi, ntohl(net_seq));
2278 }
2279 audit_log_end(audit_buf);
2280 }
2281 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2282 #endif /* CONFIG_AUDITSYSCALL */