flow: virtualize flow cache entry methods
net/xfrm/xfrm_policy.c
1 /*
2 * xfrm_policy.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * Kazunori MIYAZAWA @USAGI
10 * YOSHIFUJI Hideaki
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
13 *
14 */
15
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/audit.h>
28 #include <net/dst.h>
29 #include <net/xfrm.h>
30 #include <net/ip.h>
31 #ifdef CONFIG_XFRM_STATISTICS
32 #include <net/snmp.h>
33 #endif
34
35 #include "xfrm_hash.h"
36
37 DEFINE_MUTEX(xfrm_cfg_mutex);
38 EXPORT_SYMBOL(xfrm_cfg_mutex);
39
40 static DEFINE_RWLOCK(xfrm_policy_lock);
41
42 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
43 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
44
45 static struct kmem_cache *xfrm_dst_cache __read_mostly;
46
47 static HLIST_HEAD(xfrm_policy_gc_list);
48 static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
49
50 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
51 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
52 static void xfrm_init_pmtu(struct dst_entry *dst);
53
54 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
55 int dir);
56
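/* Selector matching, sketched: addresses are compared under the
 * selector's prefix lengths, ports under the selector's port masks,
 * and a zero proto or ifindex in the selector acts as a wildcard
 * (e.g. dport_mask == 0 matches any destination port).
 */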
57 static inline int
58 __xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
59 {
60 return addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
61 addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
62 !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
63 !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
64 (fl->proto == sel->proto || !sel->proto) &&
65 (fl->oif == sel->ifindex || !sel->ifindex);
66 }
67
68 static inline int
69 __xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
70 {
71 return addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
72 addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
73 !((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
74 !((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
75 (fl->proto == sel->proto || !sel->proto) &&
76 (fl->oif == sel->ifindex || !sel->ifindex);
77 }
78
79 int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
80 unsigned short family)
81 {
82 switch (family) {
83 case AF_INET:
84 return __xfrm4_selector_match(sel, fl);
85 case AF_INET6:
86 return __xfrm6_selector_match(sel, fl);
87 }
88 return 0;
89 }
90
91 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
92 xfrm_address_t *saddr,
93 xfrm_address_t *daddr,
94 int family)
95 {
96 struct xfrm_policy_afinfo *afinfo;
97 struct dst_entry *dst;
98
99 afinfo = xfrm_policy_get_afinfo(family);
100 if (unlikely(afinfo == NULL))
101 return ERR_PTR(-EAFNOSUPPORT);
102
103 dst = afinfo->dst_lookup(net, tos, saddr, daddr);
104
105 xfrm_policy_put_afinfo(afinfo);
106
107 return dst;
108 }
109
110 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
111 xfrm_address_t *prev_saddr,
112 xfrm_address_t *prev_daddr,
113 int family)
114 {
115 struct net *net = xs_net(x);
116 xfrm_address_t *saddr = &x->props.saddr;
117 xfrm_address_t *daddr = &x->id.daddr;
118 struct dst_entry *dst;
119
120 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
121 saddr = x->coaddr;
122 daddr = prev_daddr;
123 }
124 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
125 saddr = prev_saddr;
126 daddr = x->coaddr;
127 }
128
129 dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
130
131 if (!IS_ERR(dst)) {
132 if (prev_saddr != saddr)
133 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
134 if (prev_daddr != daddr)
135 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
136 }
137
138 return dst;
139 }
140
141 static inline unsigned long make_jiffies(long secs)
142 {
143 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
144 return MAX_SCHEDULE_TIMEOUT-1;
145 else
146 return secs*HZ;
147 }
148
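/* Per-policy lifetime timer.  A soft expiry only warns the key manager
 * (km_policy_expired() with hard == 0) and retries after XFRM_KM_TIMEOUT;
 * a hard expiry deletes the policy.  Arming a previously inactive timer
 * takes an extra reference so the policy cannot be freed while the
 * timer is pending.
 */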
149 static void xfrm_policy_timer(unsigned long data)
150 {
151 struct xfrm_policy *xp = (struct xfrm_policy*)data;
152 unsigned long now = get_seconds();
153 long next = LONG_MAX;
154 int warn = 0;
155 int dir;
156
157 read_lock(&xp->lock);
158
159 if (unlikely(xp->walk.dead))
160 goto out;
161
162 dir = xfrm_policy_id2dir(xp->index);
163
164 if (xp->lft.hard_add_expires_seconds) {
165 long tmo = xp->lft.hard_add_expires_seconds +
166 xp->curlft.add_time - now;
167 if (tmo <= 0)
168 goto expired;
169 if (tmo < next)
170 next = tmo;
171 }
172 if (xp->lft.hard_use_expires_seconds) {
173 long tmo = xp->lft.hard_use_expires_seconds +
174 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
175 if (tmo <= 0)
176 goto expired;
177 if (tmo < next)
178 next = tmo;
179 }
180 if (xp->lft.soft_add_expires_seconds) {
181 long tmo = xp->lft.soft_add_expires_seconds +
182 xp->curlft.add_time - now;
183 if (tmo <= 0) {
184 warn = 1;
185 tmo = XFRM_KM_TIMEOUT;
186 }
187 if (tmo < next)
188 next = tmo;
189 }
190 if (xp->lft.soft_use_expires_seconds) {
191 long tmo = xp->lft.soft_use_expires_seconds +
192 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
193 if (tmo <= 0) {
194 warn = 1;
195 tmo = XFRM_KM_TIMEOUT;
196 }
197 if (tmo < next)
198 next = tmo;
199 }
200
201 if (warn)
202 km_policy_expired(xp, dir, 0, 0);
203 if (next != LONG_MAX &&
204 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
205 xfrm_pol_hold(xp);
206
207 out:
208 read_unlock(&xp->lock);
209 xfrm_pol_put(xp);
210 return;
211
212 expired:
213 read_unlock(&xp->lock);
214 if (!xfrm_policy_delete(xp, dir))
215 km_policy_expired(xp, dir, 1, 0);
216 xfrm_pol_put(xp);
217 }
218
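/* Flow cache entry methods for policies.  The flow cache stores a
 * flow_cache_object embedded in each policy instead of a bare policy
 * pointer; its ops virtualize object lifetime handling: ->get takes a
 * reference if the entry is still usable (returning NULL signals
 * "stale, please re-resolve"), ->check revalidates a cached entry on a
 * hit, and ->delete drops the cache's reference.  For policies these
 * map onto walk.dead and the policy refcount, roughly:
 *
 *	flo = pol->flo.ops->get(&pol->flo);	(NULL once pol is dead)
 */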
219 static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
220 {
221 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
222
223 if (unlikely(pol->walk.dead))
224 flo = NULL;
225 else
226 xfrm_pol_hold(pol);
227
228 return flo;
229 }
230
231 static int xfrm_policy_flo_check(struct flow_cache_object *flo)
232 {
233 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
234
235 return !pol->walk.dead;
236 }
237
238 static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
239 {
240 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
241 }
242
243 static const struct flow_cache_ops xfrm_policy_fc_ops = {
244 .get = xfrm_policy_flo_get,
245 .check = xfrm_policy_flo_check,
246 .delete = xfrm_policy_flo_delete,
247 };
248
249 /* Allocate an xfrm_policy. Not used here; it is meant to be used by pfkeyv2
250 * SPD calls.
251 */
252
253 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
254 {
255 struct xfrm_policy *policy;
256
257 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
258
259 if (policy) {
260 write_pnet(&policy->xp_net, net);
261 INIT_LIST_HEAD(&policy->walk.all);
262 INIT_HLIST_NODE(&policy->bydst);
263 INIT_HLIST_NODE(&policy->byidx);
264 rwlock_init(&policy->lock);
265 atomic_set(&policy->refcnt, 1);
266 setup_timer(&policy->timer, xfrm_policy_timer,
267 (unsigned long)policy);
268 policy->flo.ops = &xfrm_policy_fc_ops;
269 }
270 return policy;
271 }
272 EXPORT_SYMBOL(xfrm_policy_alloc);
273
274 /* Destroy xfrm_policy: descendant resources must have been released by this point. */
275
276 void xfrm_policy_destroy(struct xfrm_policy *policy)
277 {
278 BUG_ON(!policy->walk.dead);
279
280 BUG_ON(policy->bundles);
281
282 if (del_timer(&policy->timer))
283 BUG();
284
285 security_xfrm_policy_free(policy->security);
286 kfree(policy);
287 }
288 EXPORT_SYMBOL(xfrm_policy_destroy);
289
290 static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
291 {
292 struct dst_entry *dst;
293
294 while ((dst = policy->bundles) != NULL) {
295 policy->bundles = dst->next;
296 dst_free(dst);
297 }
298
299 if (del_timer(&policy->timer))
300 atomic_dec(&policy->refcnt);
301
302 xfrm_pol_put(policy);
303 }
304
305 static void xfrm_policy_gc_task(struct work_struct *work)
306 {
307 struct xfrm_policy *policy;
308 struct hlist_node *entry, *tmp;
309 struct hlist_head gc_list;
310
311 spin_lock_bh(&xfrm_policy_gc_lock);
312 gc_list.first = xfrm_policy_gc_list.first;
313 INIT_HLIST_HEAD(&xfrm_policy_gc_list);
314 spin_unlock_bh(&xfrm_policy_gc_lock);
315
316 hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
317 xfrm_policy_gc_kill(policy);
318 }
319 static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);
320
321 /* The rule must be locked. Release descendant resources, announce
322 * the entry dead. The rule must already be unlinked from all lists.
323 */
324
325 static void xfrm_policy_kill(struct xfrm_policy *policy)
326 {
327 policy->walk.dead = 1;
328
329 spin_lock_bh(&xfrm_policy_gc_lock);
330 hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
331 spin_unlock_bh(&xfrm_policy_gc_lock);
332
333 schedule_work(&xfrm_policy_gc_work);
334 }
335
336 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
337
338 static inline unsigned int idx_hash(struct net *net, u32 index)
339 {
340 return __idx_hash(index, net->xfrm.policy_idx_hmask);
341 }
342
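/* Chain selection for a selector.  __sel_hash() is assumed to hash only
 * fully specified (host) addresses and to return hmask + 1 for prefixed
 * selectors; those land on the per-direction "inexact" list, which
 * lookups must always scan as a fallback.
 */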
343 static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selector *sel, unsigned short family, int dir)
344 {
345 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
346 unsigned int hash = __sel_hash(sel, family, hmask);
347
348 return (hash == hmask + 1 ?
349 &net->xfrm.policy_inexact[dir] :
350 net->xfrm.policy_bydst[dir].table + hash);
351 }
352
353 static struct hlist_head *policy_hash_direct(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
354 {
355 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
356 unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
357
358 return net->xfrm.policy_bydst[dir].table + hash;
359 }
360
361 static void xfrm_dst_hash_transfer(struct hlist_head *list,
362 struct hlist_head *ndsttable,
363 unsigned int nhashmask)
364 {
365 struct hlist_node *entry, *tmp, *entry0 = NULL;
366 struct xfrm_policy *pol;
367 unsigned int h0 = 0;
368
369 redo:
370 hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
371 unsigned int h;
372
373 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
374 pol->family, nhashmask);
375 if (!entry0) {
376 hlist_del(entry);
377 hlist_add_head(&pol->bydst, ndsttable+h);
378 h0 = h;
379 } else {
380 if (h != h0)
381 continue;
382 hlist_del(entry);
383 hlist_add_after(entry0, &pol->bydst);
384 }
385 entry0 = entry;
386 }
387 if (!hlist_empty(list)) {
388 entry0 = NULL;
389 goto redo;
390 }
391 }
392
393 static void xfrm_idx_hash_transfer(struct hlist_head *list,
394 struct hlist_head *nidxtable,
395 unsigned int nhashmask)
396 {
397 struct hlist_node *entry, *tmp;
398 struct xfrm_policy *pol;
399
400 hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
401 unsigned int h;
402
403 h = __idx_hash(pol->index, nhashmask);
404 hlist_add_head(&pol->byidx, nidxtable+h);
405 }
406 }
407
408 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
409 {
410 return ((old_hmask + 1) << 1) - 1;
411 }
412
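/* Each resize doubles the bucket count (e.g. hmask 15 -> 31, i.e.
 * 16 -> 32 buckets) and rehashes every chain under the global write
 * lock, so lookups never observe a half-built table.
 */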
413 static void xfrm_bydst_resize(struct net *net, int dir)
414 {
415 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
416 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
417 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
418 struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
419 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
420 int i;
421
422 if (!ndst)
423 return;
424
425 write_lock_bh(&xfrm_policy_lock);
426
427 for (i = hmask; i >= 0; i--)
428 xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
429
430 net->xfrm.policy_bydst[dir].table = ndst;
431 net->xfrm.policy_bydst[dir].hmask = nhashmask;
432
433 write_unlock_bh(&xfrm_policy_lock);
434
435 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
436 }
437
438 static void xfrm_byidx_resize(struct net *net, int total)
439 {
440 unsigned int hmask = net->xfrm.policy_idx_hmask;
441 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
442 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
443 struct hlist_head *oidx = net->xfrm.policy_byidx;
444 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
445 int i;
446
447 if (!nidx)
448 return;
449
450 write_lock_bh(&xfrm_policy_lock);
451
452 for (i = hmask; i >= 0; i--)
453 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
454
455 net->xfrm.policy_byidx = nidx;
456 net->xfrm.policy_idx_hmask = nhashmask;
457
458 write_unlock_bh(&xfrm_policy_lock);
459
460 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
461 }
462
463 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
464 {
465 unsigned int cnt = net->xfrm.policy_count[dir];
466 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
467
468 if (total)
469 *total += cnt;
470
471 if ((hmask + 1) < xfrm_policy_hashmax &&
472 cnt > hmask)
473 return 1;
474
475 return 0;
476 }
477
478 static inline int xfrm_byidx_should_resize(struct net *net, int total)
479 {
480 unsigned int hmask = net->xfrm.policy_idx_hmask;
481
482 if ((hmask + 1) < xfrm_policy_hashmax &&
483 total > hmask)
484 return 1;
485
486 return 0;
487 }
488
489 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
490 {
491 read_lock_bh(&xfrm_policy_lock);
492 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
493 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
494 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
495 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
496 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
497 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
498 si->spdhcnt = net->xfrm.policy_idx_hmask;
499 si->spdhmcnt = xfrm_policy_hashmax;
500 read_unlock_bh(&xfrm_policy_lock);
501 }
502 EXPORT_SYMBOL(xfrm_spd_getinfo);
503
504 static DEFINE_MUTEX(hash_resize_mutex);
505 static void xfrm_hash_resize(struct work_struct *work)
506 {
507 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
508 int dir, total;
509
510 mutex_lock(&hash_resize_mutex);
511
512 total = 0;
513 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
514 if (xfrm_bydst_should_resize(net, dir, &total))
515 xfrm_bydst_resize(net, dir);
516 }
517 if (xfrm_byidx_should_resize(net, total))
518 xfrm_byidx_resize(net, total);
519
520 mutex_unlock(&hash_resize_mutex);
521 }
522
523 /* Generate a new index... KAME seems to generate them ordered by cost,
524 * at the price of completely unpredictable rule ordering. That will not do here. */
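/* The direction is encoded in the low-order bits of the index
 * (idx_generator advances in steps of 8, leaving one slot per
 * direction), so xfrm_policy_id2dir() can presumably recover it
 * as index & 7.
 */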
525 static u32 xfrm_gen_index(struct net *net, int dir)
526 {
527 static u32 idx_generator;
528
529 for (;;) {
530 struct hlist_node *entry;
531 struct hlist_head *list;
532 struct xfrm_policy *p;
533 u32 idx;
534 int found;
535
536 idx = (idx_generator | dir);
537 idx_generator += 8;
538 if (idx == 0)
539 idx = 8;
540 list = net->xfrm.policy_byidx + idx_hash(net, idx);
541 found = 0;
542 hlist_for_each_entry(p, entry, list, byidx) {
543 if (p->index == idx) {
544 found = 1;
545 break;
546 }
547 }
548 if (!found)
549 return idx;
550 }
551 }
552
553 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
554 {
555 u32 *p1 = (u32 *) s1;
556 u32 *p2 = (u32 *) s2;
557 int len = sizeof(struct xfrm_selector) / sizeof(u32);
558 int i;
559
560 for (i = 0; i < len; i++) {
561 if (p1[i] != p2[i])
562 return 1;
563 }
564
565 return 0;
566 }
567
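/* Insertion keeps each hash chain sorted by ascending priority.  A
 * policy with the same type, selector, mark and security context
 * replaces the existing entry (delpol) unless `excl` is set, in which
 * case -EEXIST is returned; a replacement reuses the old index,
 * presumably so references by index stay valid.
 */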
568 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
569 {
570 struct net *net = xp_net(policy);
571 struct xfrm_policy *pol;
572 struct xfrm_policy *delpol;
573 struct hlist_head *chain;
574 struct hlist_node *entry, *newpos;
575 struct dst_entry *gc_list;
576 u32 mark = policy->mark.v & policy->mark.m;
577
578 write_lock_bh(&xfrm_policy_lock);
579 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
580 delpol = NULL;
581 newpos = NULL;
582 hlist_for_each_entry(pol, entry, chain, bydst) {
583 if (pol->type == policy->type &&
584 !selector_cmp(&pol->selector, &policy->selector) &&
585 (mark & pol->mark.m) == pol->mark.v &&
586 xfrm_sec_ctx_match(pol->security, policy->security) &&
587 !WARN_ON(delpol)) {
588 if (excl) {
589 write_unlock_bh(&xfrm_policy_lock);
590 return -EEXIST;
591 }
592 delpol = pol;
593 if (policy->priority > pol->priority)
594 continue;
595 } else if (policy->priority >= pol->priority) {
596 newpos = &pol->bydst;
597 continue;
598 }
599 if (delpol)
600 break;
601 }
602 if (newpos)
603 hlist_add_after(newpos, &policy->bydst);
604 else
605 hlist_add_head(&policy->bydst, chain);
606 xfrm_pol_hold(policy);
607 net->xfrm.policy_count[dir]++;
608 atomic_inc(&flow_cache_genid);
609 if (delpol)
610 __xfrm_policy_unlink(delpol, dir);
611 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
612 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
613 policy->curlft.add_time = get_seconds();
614 policy->curlft.use_time = 0;
615 if (!mod_timer(&policy->timer, jiffies + HZ))
616 xfrm_pol_hold(policy);
617 list_add(&policy->walk.all, &net->xfrm.policy_all);
618 write_unlock_bh(&xfrm_policy_lock);
619
620 if (delpol)
621 xfrm_policy_kill(delpol);
622 else if (xfrm_bydst_should_resize(net, dir, NULL))
623 schedule_work(&net->xfrm.policy_hash_work);
624
625 read_lock_bh(&xfrm_policy_lock);
626 gc_list = NULL;
627 entry = &policy->bydst;
628 hlist_for_each_entry_continue(policy, entry, bydst) {
629 struct dst_entry *dst;
630
631 write_lock(&policy->lock);
632 dst = policy->bundles;
633 if (dst) {
634 struct dst_entry *tail = dst;
635 while (tail->next)
636 tail = tail->next;
637 tail->next = gc_list;
638 gc_list = dst;
639
640 policy->bundles = NULL;
641 }
642 write_unlock(&policy->lock);
643 }
644 read_unlock_bh(&xfrm_policy_lock);
645
646 while (gc_list) {
647 struct dst_entry *dst = gc_list;
648
649 gc_list = dst->next;
650 dst_free(dst);
651 }
652
653 return 0;
654 }
655 EXPORT_SYMBOL(xfrm_policy_insert);
656
657 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
658 int dir, struct xfrm_selector *sel,
659 struct xfrm_sec_ctx *ctx, int delete,
660 int *err)
661 {
662 struct xfrm_policy *pol, *ret;
663 struct hlist_head *chain;
664 struct hlist_node *entry;
665
666 *err = 0;
667 write_lock_bh(&xfrm_policy_lock);
668 chain = policy_hash_bysel(net, sel, sel->family, dir);
669 ret = NULL;
670 hlist_for_each_entry(pol, entry, chain, bydst) {
671 if (pol->type == type &&
672 (mark & pol->mark.m) == pol->mark.v &&
673 !selector_cmp(sel, &pol->selector) &&
674 xfrm_sec_ctx_match(ctx, pol->security)) {
675 xfrm_pol_hold(pol);
676 if (delete) {
677 *err = security_xfrm_policy_delete(
678 pol->security);
679 if (*err) {
680 write_unlock_bh(&xfrm_policy_lock);
681 return pol;
682 }
683 __xfrm_policy_unlink(pol, dir);
684 }
685 ret = pol;
686 break;
687 }
688 }
689 write_unlock_bh(&xfrm_policy_lock);
690
691 if (ret && delete)
692 xfrm_policy_kill(ret);
693 return ret;
694 }
695 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
696
697 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
698 int dir, u32 id, int delete, int *err)
699 {
700 struct xfrm_policy *pol, *ret;
701 struct hlist_head *chain;
702 struct hlist_node *entry;
703
704 *err = -ENOENT;
705 if (xfrm_policy_id2dir(id) != dir)
706 return NULL;
707
708 *err = 0;
709 write_lock_bh(&xfrm_policy_lock);
710 chain = net->xfrm.policy_byidx + idx_hash(net, id);
711 ret = NULL;
712 hlist_for_each_entry(pol, entry, chain, byidx) {
713 if (pol->type == type && pol->index == id &&
714 (mark & pol->mark.m) == pol->mark.v) {
715 xfrm_pol_hold(pol);
716 if (delete) {
717 *err = security_xfrm_policy_delete(
718 pol->security);
719 if (*err) {
720 write_unlock_bh(&xfrm_policy_lock);
721 return pol;
722 }
723 __xfrm_policy_unlink(pol, dir);
724 }
725 ret = pol;
726 break;
727 }
728 }
729 write_unlock_bh(&xfrm_policy_lock);
730
731 if (ret && delete)
732 xfrm_policy_kill(ret);
733 return ret;
734 }
735 EXPORT_SYMBOL(xfrm_policy_byid);
736
737 #ifdef CONFIG_SECURITY_NETWORK_XFRM
738 static inline int
739 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
740 {
741 int dir, err = 0;
742
743 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
744 struct xfrm_policy *pol;
745 struct hlist_node *entry;
746 int i;
747
748 hlist_for_each_entry(pol, entry,
749 &net->xfrm.policy_inexact[dir], bydst) {
750 if (pol->type != type)
751 continue;
752 err = security_xfrm_policy_delete(pol->security);
753 if (err) {
754 xfrm_audit_policy_delete(pol, 0,
755 audit_info->loginuid,
756 audit_info->sessionid,
757 audit_info->secid);
758 return err;
759 }
760 }
761 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
762 hlist_for_each_entry(pol, entry,
763 net->xfrm.policy_bydst[dir].table + i,
764 bydst) {
765 if (pol->type != type)
766 continue;
767 err = security_xfrm_policy_delete(
768 pol->security);
769 if (err) {
770 xfrm_audit_policy_delete(pol, 0,
771 audit_info->loginuid,
772 audit_info->sessionid,
773 audit_info->secid);
774 return err;
775 }
776 }
777 }
778 }
779 return err;
780 }
781 #else
782 static inline int
783 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
784 {
785 return 0;
786 }
787 #endif
788
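/* The flush walks both the inexact lists and the hash tables.  Because
 * xfrm_policy_kill() must run without xfrm_policy_lock held, the lock
 * is dropped for every victim and the scan restarts from the chain
 * head (the again1/again2 labels), since the chain may have changed
 * while the lock was released.
 */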
789 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
790 {
791 int dir, err = 0, cnt = 0;
792
793 write_lock_bh(&xfrm_policy_lock);
794
795 err = xfrm_policy_flush_secctx_check(net, type, audit_info);
796 if (err)
797 goto out;
798
799 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
800 struct xfrm_policy *pol;
801 struct hlist_node *entry;
802 int i;
803
804 again1:
805 hlist_for_each_entry(pol, entry,
806 &net->xfrm.policy_inexact[dir], bydst) {
807 if (pol->type != type)
808 continue;
809 __xfrm_policy_unlink(pol, dir);
810 write_unlock_bh(&xfrm_policy_lock);
811 cnt++;
812
813 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
814 audit_info->sessionid,
815 audit_info->secid);
816
817 xfrm_policy_kill(pol);
818
819 write_lock_bh(&xfrm_policy_lock);
820 goto again1;
821 }
822
823 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
824 again2:
825 hlist_for_each_entry(pol, entry,
826 net->xfrm.policy_bydst[dir].table + i,
827 bydst) {
828 if (pol->type != type)
829 continue;
830 __xfrm_policy_unlink(pol, dir);
831 write_unlock_bh(&xfrm_policy_lock);
832 cnt++;
833
834 xfrm_audit_policy_delete(pol, 1,
835 audit_info->loginuid,
836 audit_info->sessionid,
837 audit_info->secid);
838 xfrm_policy_kill(pol);
839
840 write_lock_bh(&xfrm_policy_lock);
841 goto again2;
842 }
843 }
844
845 }
846 if (!cnt)
847 err = -ESRCH;
848 out:
849 write_unlock_bh(&xfrm_policy_lock);
850 return err;
851 }
852 EXPORT_SYMBOL(xfrm_policy_flush);
853
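/* Resumable dump.  The walker is threaded onto net->xfrm.policy_all as
 * a "dead" entry (see xfrm_policy_walk_init()), so a later call resumes
 * at the entry where func() last failed; dead entries are skipped by
 * readers.  walk->seq counts the policies reported so far.
 */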
854 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
855 int (*func)(struct xfrm_policy *, int, int, void*),
856 void *data)
857 {
858 struct xfrm_policy *pol;
859 struct xfrm_policy_walk_entry *x;
860 int error = 0;
861
862 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
863 walk->type != XFRM_POLICY_TYPE_ANY)
864 return -EINVAL;
865
866 if (list_empty(&walk->walk.all) && walk->seq != 0)
867 return 0;
868
869 write_lock_bh(&xfrm_policy_lock);
870 if (list_empty(&walk->walk.all))
871 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
872 else
873 x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
874 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
875 if (x->dead)
876 continue;
877 pol = container_of(x, struct xfrm_policy, walk);
878 if (walk->type != XFRM_POLICY_TYPE_ANY &&
879 walk->type != pol->type)
880 continue;
881 error = func(pol, xfrm_policy_id2dir(pol->index),
882 walk->seq, data);
883 if (error) {
884 list_move_tail(&walk->walk.all, &x->all);
885 goto out;
886 }
887 walk->seq++;
888 }
889 if (walk->seq == 0) {
890 error = -ENOENT;
891 goto out;
892 }
893 list_del_init(&walk->walk.all);
894 out:
895 write_unlock_bh(&xfrm_policy_lock);
896 return error;
897 }
898 EXPORT_SYMBOL(xfrm_policy_walk);
899
900 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
901 {
902 INIT_LIST_HEAD(&walk->walk.all);
903 walk->walk.dead = 1;
904 walk->type = type;
905 walk->seq = 0;
906 }
907 EXPORT_SYMBOL(xfrm_policy_walk_init);
908
909 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
910 {
911 if (list_empty(&walk->walk.all))
912 return;
913
914 write_lock_bh(&xfrm_policy_lock);
915 list_del(&walk->walk.all);
916 write_unlock_bh(&xfrm_policy_lock);
917 }
918 EXPORT_SYMBOL(xfrm_policy_walk_done);
919
920 /*
921 * Find the policy to apply to this flow.
922 *
923 * Returns 0 if the policy matches, otherwise a negative errno.
924 */
925 static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
926 u8 type, u16 family, int dir)
927 {
928 struct xfrm_selector *sel = &pol->selector;
929 int match, ret = -ESRCH;
930
931 if (pol->family != family ||
932 (fl->mark & pol->mark.m) != pol->mark.v ||
933 pol->type != type)
934 return ret;
935
936 match = xfrm_selector_match(sel, fl, family);
937 if (match)
938 ret = security_xfrm_policy_lookup(pol->security, fl->secid,
939 dir);
940
941 return ret;
942 }
943
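/* Two-stage lookup: first the exact hash chain for this (daddr, saddr)
 * pair, then the inexact list.  A lower ->priority value wins, so an
 * inexact policy overrides an exact match only when it is strictly
 * more preferred.
 */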
944 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
945 struct flowi *fl,
946 u16 family, u8 dir)
947 {
948 int err;
949 struct xfrm_policy *pol, *ret;
950 xfrm_address_t *daddr, *saddr;
951 struct hlist_node *entry;
952 struct hlist_head *chain;
953 u32 priority = ~0U;
954
955 daddr = xfrm_flowi_daddr(fl, family);
956 saddr = xfrm_flowi_saddr(fl, family);
957 if (unlikely(!daddr || !saddr))
958 return NULL;
959
960 read_lock_bh(&xfrm_policy_lock);
961 chain = policy_hash_direct(net, daddr, saddr, family, dir);
962 ret = NULL;
963 hlist_for_each_entry(pol, entry, chain, bydst) {
964 err = xfrm_policy_match(pol, fl, type, family, dir);
965 if (err) {
966 if (err == -ESRCH)
967 continue;
968 else {
969 ret = ERR_PTR(err);
970 goto fail;
971 }
972 } else {
973 ret = pol;
974 priority = ret->priority;
975 break;
976 }
977 }
978 chain = &net->xfrm.policy_inexact[dir];
979 hlist_for_each_entry(pol, entry, chain, bydst) {
980 err = xfrm_policy_match(pol, fl, type, family, dir);
981 if (err) {
982 if (err == -ESRCH)
983 continue;
984 else {
985 ret = ERR_PTR(err);
986 goto fail;
987 }
988 } else if (pol->priority < priority) {
989 ret = pol;
990 break;
991 }
992 }
993 if (ret)
994 xfrm_pol_hold(ret);
995 fail:
996 read_unlock_bh(&xfrm_policy_lock);
997
998 return ret;
999 }
1000
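/* Flow cache resolver, plugged in via flow_cache_lookup().  When the
 * cache asks us to re-resolve, it hands back the previously cached
 * object as old_obj, whose reference must be dropped here.  A
 * sub-policy, if configured, takes precedence over the main table.
 */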
1001 static struct flow_cache_object *
1002 xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
1003 u8 dir, struct flow_cache_object *old_obj, void *ctx)
1004 {
1005 struct xfrm_policy *pol;
1006
1007 if (old_obj)
1008 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
1009
1010 #ifdef CONFIG_XFRM_SUB_POLICY
1011 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1012 if (IS_ERR(pol))
1013 return ERR_CAST(pol);
1014 if (pol)
1015 goto found;
1016 #endif
1017 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1018 if (IS_ERR(pol))
1019 return ERR_CAST(pol);
1020 if (pol)
1021 goto found;
1022 return NULL;
1023
1024 found:
1025 /* The resolver returns two references:
1026 * one for the cache and one for the caller of flow_cache_lookup() */
1027 xfrm_pol_hold(pol);
1028
1029 return &pol->flo;
1030 }
1031
1032 static inline int policy_to_flow_dir(int dir)
1033 {
1034 if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1035 XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1036 XFRM_POLICY_FWD == FLOW_DIR_FWD)
1037 return dir;
1038 switch (dir) {
1039 default:
1040 case XFRM_POLICY_IN:
1041 return FLOW_DIR_IN;
1042 case XFRM_POLICY_OUT:
1043 return FLOW_DIR_OUT;
1044 case XFRM_POLICY_FWD:
1045 return FLOW_DIR_FWD;
1046 }
1047 }
1048
1049 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
1050 {
1051 struct xfrm_policy *pol;
1052
1053 read_lock_bh(&xfrm_policy_lock);
1054 if ((pol = sk->sk_policy[dir]) != NULL) {
1055 int match = xfrm_selector_match(&pol->selector, fl,
1056 sk->sk_family);
1057 int err = 0;
1058
1059 if (match) {
1060 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1061 pol = NULL;
1062 goto out;
1063 }
1064 err = security_xfrm_policy_lookup(pol->security,
1065 fl->secid,
1066 policy_to_flow_dir(dir));
1067 if (!err)
1068 xfrm_pol_hold(pol);
1069 else if (err == -ESRCH)
1070 pol = NULL;
1071 else
1072 pol = ERR_PTR(err);
1073 } else
1074 pol = NULL;
1075 }
1076 out:
1077 read_unlock_bh(&xfrm_policy_lock);
1078 return pol;
1079 }
1080
1081 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1082 {
1083 struct net *net = xp_net(pol);
1084 struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
1085 pol->family, dir);
1086
1087 list_add(&pol->walk.all, &net->xfrm.policy_all);
1088 hlist_add_head(&pol->bydst, chain);
1089 hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
1090 net->xfrm.policy_count[dir]++;
1091 xfrm_pol_hold(pol);
1092
1093 if (xfrm_bydst_should_resize(net, dir, NULL))
1094 schedule_work(&net->xfrm.policy_hash_work);
1095 }
1096
1097 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1098 int dir)
1099 {
1100 struct net *net = xp_net(pol);
1101
1102 if (hlist_unhashed(&pol->bydst))
1103 return NULL;
1104
1105 hlist_del(&pol->bydst);
1106 hlist_del(&pol->byidx);
1107 list_del(&pol->walk.all);
1108 net->xfrm.policy_count[dir]--;
1109
1110 return pol;
1111 }
1112
1113 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1114 {
1115 write_lock_bh(&xfrm_policy_lock);
1116 pol = __xfrm_policy_unlink(pol, dir);
1117 write_unlock_bh(&xfrm_policy_lock);
1118 if (pol) {
1119 xfrm_policy_kill(pol);
1120 return 0;
1121 }
1122 return -ENOENT;
1123 }
1124 EXPORT_SYMBOL(xfrm_policy_delete);
1125
1126 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1127 {
1128 struct net *net = xp_net(pol);
1129 struct xfrm_policy *old_pol;
1130
1131 #ifdef CONFIG_XFRM_SUB_POLICY
1132 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1133 return -EINVAL;
1134 #endif
1135
1136 write_lock_bh(&xfrm_policy_lock);
1137 old_pol = sk->sk_policy[dir];
1138 sk->sk_policy[dir] = pol;
1139 if (pol) {
1140 pol->curlft.add_time = get_seconds();
1141 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
1142 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1143 }
1144 if (old_pol)
1145 /* Unlinking always succeeds. This is the only function
1146 * allowed to delete or replace a socket policy.
1147 */
1148 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1149 write_unlock_bh(&xfrm_policy_lock);
1150
1151 if (old_pol) {
1152 xfrm_policy_kill(old_pol);
1153 }
1154 return 0;
1155 }
1156
1157 static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
1158 {
1159 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1160
1161 if (newp) {
1162 newp->selector = old->selector;
1163 if (security_xfrm_policy_clone(old->security,
1164 &newp->security)) {
1165 kfree(newp);
1166 return NULL; /* ENOMEM */
1167 }
1168 newp->lft = old->lft;
1169 newp->curlft = old->curlft;
1170 newp->mark = old->mark;
1171 newp->action = old->action;
1172 newp->flags = old->flags;
1173 newp->xfrm_nr = old->xfrm_nr;
1174 newp->index = old->index;
1175 newp->type = old->type;
1176 memcpy(newp->xfrm_vec, old->xfrm_vec,
1177 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1178 write_lock_bh(&xfrm_policy_lock);
1179 __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
1180 write_unlock_bh(&xfrm_policy_lock);
1181 xfrm_pol_put(newp);
1182 }
1183 return newp;
1184 }
1185
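/* Clone both socket policies onto a freshly cloned socket.  On -ENOMEM
 * any slot already cloned is left in place; the caller is presumably
 * responsible for disposing of the half-initialized socket.
 */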
1186 int __xfrm_sk_clone_policy(struct sock *sk)
1187 {
1188 struct xfrm_policy *p0 = sk->sk_policy[0],
1189 *p1 = sk->sk_policy[1];
1190
1191 sk->sk_policy[0] = sk->sk_policy[1] = NULL;
1192 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
1193 return -ENOMEM;
1194 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
1195 return -ENOMEM;
1196 return 0;
1197 }
1198
1199 static int
1200 xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
1201 unsigned short family)
1202 {
1203 int err;
1204 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1205
1206 if (unlikely(afinfo == NULL))
1207 return -EINVAL;
1208 err = afinfo->get_saddr(net, local, remote);
1209 xfrm_policy_put_afinfo(afinfo);
1210 return err;
1211 }
1212
1213 /* Resolve list of templates for the flow, given policy. */
1214
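/* For tunnel and BEET templates the outer endpoints come from the
 * template itself; if the template leaves the local address
 * unspecified, one is chosen via the afinfo ->get_saddr() hook.
 * Optional templates that fail to resolve are skipped instead of
 * failing the whole resolution.
 */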
1215 static int
1216 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
1217 struct xfrm_state **xfrm,
1218 unsigned short family)
1219 {
1220 struct net *net = xp_net(policy);
1221 int nx;
1222 int i, error;
1223 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1224 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1225 xfrm_address_t tmp;
1226
1227 for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
1228 struct xfrm_state *x;
1229 xfrm_address_t *remote = daddr;
1230 xfrm_address_t *local = saddr;
1231 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1232
1233 if (tmpl->mode == XFRM_MODE_TUNNEL ||
1234 tmpl->mode == XFRM_MODE_BEET) {
1235 remote = &tmpl->id.daddr;
1236 local = &tmpl->saddr;
1237 family = tmpl->encap_family;
1238 if (xfrm_addr_any(local, family)) {
1239 error = xfrm_get_saddr(net, &tmp, remote, family);
1240 if (error)
1241 goto fail;
1242 local = &tmp;
1243 }
1244 }
1245
1246 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1247
1248 if (x && x->km.state == XFRM_STATE_VALID) {
1249 xfrm[nx++] = x;
1250 daddr = remote;
1251 saddr = local;
1252 continue;
1253 }
1254 if (x) {
1255 error = (x->km.state == XFRM_STATE_ERROR ?
1256 -EINVAL : -EAGAIN);
1257 xfrm_state_put(x);
1258 }
1259 else if (error == -ESRCH)
1260 error = -EAGAIN;
1261
1262 if (!tmpl->optional)
1263 goto fail;
1264 }
1265 return nx;
1266
1267 fail:
1268 for (nx--; nx>=0; nx--)
1269 xfrm_state_put(xfrm[nx]);
1270 return error;
1271 }
1272
1273 static int
1274 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
1275 struct xfrm_state **xfrm,
1276 unsigned short family)
1277 {
1278 struct xfrm_state *tp[XFRM_MAX_DEPTH];
1279 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1280 int cnx = 0;
1281 int error;
1282 int ret;
1283 int i;
1284
1285 for (i = 0; i < npols; i++) {
1286 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1287 error = -ENOBUFS;
1288 goto fail;
1289 }
1290
1291 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1292 if (ret < 0) {
1293 error = ret;
1294 goto fail;
1295 } else
1296 cnx += ret;
1297 }
1298
1299 /* found states are sorted for outbound processing */
1300 if (npols > 1)
1301 xfrm_state_sort(xfrm, tpp, cnx, family);
1302
1303 return cnx;
1304
1305 fail:
1306 for (cnx--; cnx>=0; cnx--)
1307 xfrm_state_put(tpp[cnx]);
1308 return error;
1309
1310 }
1311
1312 /* Check that the bundle accepts the flow and that its components are
1313 * still valid.
1314 */
1315
1316 static struct dst_entry *
1317 xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
1318 {
1319 struct dst_entry *x;
1320 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1321 if (unlikely(afinfo == NULL))
1322 return ERR_PTR(-EINVAL);
1323 x = afinfo->find_bundle(fl, policy);
1324 xfrm_policy_put_afinfo(afinfo);
1325 return x;
1326 }
1327
1328 static inline int xfrm_get_tos(struct flowi *fl, int family)
1329 {
1330 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1331 int tos;
1332
1333 if (!afinfo)
1334 return -EINVAL;
1335
1336 tos = afinfo->get_tos(fl);
1337
1338 xfrm_policy_put_afinfo(afinfo);
1339
1340 return tos;
1341 }
1342
1343 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1344 {
1345 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1346 struct dst_ops *dst_ops;
1347 struct xfrm_dst *xdst;
1348
1349 if (!afinfo)
1350 return ERR_PTR(-EINVAL);
1351
1352 switch (family) {
1353 case AF_INET:
1354 dst_ops = &net->xfrm.xfrm4_dst_ops;
1355 break;
1356 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1357 case AF_INET6:
1358 dst_ops = &net->xfrm.xfrm6_dst_ops;
1359 break;
1360 #endif
1361 default:
1362 BUG();
1363 }
1364 xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
1365
1366 xfrm_policy_put_afinfo(afinfo);
1367
1368 return xdst;
1369 }
1370
1371 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1372 int nfheader_len)
1373 {
1374 struct xfrm_policy_afinfo *afinfo =
1375 xfrm_policy_get_afinfo(dst->ops->family);
1376 int err;
1377
1378 if (!afinfo)
1379 return -EINVAL;
1380
1381 err = afinfo->init_path(path, dst, nfheader_len);
1382
1383 xfrm_policy_put_afinfo(afinfo);
1384
1385 return err;
1386 }
1387
1388 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1389 struct flowi *fl)
1390 {
1391 struct xfrm_policy_afinfo *afinfo =
1392 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1393 int err;
1394
1395 if (!afinfo)
1396 return -EINVAL;
1397
1398 err = afinfo->fill_dst(xdst, dev, fl);
1399
1400 xfrm_policy_put_afinfo(afinfo);
1401
1402 return err;
1403 }
1404
1405 /* Allocate a chain of dst_entry's, attach the known xfrm's, calculate
1406 * all the metrics... In short, build a bundle.
1407 */
1408
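/* Resulting topology, sketched for nx == 2:
 *
 *	dst0 (outer xfrm_dst) -> child -> inner xfrm_dst -> child -> dst
 *
 * Each level's xdst->route records the ordinary route current at that
 * point of the chain, and dst0->path points at the final route.
 */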
1409 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1410 struct xfrm_state **xfrm, int nx,
1411 struct flowi *fl,
1412 struct dst_entry *dst)
1413 {
1414 struct net *net = xp_net(policy);
1415 unsigned long now = jiffies;
1416 struct net_device *dev;
1417 struct dst_entry *dst_prev = NULL;
1418 struct dst_entry *dst0 = NULL;
1419 int i = 0;
1420 int err;
1421 int header_len = 0;
1422 int nfheader_len = 0;
1423 int trailer_len = 0;
1424 int tos;
1425 int family = policy->selector.family;
1426 xfrm_address_t saddr, daddr;
1427
1428 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1429
1430 tos = xfrm_get_tos(fl, family);
1431 err = tos;
1432 if (tos < 0)
1433 goto put_states;
1434
1435 dst_hold(dst);
1436
1437 for (; i < nx; i++) {
1438 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1439 struct dst_entry *dst1 = &xdst->u.dst;
1440
1441 err = PTR_ERR(xdst);
1442 if (IS_ERR(xdst)) {
1443 dst_release(dst);
1444 goto put_states;
1445 }
1446
1447 if (!dst_prev)
1448 dst0 = dst1;
1449 else {
1450 dst_prev->child = dst_clone(dst1);
1451 dst1->flags |= DST_NOHASH;
1452 }
1453
1454 xdst->route = dst;
1455 memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));
1456
1457 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1458 family = xfrm[i]->props.family;
1459 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
1460 family);
1461 err = PTR_ERR(dst);
1462 if (IS_ERR(dst))
1463 goto put_states;
1464 } else
1465 dst_hold(dst);
1466
1467 dst1->xfrm = xfrm[i];
1468 xdst->genid = xfrm[i]->genid;
1469
1470 dst1->obsolete = -1;
1471 dst1->flags |= DST_HOST;
1472 dst1->lastuse = now;
1473
1474 dst1->input = dst_discard;
1475 dst1->output = xfrm[i]->outer_mode->afinfo->output;
1476
1477 dst1->next = dst_prev;
1478 dst_prev = dst1;
1479
1480 header_len += xfrm[i]->props.header_len;
1481 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1482 nfheader_len += xfrm[i]->props.header_len;
1483 trailer_len += xfrm[i]->props.trailer_len;
1484 }
1485
1486 dst_prev->child = dst;
1487 dst0->path = dst;
1488
1489 err = -ENODEV;
1490 dev = dst->dev;
1491 if (!dev)
1492 goto free_dst;
1493
1494 /* Copy neighbour for reachability confirmation */
1495 dst0->neighbour = neigh_clone(dst->neighbour);
1496
1497 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1498 xfrm_init_pmtu(dst_prev);
1499
1500 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1501 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1502
1503 err = xfrm_fill_dst(xdst, dev, fl);
1504 if (err)
1505 goto free_dst;
1506
1507 dst_prev->header_len = header_len;
1508 dst_prev->trailer_len = trailer_len;
1509 header_len -= xdst->u.dst.xfrm->props.header_len;
1510 trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1511 }
1512
1513 out:
1514 return dst0;
1515
1516 put_states:
1517 for (; i < nx; i++)
1518 xfrm_state_put(xfrm[i]);
1519 free_dst:
1520 if (dst0)
1521 dst_free(dst0);
1522 dst0 = ERR_PTR(err);
1523 goto out;
1524 }
1525
1526 static inline int
1527 xfrm_dst_alloc_copy(void **target, void *src, int size)
1528 {
1529 if (!*target) {
1530 *target = kmalloc(size, GFP_ATOMIC);
1531 if (!*target)
1532 return -ENOMEM;
1533 }
1534 memcpy(*target, src, size);
1535 return 0;
1536 }
1537
1538 static inline int
1539 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
1540 {
1541 #ifdef CONFIG_XFRM_SUB_POLICY
1542 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1543 return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1544 sel, sizeof(*sel));
1545 #else
1546 return 0;
1547 #endif
1548 }
1549
1550 static inline int
1551 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
1552 {
1553 #ifdef CONFIG_XFRM_SUB_POLICY
1554 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1555 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1556 #else
1557 return 0;
1558 #endif
1559 }
1560
1561 static int stale_bundle(struct dst_entry *dst);
1562
1563 /* Main function: finds/creates a bundle for a given flow.
1564 *
1565 * At the moment we consume a raw IP route, mostly to speed up lookups
1566 * on interfaces with IPsec disabled.
1567 */
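/* Concurrency note: `genid` snapshots flow_cache_genid before the
 * policy lookup.  If it has changed by the time we finish waiting for
 * larval states, the cached policies may be stale and the whole
 * resolution restarts from scratch.
 */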
1568 int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
1569 struct sock *sk, int flags)
1570 {
1571 struct xfrm_policy *policy;
1572 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1573 int npols;
1574 int pol_dead;
1575 int xfrm_nr;
1576 int pi;
1577 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1578 struct dst_entry *dst, *dst_orig = *dst_p;
1579 int nx = 0;
1580 int err;
1581 u32 genid;
1582 u16 family;
1583 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
1584
1585 restart:
1586 genid = atomic_read(&flow_cache_genid);
1587 policy = NULL;
1588 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
1589 pols[pi] = NULL;
1590 npols = 0;
1591 pol_dead = 0;
1592 xfrm_nr = 0;
1593
1594 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
1595 policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
1596 err = PTR_ERR(policy);
1597 if (IS_ERR(policy)) {
1598 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1599 goto dropdst;
1600 }
1601 }
1602
1603 if (!policy) {
1604 struct flow_cache_object *flo;
1605
1606 /* To accelerate a bit... */
1607 if ((dst_orig->flags & DST_NOXFRM) ||
1608 !net->xfrm.policy_count[XFRM_POLICY_OUT])
1609 goto nopol;
1610
1611 flo = flow_cache_lookup(net, fl, dst_orig->ops->family,
1612 dir, xfrm_policy_lookup, NULL);
1613 err = PTR_ERR(flo);
1614 if (IS_ERR(flo)) {
1615 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1616 goto dropdst;
1617 }
1618 if (flo)
1619 policy = container_of(flo, struct xfrm_policy, flo);
1620 else
1621 policy = NULL;
1622 }
1623
1624 if (!policy)
1625 goto nopol;
1626
1627 family = dst_orig->ops->family;
1628 pols[0] = policy;
1629 npols ++;
1630 xfrm_nr += pols[0]->xfrm_nr;
1631
1632 err = -ENOENT;
1633 if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
1634 goto error;
1635
1636 policy->curlft.use_time = get_seconds();
1637
1638 switch (policy->action) {
1639 default:
1640 case XFRM_POLICY_BLOCK:
1641 /* Prohibit the flow */
1642 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
1643 err = -EPERM;
1644 goto error;
1645
1646 case XFRM_POLICY_ALLOW:
1647 #ifndef CONFIG_XFRM_SUB_POLICY
1648 if (policy->xfrm_nr == 0) {
1649 /* The flow passes untransformed. */
1650 xfrm_pol_put(policy);
1651 return 0;
1652 }
1653 #endif
1654
1655 /* Try to find a matching bundle.
1656 *
1657 * LATER: get help from the flow cache. It is optional; this
1658 * is required only for output policies.
1659 */
1660 dst = xfrm_find_bundle(fl, policy, family);
1661 if (IS_ERR(dst)) {
1662 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1663 err = PTR_ERR(dst);
1664 goto error;
1665 }
1666
1667 if (dst)
1668 break;
1669
1670 #ifdef CONFIG_XFRM_SUB_POLICY
1671 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1672 pols[1] = xfrm_policy_lookup_bytype(net,
1673 XFRM_POLICY_TYPE_MAIN,
1674 fl, family,
1675 XFRM_POLICY_OUT);
1676 if (pols[1]) {
1677 if (IS_ERR(pols[1])) {
1678 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1679 err = PTR_ERR(pols[1]);
1680 goto error;
1681 }
1682 if (pols[1]->action == XFRM_POLICY_BLOCK) {
1683 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
1684 err = -EPERM;
1685 goto error;
1686 }
1687 npols ++;
1688 xfrm_nr += pols[1]->xfrm_nr;
1689 }
1690 }
1691
1692 /*
1693 * Neither the flowi nor the bundle information knows about the
1694 * transformation template size. When more than one policy is in
1695 * use, we can only tell whether all of them are bypass policies
1696 * after they have all been searched. Note that the not-transformed
1697 * bypass above is likewise guarded by the non-sub-policy configuration.
1698 */
1699 if (xfrm_nr == 0) {
1700 /* The flow passes untransformed. */
1701 xfrm_pols_put(pols, npols);
1702 return 0;
1703 }
1704
1705 #endif
1706 nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
1707
1708 if (unlikely(nx<0)) {
1709 err = nx;
1710 if (err == -EAGAIN && net->xfrm.sysctl_larval_drop) {
1711 /* EREMOTE tells the caller to generate
1712 * a one-shot blackhole route.
1713 */
1714 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1715 xfrm_pol_put(policy);
1716 return -EREMOTE;
1717 }
1718 if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
1719 DECLARE_WAITQUEUE(wait, current);
1720
1721 add_wait_queue(&net->xfrm.km_waitq, &wait);
1722 set_current_state(TASK_INTERRUPTIBLE);
1723 schedule();
1724 set_current_state(TASK_RUNNING);
1725 remove_wait_queue(&net->xfrm.km_waitq, &wait);
1726
1727 nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
1728
1729 if (nx == -EAGAIN && signal_pending(current)) {
1730 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1731 err = -ERESTART;
1732 goto error;
1733 }
1734 if (nx == -EAGAIN ||
1735 genid != atomic_read(&flow_cache_genid)) {
1736 xfrm_pols_put(pols, npols);
1737 goto restart;
1738 }
1739 err = nx;
1740 }
1741 if (err < 0) {
1742 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1743 goto error;
1744 }
1745 }
1746 if (nx == 0) {
1747 /* The flow passes untransformed. */
1748 xfrm_pols_put(pols, npols);
1749 return 0;
1750 }
1751
1752 dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
1753 err = PTR_ERR(dst);
1754 if (IS_ERR(dst)) {
1755 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1756 goto error;
1757 }
1758
1759 for (pi = 0; pi < npols; pi++)
1760 pol_dead |= pols[pi]->walk.dead;
1761
1762 write_lock_bh(&policy->lock);
1763 if (unlikely(pol_dead || stale_bundle(dst))) {
1764 /* Wow! While we worked on resolving, this
1765 * policy went away. Retry. It is not paranoia:
1766 * we just cannot attach a new bundle to a dead object,
1767 * and we cannot enlist stale bundles either.
1768 */
1769 write_unlock_bh(&policy->lock);
1770 dst_free(dst);
1771
1772 if (pol_dead)
1773 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLDEAD);
1774 else
1775 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1776 err = -EHOSTUNREACH;
1777 goto error;
1778 }
1779
1780 if (npols > 1)
1781 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1782 else
1783 err = xfrm_dst_update_origin(dst, fl);
1784 if (unlikely(err)) {
1785 write_unlock_bh(&policy->lock);
1786 dst_free(dst);
1787 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1788 goto error;
1789 }
1790
1791 dst->next = policy->bundles;
1792 policy->bundles = dst;
1793 dst_hold(dst);
1794 write_unlock_bh(&policy->lock);
1795 }
1796 *dst_p = dst;
1797 dst_release(dst_orig);
1798 xfrm_pols_put(pols, npols);
1799 return 0;
1800
1801 error:
1802 xfrm_pols_put(pols, npols);
1803 dropdst:
1804 dst_release(dst_orig);
1805 *dst_p = NULL;
1806 return err;
1807
1808 nopol:
1809 err = -ENOENT;
1810 if (flags & XFRM_LOOKUP_ICMP)
1811 goto dropdst;
1812 return 0;
1813 }
1814 EXPORT_SYMBOL(__xfrm_lookup);
1815
1816 int xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
1817 struct sock *sk, int flags)
1818 {
1819 int err = __xfrm_lookup(net, dst_p, fl, sk, flags);
1820
1821 if (err == -EREMOTE) {
1822 dst_release(*dst_p);
1823 *dst_p = NULL;
1824 err = -EAGAIN;
1825 }
1826
1827 return err;
1828 }
1829 EXPORT_SYMBOL(xfrm_lookup);
1830
1831 static inline int
1832 xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
1833 {
1834 struct xfrm_state *x;
1835
1836 if (!skb->sp || idx < 0 || idx >= skb->sp->len)
1837 return 0;
1838 x = skb->sp->xvec[idx];
1839 if (!x->type->reject)
1840 return 0;
1841 return x->type->reject(x, skb, fl);
1842 }
1843
1844 /* When the skb is transformed back to its "native" form, we have to
1845 * check policy restrictions. At the moment we do this in a maximally
1846 * stupid way. Shame on me. :-) Of course, connected sockets must
1847 * have the policy cached on them.
1848 */
1849
1850 static inline int
1851 xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
1852 unsigned short family)
1853 {
1854 if (xfrm_state_kern(x))
1855 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
1856 return x->id.proto == tmpl->id.proto &&
1857 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
1858 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
1859 x->props.mode == tmpl->mode &&
1860 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
1861 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
1862 !(x->props.mode != XFRM_MODE_TRANSPORT &&
1863 xfrm_state_addr_cmp(tmpl, x, family));
1864 }
1865
1866 /*
1867 * Zero or a positive value is returned when validation succeeds (either a
1868 * bypass because of an optional transport mode, or the next index of the
1869 * secpath state matched against the template).
1870 * -1 is returned when no matching template is found.
1871 * Otherwise "-2 - errored_index" is returned.
1872 */
1873 static inline int
1874 xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
1875 unsigned short family)
1876 {
1877 int idx = start;
1878
1879 if (tmpl->optional) {
1880 if (tmpl->mode == XFRM_MODE_TRANSPORT)
1881 return start;
1882 } else
1883 start = -1;
1884 for (; idx < sp->len; idx++) {
1885 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
1886 return ++idx;
1887 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
1888 if (start == -1)
1889 start = -2-idx;
1890 break;
1891 }
1892 }
1893 return start;
1894 }
1895
1896 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1897 unsigned int family, int reverse)
1898 {
1899 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1900 int err;
1901
1902 if (unlikely(afinfo == NULL))
1903 return -EAFNOSUPPORT;
1904
1905 afinfo->decode_session(skb, fl, reverse);
1906 err = security_xfrm_decode_session(skb, &fl->secid);
1907 xfrm_policy_put_afinfo(afinfo);
1908 return err;
1909 }
1910 EXPORT_SYMBOL(__xfrm_decode_session);
1911
1912 static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
1913 {
1914 for (; k < sp->len; k++) {
1915 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
1916 *idxp = k;
1917 return 1;
1918 }
1919 }
1920
1921 return 0;
1922 }
1923
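/* Inbound policy check, sketched:
 *   1. decode the flow from the skb (optionally reversed);
 *   2. verify every SA in skb->sp against its own selector;
 *   3. find the applicable policy (socket policy first, then the
 *      flow cache);
 *   4. concatenate the templates of all matching policies and verify
 *      that the secpath satisfies them in order via xfrm_policy_ok().
 */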
1924 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
1925 unsigned short family)
1926 {
1927 struct net *net = dev_net(skb->dev);
1928 struct xfrm_policy *pol;
1929 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1930 int npols = 0;
1931 int xfrm_nr;
1932 int pi;
1933 int reverse;
1934 struct flowi fl;
1935 u8 fl_dir;
1936 int xerr_idx = -1;
1937
1938 reverse = dir & ~XFRM_POLICY_MASK;
1939 dir &= XFRM_POLICY_MASK;
1940 fl_dir = policy_to_flow_dir(dir);
1941
1942 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
1943 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
1944 return 0;
1945 }
1946
1947 nf_nat_decode_session(skb, &fl, family);
1948
1949 /* First, check used SA against their selectors. */
1950 if (skb->sp) {
1951 int i;
1952
1953 for (i=skb->sp->len-1; i>=0; i--) {
1954 struct xfrm_state *x = skb->sp->xvec[i];
1955 if (!xfrm_selector_match(&x->sel, &fl, family)) {
1956 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
1957 return 0;
1958 }
1959 }
1960 }
1961
1962 pol = NULL;
1963 if (sk && sk->sk_policy[dir]) {
1964 pol = xfrm_sk_policy_lookup(sk, dir, &fl);
1965 if (IS_ERR(pol)) {
1966 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
1967 return 0;
1968 }
1969 }
1970
1971 if (!pol) {
1972 struct flow_cache_object *flo;
1973
1974 flo = flow_cache_lookup(net, &fl, family, fl_dir,
1975 xfrm_policy_lookup, NULL);
1976 if (IS_ERR_OR_NULL(flo))
1977 pol = ERR_CAST(flo);
1978 else
1979 pol = container_of(flo, struct xfrm_policy, flo);
1980 }
1981
1982 if (IS_ERR(pol)) {
1983 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
1984 return 0;
1985 }
1986
1987 if (!pol) {
1988 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
1989 xfrm_secpath_reject(xerr_idx, skb, &fl);
1990 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
1991 return 0;
1992 }
1993 return 1;
1994 }
1995
1996 pol->curlft.use_time = get_seconds();
1997
1998 pols[0] = pol;
1999 npols ++;
2000 #ifdef CONFIG_XFRM_SUB_POLICY
2001 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2002 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2003 &fl, family,
2004 XFRM_POLICY_IN);
2005 if (pols[1]) {
2006 if (IS_ERR(pols[1])) {
2007 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2008 return 0;
2009 }
2010 pols[1]->curlft.use_time = get_seconds();
2011 npols ++;
2012 }
2013 }
2014 #endif
2015
2016 if (pol->action == XFRM_POLICY_ALLOW) {
2017 struct sec_path *sp;
2018 static struct sec_path dummy;
2019 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2020 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2021 struct xfrm_tmpl **tpp = tp;
2022 int ti = 0;
2023 int i, k;
2024
2025 if ((sp = skb->sp) == NULL)
2026 sp = &dummy;
2027
2028 for (pi = 0; pi < npols; pi++) {
2029 if (pols[pi] != pol &&
2030 pols[pi]->action != XFRM_POLICY_ALLOW) {
2031 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2032 goto reject;
2033 }
2034 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2035 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2036 goto reject_error;
2037 }
2038 for (i = 0; i < pols[pi]->xfrm_nr; i++)
2039 tpp[ti++] = &pols[pi]->xfrm_vec[i];
2040 }
2041 xfrm_nr = ti;
2042 if (npols > 1) {
2043 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
2044 tpp = stp;
2045 }
2046
2047 /* For each tunnel xfrm, find the first matching tmpl.
2048 * For each tmpl before that, find corresponding xfrm.
2049 * Order is _important_. Later we will implement
2050 * some barriers, but at the moment barriers
2051 * are implied between every two transformations.
2052 */
2053 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2054 k = xfrm_policy_ok(tpp[i], sp, k, family);
2055 if (k < 0) {
2056 if (k < -1)
2057 /* "-2 - errored_index" returned */
2058 xerr_idx = -(2+k);
2059 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2060 goto reject;
2061 }
2062 }
2063
2064 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2065 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2066 goto reject;
2067 }
2068
2069 xfrm_pols_put(pols, npols);
2070 return 1;
2071 }
2072 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2073
2074 reject:
2075 xfrm_secpath_reject(xerr_idx, skb, &fl);
2076 reject_error:
2077 xfrm_pols_put(pols, npols);
2078 return 0;
2079 }
2080 EXPORT_SYMBOL(__xfrm_policy_check);
2081
2082 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2083 {
2084 struct net *net = dev_net(skb->dev);
2085 struct flowi fl;
2086 struct dst_entry *dst;
2087 int res;
2088
2089 if (xfrm_decode_session(skb, &fl, family) < 0) {
2090 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2091 return 0;
2092 }
2093
2094 dst = skb_dst(skb);
2095
2096 res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
2097 skb_dst_set(skb, dst);
2098 return res;
2099 }
2100 EXPORT_SYMBOL(__xfrm_route_forward);
2101
2102 /* Optimize later using cookies and generation ids. */
2103
2104 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2105 {
2106 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2107 * to "-1" to force all XFRM destinations to get validated by
2108 * dst_ops->check on every use. We do this because when a
2109 * normal route referenced by an XFRM dst is obsoleted we do
2110 * not go looking around for all parent referencing XFRM dsts
2111 * so that we can invalidate them. It is just too much work.
2112 * Instead we make the checks here on every use. For example:
2113 *
2114 * XFRM dst A --> IPv4 dst X
2115 *
2116 * X is the "xdst->route" of A (X is also the "dst->path" of A
2117 * in this example). If X is marked obsolete, "A" will not
2118 * notice. That's what we are validating here via the
2119 * stale_bundle() check.
2120 *
2121 * When a policy's bundle is pruned, we dst_free() the XFRM
2122 * dst which causes its ->obsolete field to be set to a
2123 * positive non-zero integer. If an XFRM dst has been pruned
2124 * like this, we want to force a new route lookup.
2125 */
2126 if (dst->obsolete < 0 && !stale_bundle(dst))
2127 return dst;
2128
2129 return NULL;
2130 }
2131
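/* A bundle is stale if any of its components no longer validates;
 * checked without flow or policy context, so only the generic
 * liveness tests in xfrm_bundle_ok() apply.
 */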
2132 static int stale_bundle(struct dst_entry *dst)
2133 {
2134 return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
2135 }
2136
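/* A device is going away: re-point every child dst in the bundle that
 * still references it at the namespace's loopback device.
 */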
2137 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2138 {
2139 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2140 dst->dev = dev_net(dev)->loopback_dev;
2141 dev_hold(dst->dev);
2142 dev_put(dev);
2143 }
2144 }
2145 EXPORT_SYMBOL(xfrm_dst_ifdown);
2146
2147 static void xfrm_link_failure(struct sk_buff *skb)
2148 {
2149 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
2150 return;
2151 }
2152
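/* Drop our reference to an obsoleted dst so the caller falls back to a
 * fresh lookup.
 */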
2153 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2154 {
2155 if (dst) {
2156 if (dst->obsolete) {
2157 dst_release(dst);
2158 dst = NULL;
2159 }
2160 }
2161 return dst;
2162 }
2163
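/* Unlink every bundle of @pol for which func() is true and chain it
 * onto the caller's gc list for later dst_free().
 */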
2164 static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
2165 {
2166 struct dst_entry *dst, **dstp;
2167
2168 write_lock(&pol->lock);
2169 dstp = &pol->bundles;
2170 while ((dst = *dstp) != NULL) {
2171 if (func(dst)) {
2172 *dstp = dst->next;
2173 dst->next = *gc_list_p;
2174 *gc_list_p = dst;
2175 } else {
2176 dstp = &dst->next;
2177 }
2178 }
2179 write_unlock(&pol->lock);
2180 }
2181
2182 static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *))
2183 {
2184 struct dst_entry *gc_list = NULL;
2185 int dir;
2186
2187 read_lock_bh(&xfrm_policy_lock);
2188 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2189 struct xfrm_policy *pol;
2190 struct hlist_node *entry;
2191 struct hlist_head *table;
2192 int i;
2193
2194 hlist_for_each_entry(pol, entry,
2195 &net->xfrm.policy_inexact[dir], bydst)
2196 prune_one_bundle(pol, func, &gc_list);
2197
2198 table = net->xfrm.policy_bydst[dir].table;
2199 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
2200 hlist_for_each_entry(pol, entry, table + i, bydst)
2201 prune_one_bundle(pol, func, &gc_list);
2202 }
2203 }
2204 read_unlock_bh(&xfrm_policy_lock);
2205
2206 while (gc_list) {
2207 struct dst_entry *dst = gc_list;
2208 gc_list = dst->next;
2209 dst_free(dst);
2210 }
2211 }
2212
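/* A bundle is unused once nothing holds a reference to its top dst. */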
2213 static int unused_bundle(struct dst_entry *dst)
2214 {
2215 return !atomic_read(&dst->__refcnt);
2216 }
2217
2218 static void __xfrm_garbage_collect(struct net *net)
2219 {
2220 xfrm_prune_bundles(net, unused_bundle);
2221 }
2222
2223 static int xfrm_flush_bundles(struct net *net)
2224 {
2225 xfrm_prune_bundles(net, stale_bundle);
2226 return 0;
2227 }
2228
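/* Walk a freshly built bundle top-down, caching the child and route
 * MTUs and setting each level's MTU to the smaller of the route MTU
 * and the state-adjusted child MTU.
 */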
2229 static void xfrm_init_pmtu(struct dst_entry *dst)
2230 {
2231 do {
2232 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2233 u32 pmtu, route_mtu_cached;
2234
2235 pmtu = dst_mtu(dst->child);
2236 xdst->child_mtu_cached = pmtu;
2237
2238 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2239
2240 route_mtu_cached = dst_mtu(xdst->route);
2241 xdst->route_mtu_cached = route_mtu_cached;
2242
2243 if (pmtu > route_mtu_cached)
2244 pmtu = route_mtu_cached;
2245
2246 dst->metrics[RTAX_MTU-1] = pmtu;
2247 } while ((dst = dst->next));
2248 }
2249
2250 /* Check that the bundle accepts the flow and its components are
2251 * still valid.
2252 */
2253
2254 int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
2255 struct flowi *fl, int family, int strict)
2256 {
2257 struct dst_entry *dst = &first->u.dst;
2258 struct xfrm_dst *last;
2259 u32 mtu;
2260
2261 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2262 (dst->dev && !netif_running(dst->dev)))
2263 return 0;
2264 #ifdef CONFIG_XFRM_SUB_POLICY
2265 if (fl) {
2266 if (first->origin && !flow_cache_uli_match(first->origin, fl))
2267 return 0;
2268 if (first->partner &&
2269 !xfrm_selector_match(first->partner, fl, family))
2270 return 0;
2271 }
2272 #endif
2273
2274 last = NULL;
2275
2276 do {
2277 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2278
2279 if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
2280 return 0;
2281 if (fl && pol &&
2282 !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
2283 return 0;
2284 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2285 return 0;
2286 if (xdst->genid != dst->xfrm->genid)
2287 return 0;
2288
2289 if (strict && fl &&
2290 !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2291 !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
2292 return 0;
2293
2294 mtu = dst_mtu(dst->child);
2295 if (xdst->child_mtu_cached != mtu) {
2296 last = xdst;
2297 xdst->child_mtu_cached = mtu;
2298 }
2299
2300 if (!dst_check(xdst->route, xdst->route_cookie))
2301 return 0;
2302 mtu = dst_mtu(xdst->route);
2303 if (xdst->route_mtu_cached != mtu) {
2304 last = xdst;
2305 xdst->route_mtu_cached = mtu;
2306 }
2307
2308 dst = dst->child;
2309 } while (dst->xfrm);
2310
2311 if (likely(!last))
2312 return 1;
2313
2314 mtu = last->child_mtu_cached;
2315 for (;;) {
2316 dst = &last->u.dst;
2317
2318 mtu = xfrm_state_mtu(dst->xfrm, mtu);
2319 if (mtu > last->route_mtu_cached)
2320 mtu = last->route_mtu_cached;
2321 dst->metrics[RTAX_MTU-1] = mtu;
2322
2323 if (last == first)
2324 break;
2325
2326 last = (struct xfrm_dst *)last->u.dst.next;
2327 last->child_mtu_cached = mtu;
2328 }
2329
2330 return 1;
2331 }
2332
2333 EXPORT_SYMBOL(xfrm_bundle_ok);
2334
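/* Register per-family policy ops. Any dst_ops hooks the caller leaves
 * NULL are filled in with the generic xfrm implementations, and every
 * existing namespace gets a fresh copy of the family's dst_ops.
 */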
2335 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2336 {
2337 struct net *net;
2338 int err = 0;
2339 if (unlikely(afinfo == NULL))
2340 return -EINVAL;
2341 if (unlikely(afinfo->family >= NPROTO))
2342 return -EAFNOSUPPORT;
2343 write_lock_bh(&xfrm_policy_afinfo_lock);
2344 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2345 err = -ENOBUFS;
2346 else {
2347 struct dst_ops *dst_ops = afinfo->dst_ops;
2348 if (likely(dst_ops->kmem_cachep == NULL))
2349 dst_ops->kmem_cachep = xfrm_dst_cache;
2350 if (likely(dst_ops->check == NULL))
2351 dst_ops->check = xfrm_dst_check;
2352 if (likely(dst_ops->negative_advice == NULL))
2353 dst_ops->negative_advice = xfrm_negative_advice;
2354 if (likely(dst_ops->link_failure == NULL))
2355 dst_ops->link_failure = xfrm_link_failure;
2356 if (likely(afinfo->garbage_collect == NULL))
2357 afinfo->garbage_collect = __xfrm_garbage_collect;
2358 xfrm_policy_afinfo[afinfo->family] = afinfo;
2359 }
2360 write_unlock_bh(&xfrm_policy_afinfo_lock);
2361
2362 rtnl_lock();
2363 for_each_net(net) {
2364 struct dst_ops *xfrm_dst_ops;
2365
2366 switch (afinfo->family) {
2367 case AF_INET:
2368 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2369 break;
2370 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2371 case AF_INET6:
2372 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2373 break;
2374 #endif
2375 default:
2376 BUG();
2377 }
2378 *xfrm_dst_ops = *afinfo->dst_ops;
2379 }
2380 rtnl_unlock();
2381
2382 return err;
2383 }
2384 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2385
2386 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2387 {
2388 int err = 0;
2389 if (unlikely(afinfo == NULL))
2390 return -EINVAL;
2391 if (unlikely(afinfo->family >= NPROTO))
2392 return -EAFNOSUPPORT;
2393 write_lock_bh(&xfrm_policy_afinfo_lock);
2394 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2395 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2396 err = -EINVAL;
2397 else {
2398 struct dst_ops *dst_ops = afinfo->dst_ops;
2399 xfrm_policy_afinfo[afinfo->family] = NULL;
2400 dst_ops->kmem_cachep = NULL;
2401 dst_ops->check = NULL;
2402 dst_ops->negative_advice = NULL;
2403 dst_ops->link_failure = NULL;
2404 afinfo->garbage_collect = NULL;
2405 }
2406 }
2407 write_unlock_bh(&xfrm_policy_afinfo_lock);
2408 return err;
2409 }
2410 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2411
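/* Seed a new namespace's dst_ops from whatever families are already
 * registered.
 */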
2412 static void __net_init xfrm_dst_ops_init(struct net *net)
2413 {
2414 struct xfrm_policy_afinfo *afinfo;
2415
2416 read_lock_bh(&xfrm_policy_afinfo_lock);
2417 afinfo = xfrm_policy_afinfo[AF_INET];
2418 if (afinfo)
2419 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2420 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2421 afinfo = xfrm_policy_afinfo[AF_INET6];
2422 if (afinfo)
2423 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2424 #endif
2425 read_unlock_bh(&xfrm_policy_afinfo_lock);
2426 }
2427
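/* Returns with the afinfo read lock held on success; the caller must
 * release it with xfrm_policy_put_afinfo().
 */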
2428 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
2429 {
2430 struct xfrm_policy_afinfo *afinfo;
2431 if (unlikely(family >= NPROTO))
2432 return NULL;
2433 read_lock(&xfrm_policy_afinfo_lock);
2434 afinfo = xfrm_policy_afinfo[family];
2435 if (unlikely(!afinfo))
2436 read_unlock(&xfrm_policy_afinfo_lock);
2437 return afinfo;
2438 }
2439
2440 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
2441 {
2442 read_unlock(&xfrm_policy_afinfo_lock);
2443 }
2444
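/* Flush stale bundles whenever a network device goes down. */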
2445 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2446 {
2447 struct net_device *dev = ptr;
2448
2449 switch (event) {
2450 case NETDEV_DOWN:
2451 xfrm_flush_bundles(dev_net(dev));
2452 }
2453 return NOTIFY_DONE;
2454 }
2455
2456 static struct notifier_block xfrm_dev_notifier = {
2457 .notifier_call = xfrm_dev_event,
2458 };
2459
2460 #ifdef CONFIG_XFRM_STATISTICS
2461 static int __net_init xfrm_statistics_init(struct net *net)
2462 {
2463 int rv;
2464
2465 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2466 sizeof(struct linux_xfrm_mib)) < 0)
2467 return -ENOMEM;
2468 rv = xfrm_proc_init(net);
2469 if (rv < 0)
2470 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2471 return rv;
2472 }
2473
2474 static void xfrm_statistics_fini(struct net *net)
2475 {
2476 xfrm_proc_fini(net);
2477 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2478 }
2479 #else
2480 static int __net_init xfrm_statistics_init(struct net *net)
2481 {
2482 return 0;
2483 }
2484
2485 static void xfrm_statistics_fini(struct net *net)
2486 {
2487 }
2488 #endif
2489
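/* Per-namespace setup: allocate the by-index table and the
 * per-direction by-destination hash tables (eight buckets initially;
 * grown later by the resize work).
 */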
2490 static int __net_init xfrm_policy_init(struct net *net)
2491 {
2492 unsigned int hmask, sz;
2493 int dir;
2494
2495 if (net_eq(net, &init_net))
2496 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2497 sizeof(struct xfrm_dst),
2498 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2499 NULL);
2500
2501 hmask = 8 - 1;
2502 sz = (hmask+1) * sizeof(struct hlist_head);
2503
2504 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2505 if (!net->xfrm.policy_byidx)
2506 goto out_byidx;
2507 net->xfrm.policy_idx_hmask = hmask;
2508
2509 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2510 struct xfrm_policy_hash *htab;
2511
2512 net->xfrm.policy_count[dir] = 0;
2513 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2514
2515 htab = &net->xfrm.policy_bydst[dir];
2516 htab->table = xfrm_hash_alloc(sz);
2517 if (!htab->table)
2518 goto out_bydst;
2519 htab->hmask = hmask;
2520 }
2521
2522 INIT_LIST_HEAD(&net->xfrm.policy_all);
2523 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2524 if (net_eq(net, &init_net))
2525 register_netdevice_notifier(&xfrm_dev_notifier);
2526 return 0;
2527
2528 out_bydst:
2529 for (dir--; dir >= 0; dir--) {
2530 struct xfrm_policy_hash *htab;
2531
2532 htab = &net->xfrm.policy_bydst[dir];
2533 xfrm_hash_free(htab->table, sz);
2534 }
2535 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2536 out_byidx:
2537 return -ENOMEM;
2538 }
2539
2540 static void xfrm_policy_fini(struct net *net)
2541 {
2542 struct xfrm_audit audit_info;
2543 unsigned int sz;
2544 int dir;
2545
2546 flush_work(&net->xfrm.policy_hash_work);
2547 #ifdef CONFIG_XFRM_SUB_POLICY
2548 audit_info.loginuid = -1;
2549 audit_info.sessionid = -1;
2550 audit_info.secid = 0;
2551 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
2552 #endif
2553 audit_info.loginuid = -1;
2554 audit_info.sessionid = -1;
2555 audit_info.secid = 0;
2556 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2557 flush_work(&xfrm_policy_gc_work);
2558
2559 WARN_ON(!list_empty(&net->xfrm.policy_all));
2560
2561 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2562 struct xfrm_policy_hash *htab;
2563
2564 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2565
2566 htab = &net->xfrm.policy_bydst[dir];
2567 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2568 WARN_ON(!hlist_empty(htab->table));
2569 xfrm_hash_free(htab->table, sz);
2570 }
2571
2572 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2573 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2574 xfrm_hash_free(net->xfrm.policy_byidx, sz);
2575 }
2576
2577 static int __net_init xfrm_net_init(struct net *net)
2578 {
2579 int rv;
2580
2581 rv = xfrm_statistics_init(net);
2582 if (rv < 0)
2583 goto out_statistics;
2584 rv = xfrm_state_init(net);
2585 if (rv < 0)
2586 goto out_state;
2587 rv = xfrm_policy_init(net);
2588 if (rv < 0)
2589 goto out_policy;
2590 xfrm_dst_ops_init(net);
2591 rv = xfrm_sysctl_init(net);
2592 if (rv < 0)
2593 goto out_sysctl;
2594 return 0;
2595
2596 out_sysctl:
2597 xfrm_policy_fini(net);
2598 out_policy:
2599 xfrm_state_fini(net);
2600 out_state:
2601 xfrm_statistics_fini(net);
2602 out_statistics:
2603 return rv;
2604 }
2605
2606 static void __net_exit xfrm_net_exit(struct net *net)
2607 {
2608 xfrm_sysctl_fini(net);
2609 xfrm_policy_fini(net);
2610 xfrm_state_fini(net);
2611 xfrm_statistics_fini(net);
2612 }
2613
2614 static struct pernet_operations __net_initdata xfrm_net_ops = {
2615 .init = xfrm_net_init,
2616 .exit = xfrm_net_exit,
2617 };
2618
2619 void __init xfrm_init(void)
2620 {
2621 register_pernet_subsys(&xfrm_net_ops);
2622 xfrm_input_init();
2623 }
2624
2625 #ifdef CONFIG_AUDITSYSCALL
2626 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2627 struct audit_buffer *audit_buf)
2628 {
2629 struct xfrm_sec_ctx *ctx = xp->security;
2630 struct xfrm_selector *sel = &xp->selector;
2631
2632 if (ctx)
2633 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2634 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2635
2636 switch (sel->family) {
2637 case AF_INET:
2638 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2639 if (sel->prefixlen_s != 32)
2640 audit_log_format(audit_buf, " src_prefixlen=%d",
2641 sel->prefixlen_s);
2642 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2643 if (sel->prefixlen_d != 32)
2644 audit_log_format(audit_buf, " dst_prefixlen=%d",
2645 sel->prefixlen_d);
2646 break;
2647 case AF_INET6:
2648 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2649 if (sel->prefixlen_s != 128)
2650 audit_log_format(audit_buf, " src_prefixlen=%d",
2651 sel->prefixlen_s);
2652 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2653 if (sel->prefixlen_d != 128)
2654 audit_log_format(audit_buf, " dst_prefixlen=%d",
2655 sel->prefixlen_d);
2656 break;
2657 }
2658 }
2659
2660 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
2661 uid_t auid, u32 sessionid, u32 secid)
2662 {
2663 struct audit_buffer *audit_buf;
2664
2665 audit_buf = xfrm_audit_start("SPD-add");
2666 if (audit_buf == NULL)
2667 return;
2668 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2669 audit_log_format(audit_buf, " res=%u", result);
2670 xfrm_audit_common_policyinfo(xp, audit_buf);
2671 audit_log_end(audit_buf);
2672 }
2673 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
2674
2675 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
2676 uid_t auid, u32 sessionid, u32 secid)
2677 {
2678 struct audit_buffer *audit_buf;
2679
2680 audit_buf = xfrm_audit_start("SPD-delete");
2681 if (audit_buf == NULL)
2682 return;
2683 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2684 audit_log_format(audit_buf, " res=%u", result);
2685 xfrm_audit_common_policyinfo(xp, audit_buf);
2686 audit_log_end(audit_buf);
2687 }
2688 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
2689 #endif
2690
2691 #ifdef CONFIG_XFRM_MIGRATE
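/* With a wildcard protocol the comparator only needs the addresses and
 * prefix lengths to agree; otherwise the selectors must be identical.
 */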
2692 static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
2693 struct xfrm_selector *sel_tgt)
2694 {
2695 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
2696 if (sel_tgt->family == sel_cmp->family &&
2697 xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
2698 sel_cmp->family) == 0 &&
2699 xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
2700 sel_cmp->family) == 0 &&
2701 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
2702 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
2703 return 1;
2704 }
2705 } else {
2706 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
2707 return 1;
2708 }
2709 }
2710 return 0;
2711 }
2712
2713 static struct xfrm_policy *xfrm_migrate_policy_find(struct xfrm_selector *sel,
2714 u8 dir, u8 type)
2715 {
2716 struct xfrm_policy *pol, *ret = NULL;
2717 struct hlist_node *entry;
2718 struct hlist_head *chain;
2719 u32 priority = ~0U;
2720
2721 read_lock_bh(&xfrm_policy_lock);
2722 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
2723 hlist_for_each_entry(pol, entry, chain, bydst) {
2724 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2725 pol->type == type) {
2726 ret = pol;
2727 priority = ret->priority;
2728 break;
2729 }
2730 }
2731 chain = &init_net.xfrm.policy_inexact[dir];
2732 hlist_for_each_entry(pol, entry, chain, bydst) {
2733 if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2734 pol->type == type &&
2735 pol->priority < priority) {
2736 ret = pol;
2737 break;
2738 }
2739 }
2740
2741 if (ret)
2742 xfrm_pol_hold(ret);
2743
2744 read_unlock_bh(&xfrm_policy_lock);
2745
2746 return ret;
2747 }
2748
2749 static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
2750 {
2751 int match = 0;
2752
2753 if (t->mode == m->mode && t->id.proto == m->proto &&
2754 (m->reqid == 0 || t->reqid == m->reqid)) {
2755 switch (t->mode) {
2756 case XFRM_MODE_TUNNEL:
2757 case XFRM_MODE_BEET:
2758 if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
2759 m->old_family) == 0 &&
2760 xfrm_addr_cmp(&t->saddr, &m->old_saddr,
2761 m->old_family) == 0) {
2762 match = 1;
2763 }
2764 break;
2765 case XFRM_MODE_TRANSPORT:
2766 /* in case of transport mode, the template does not store
2767 * any IP addresses, hence we just compare mode and
2768 * protocol */
2769 match = 1;
2770 break;
2771 default:
2772 break;
2773 }
2774 }
2775 return match;
2776 }
2777
2778 /* update endpoint address(es) of template(s) */
2779 static int xfrm_policy_migrate(struct xfrm_policy *pol,
2780 struct xfrm_migrate *m, int num_migrate)
2781 {
2782 struct xfrm_migrate *mp;
2783 struct dst_entry *dst;
2784 int i, j, n = 0;
2785
2786 write_lock_bh(&pol->lock);
2787 if (unlikely(pol->walk.dead)) {
2788 /* target policy has been deleted */
2789 write_unlock_bh(&pol->lock);
2790 return -ENOENT;
2791 }
2792
2793 for (i = 0; i < pol->xfrm_nr; i++) {
2794 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
2795 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
2796 continue;
2797 n++;
2798 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
2799 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
2800 continue;
2801 /* update endpoints */
2802 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
2803 sizeof(pol->xfrm_vec[i].id.daddr));
2804 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
2805 sizeof(pol->xfrm_vec[i].saddr));
2806 pol->xfrm_vec[i].encap_family = mp->new_family;
2807 /* flush bundles */
2808 while ((dst = pol->bundles) != NULL) {
2809 pol->bundles = dst->next;
2810 dst_free(dst);
2811 }
2812 }
2813 }
2814
2815 write_unlock_bh(&pol->lock);
2816
2817 if (!n)
2818 return -ENODATA;
2819
2820 return 0;
2821 }
2822
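/* Sanity-check a migrate request: each entry must actually change an
 * endpoint, must not use wildcard addresses, and must not duplicate a
 * later entry in the same request.
 */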
2823 static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
2824 {
2825 int i, j;
2826
2827 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
2828 return -EINVAL;
2829
2830 for (i = 0; i < num_migrate; i++) {
2831 if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
2832 m[i].old_family) == 0) &&
2833 (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
2834 m[i].old_family) == 0))
2835 return -EINVAL;
2836 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
2837 xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
2838 return -EINVAL;
2839
2840 /* check if there is any duplicated entry */
2841 for (j = i + 1; j < num_migrate; j++) {
2842 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
2843 sizeof(m[i].old_daddr)) &&
2844 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
2845 sizeof(m[i].old_saddr)) &&
2846 m[i].proto == m[j].proto &&
2847 m[i].mode == m[j].mode &&
2848 m[i].reqid == m[j].reqid &&
2849 m[i].old_family == m[j].old_family)
2850 return -EINVAL;
2851 }
2852 }
2853
2854 return 0;
2855 }
2856
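/* Move an IPsec flow to new endpoints in five stages: find the policy,
 * clone and update the matching states, rewrite the policy templates,
 * delete the old states, then announce the change via km_migrate().
 */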
2857 int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
2858 struct xfrm_migrate *m, int num_migrate,
2859 struct xfrm_kmaddress *k)
2860 {
2861 int i, err, nx_cur = 0, nx_new = 0;
2862 struct xfrm_policy *pol = NULL;
2863 struct xfrm_state *x, *xc;
2864 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
2865 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
2866 struct xfrm_migrate *mp;
2867
2868 if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
2869 goto out;
2870
2871 /* Stage 1 - find policy */
2872 if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
2873 err = -ENOENT;
2874 goto out;
2875 }
2876
2877 /* Stage 2 - find and update state(s) */
2878 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
2879 if ((x = xfrm_migrate_state_find(mp))) {
2880 x_cur[nx_cur] = x;
2881 nx_cur++;
2882 if ((xc = xfrm_state_migrate(x, mp))) {
2883 x_new[nx_new] = xc;
2884 nx_new++;
2885 } else {
2886 err = -ENODATA;
2887 goto restore_state;
2888 }
2889 }
2890 }
2891
2892 /* Stage 3 - update policy */
2893 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
2894 goto restore_state;
2895
2896 /* Stage 4 - delete old state(s) */
2897 if (nx_cur) {
2898 xfrm_states_put(x_cur, nx_cur);
2899 xfrm_states_delete(x_cur, nx_cur);
2900 }
2901
2902 /* Stage 5 - announce */
2903 km_migrate(sel, dir, type, m, num_migrate, k);
2904
2905 xfrm_pol_put(pol);
2906
2907 return 0;
2908 out:
2909 return err;
2910
2911 restore_state:
2912 if (pol)
2913 xfrm_pol_put(pol);
2914 if (nx_cur)
2915 xfrm_states_put(x_cur, nx_cur);
2916 if (nx_new)
2917 xfrm_states_delete(x_new, nx_new);
2918
2919 return err;
2920 }
2921 EXPORT_SYMBOL(xfrm_migrate);
2922 #endif