3692a4783a7524e7a2a637bbfd97061323d76ae5
[deliverable/linux.git] / net / xfrm / xfrm_state.c
1 /*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <asm/uaccess.h>
23
24 #include "xfrm_hash.h"
25
/* Netlink socket used by the xfrm netlink interface (created elsewhere). */
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);

/* sysctl: async-event (aevent) timer interval, default XFRM_AE_ETIME. */
u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

/* sysctl: replay seq-number delta that triggers an aevent notification. */
u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

/* Protects the three hash tables below, xfrm_state_num and
 * xfrm_state_genid. */
static DEFINE_SPINLOCK(xfrm_state_lock);

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly; /* bucket count - 1 */
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;   /* states currently hashed */
static unsigned int xfrm_state_genid; /* bumped on every insert */
57
58 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
59 xfrm_address_t *saddr,
60 u32 reqid,
61 unsigned short family)
62 {
63 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
64 }
65
66 static inline unsigned int xfrm_src_hash(xfrm_address_t *addr,
67 unsigned short family)
68 {
69 return __xfrm_src_hash(addr, family, xfrm_state_hmask);
70 }
71
72 static inline unsigned int
73 xfrm_spi_hash(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family)
74 {
75 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
76 }
77
/* Re-bucket every state on @list into the three new tables using
 * @nhashmask.  Caller holds xfrm_state_lock.  hlist_add_head() moves
 * each node off the old chain, hence the _safe iterator.
 */
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		/* by-destination chain */
		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		/* by-source chain */
		h = __xfrm_src_hash(&x->props.saddr, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		/* by-SPI chain.
		 * NOTE(review): relinked unconditionally, even for larval
		 * (spi == 0) states that were never hashed by SPI — verify
		 * against __xfrm_state_delete, which only unlinks byspi
		 * when x->id.spi is set. */
		h = __xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				    x->props.family, nhashmask);
		hlist_add_head(&x->byspi, nspitable+h);
	}
}
104
105 static unsigned long xfrm_hash_new_size(void)
106 {
107 return ((xfrm_state_hmask + 1) << 1) *
108 sizeof(struct hlist_head);
109 }
110
/* Serializes concurrent resize work invocations. */
static DEFINE_MUTEX(hash_resize_mutex);

/* Work handler: double all three state hash tables and migrate entries.
 * Allocation happens outside xfrm_state_lock; only the transfer and the
 * pointer/mask switch run under it, and the old tables are freed after
 * the lock is dropped.
 */
static void xfrm_hash_resize(void *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	/* Walking bydst alone is enough: every state is on all chains. */
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}

static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
167
/* Woken whenever the state database changes (insert, expire, GC). */
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

/* Per-address-family state operations, registered by protocol code. */
static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

/* Deferred destruction: dead states are queued on xfrm_state_gc_list
 * and freed in process context by the GC work. */
static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
185
/* Final teardown of a dead state: stop its timers, free algorithm and
 * encapsulation data, and drop the type/mode module references.
 * Runs in process context (GC work), so del_timer_sync() is safe.
 */
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	/* kfree(NULL) is a no-op, so unset fields need no guard. */
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->mode)
		xfrm_put_mode(x->mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
204
/* GC work handler: atomically steal the pending list, then destroy each
 * queued state outside the lock.  The bydst node doubles as the GC-list
 * linkage (see __xfrm_state_destroy).
 */
static void xfrm_state_gc_task(void *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	/* Waiters may be blocked until states are fully gone. */
	wake_up(&km_waitq);
}
221
222 static inline unsigned long make_jiffies(long secs)
223 {
224 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
225 return MAX_SCHEDULE_TIMEOUT-1;
226 else
227 return secs*HZ;
228 }
229
/* Per-state lifetime timer.  Evaluates hard/soft add- and use-time
 * expiries, warns key managers on soft expiry, and deletes the state
 * on hard expiry.  Runs in timer (softirq) context under x->lock.
 */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;	/* seconds until the next re-check */
	int warn = 0;		/* set when a soft limit has passed */

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* A never-used state (use_time == 0) counts from now. */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;	/* soft expiry already reported */
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		/* Larval state: mark expired, wake waiters, and poll
		 * again shortly instead of deleting. */
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}
	/* Report hard expiry only if we actually did the delete here. */
	if (!__xfrm_state_delete(x) && x->id.spi)
		km_state_expired(x, 1, 0);

out:
	spin_unlock(&x->lock);
}
299
static void xfrm_replay_timer_handler(unsigned long data);

/* Allocate a new state holding one reference.  Byte/packet lifetimes
 * default to infinite; both timers are initialized but not armed.
 * GFP_ATOMIC because callers may run in softirq context.
 * Returns NULL on allocation failure.
 */
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data = (unsigned long)x;
		init_timer(&x->rtimer);
		x->rtimer.function = xfrm_replay_timer_handler;
		x->rtimer.data = (unsigned long)x;
		x->curlft.add_time = (unsigned long)xtime.tv_sec;
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
332
/* Queue a dead (already unhashed) state for deferred destruction by
 * the GC work.  The bydst node is reused as the GC-list link, which
 * is safe because the state is no longer in the hash tables.
 */
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
343
/* Mark @x dead and unhash it from all tables, dropping the allocation
 * reference.  Caller holds x->lock.  Returns 0 on success or -ESRCH
 * if the state was already dead.
 */
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		/* Larval states (spi == 0) were never hashed by SPI. */
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		__xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
369
370 int xfrm_state_delete(struct xfrm_state *x)
371 {
372 int err;
373
374 spin_lock_bh(&x->lock);
375 err = __xfrm_state_delete(x);
376 spin_unlock_bh(&x->lock);
377
378 return err;
379 }
380 EXPORT_SYMBOL(xfrm_state_delete);
381
/* Delete every non-kernel state whose protocol matches @proto.
 * xfrm_state_lock must be dropped around each delete (it takes
 * x->lock and may notify key managers), so the bucket walk restarts
 * from the chain head after every removal.
 */
void xfrm_state_flush(u8 proto)
{
	int i;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
	restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				/* Hold so the state survives the unlock. */
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				xfrm_state_delete(x);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);
409
410 static int
411 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
412 struct xfrm_tmpl *tmpl,
413 xfrm_address_t *daddr, xfrm_address_t *saddr,
414 unsigned short family)
415 {
416 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
417 if (!afinfo)
418 return -1;
419 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
420 xfrm_state_put_afinfo(afinfo);
421 return 0;
422 }
423
424 static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
425 {
426 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
427 struct xfrm_state *x;
428 struct hlist_node *entry;
429
430 hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
431 if (x->props.family != family ||
432 x->id.spi != spi ||
433 x->id.proto != proto)
434 continue;
435
436 switch (family) {
437 case AF_INET:
438 if (x->id.daddr.a4 != daddr->a4)
439 continue;
440 break;
441 case AF_INET6:
442 if (!ipv6_addr_equal((struct in6_addr *)daddr,
443 (struct in6_addr *)
444 x->id.daddr.a6))
445 continue;
446 break;
447 };
448
449 xfrm_state_hold(x);
450 return x;
451 }
452
453 return NULL;
454 }
455
456 static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
457 {
458 unsigned int h = xfrm_src_hash(saddr, family);
459 struct xfrm_state *x;
460 struct hlist_node *entry;
461
462 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
463 if (x->props.family != family ||
464 x->id.proto != proto)
465 continue;
466
467 switch (family) {
468 case AF_INET:
469 if (x->id.daddr.a4 != daddr->a4 ||
470 x->props.saddr.a4 != saddr->a4)
471 continue;
472 break;
473 case AF_INET6:
474 if (!ipv6_addr_equal((struct in6_addr *)daddr,
475 (struct in6_addr *)
476 x->id.daddr.a6) ||
477 !ipv6_addr_equal((struct in6_addr *)saddr,
478 (struct in6_addr *)
479 x->props.saddr.a6))
480 continue;
481 break;
482 };
483
484 xfrm_state_hold(x);
485 return x;
486 }
487
488 return NULL;
489 }
490
491 static inline struct xfrm_state *
492 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
493 {
494 if (use_spi)
495 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
496 x->id.proto, family);
497 else
498 return __xfrm_state_lookup_byaddr(&x->id.daddr,
499 &x->props.saddr,
500 x->id.proto, family);
501 }
502
/* Output-side SA resolution: find the best VALID state matching the
 * template and flow, or — if nothing matches and no acquire is already
 * pending — create a larval ACQ state and ask the key managers to
 * resolve it.  Returns a held state, or NULL with *err set
 * (-EAGAIN while an acquire is in progress).
 */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;	/* best VALID candidate so far */

	spin_lock_bh(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				/* Prefer non-dying, then newest. */
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* A concrete-SPI template must not alias an existing SA. */
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			/* Hash the larval state and start its ACQ
			 * expiry timer. */
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
			add_timer(&x->timer);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
611
/* Link @x into the hash tables and arm its timers.  Caller holds
 * xfrm_state_lock.  Schedules a table resize once the tables hold
 * more states than buckets and a collision is observed.
 */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	/* Only IPsec-proper protocols live in the by-SPI table. */
	if (xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY)) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	/* Fire the lifetime timer soon to pick up initial expiries. */
	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	/* x->bydst.next != NULL means this bucket now has a chain. */
	if (x->bydst.next != NULL &&
	    (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
	    xfrm_state_num > xfrm_state_hmask)
		schedule_work(&xfrm_hash_work);
}
645
/* xfrm_state_lock is held */
/* Refresh the genid of every existing state that shares @xnew's
 * (family, reqid, daddr, saddr) so cached bundles using them are
 * invalidated when the new state is inserted.
 */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family	== family &&
		    x->props.reqid	== reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
664
665 void xfrm_state_insert(struct xfrm_state *x)
666 {
667 spin_lock_bh(&xfrm_state_lock);
668 __xfrm_state_bump_genids(x);
669 __xfrm_state_insert(x);
670 spin_unlock_bh(&xfrm_state_lock);
671 }
672 EXPORT_SYMBOL(xfrm_state_insert);
673
/* xfrm_state_lock is held */
/* Find a larval (ACQ, spi==0) state matching the given identity; if
 * none exists and @create is set, allocate one with a host-only
 * selector, hash it and start its ACQ expiry timer.  Returns a held
 * reference or NULL.
 */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid  != reqid ||
		    x->props.mode   != mode ||
		    x->props.family != family ||
		    x->km.state     != XFRM_STATE_ACQ ||
		    x->id.spi       != 0)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4    != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		};

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (likely(x)) {
		/* Selector pinned to exactly these two hosts. */
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
				       (struct in6_addr *)daddr);
			ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
				       (struct in6_addr *)saddr);
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
				       (struct in6_addr *)saddr);
			ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
				       (struct in6_addr *)daddr);
			break;
		};

		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
		/* Extra hold: one ref for the caller, one consumed by
		 * the hash tables/timer lifetime. */
		xfrm_state_hold(x);
		x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
		add_timer(&x->timer);
		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
		h = xfrm_src_hash(saddr, family);
		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
		/* NOTE(review): xfrm_state_num is not incremented here,
		 * unlike __xfrm_state_insert — confirm intended. */
		wake_up(&km_waitq);
	}

	return x;
}
755
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);

/* Insert a fully-specified state.  Fails with -EEXIST if an equal
 * state is already hashed.  A matching larval (ACQ) state — located by
 * km sequence number or by (mode, reqid, proto, addresses) — is
 * deleted after the insert so the new state supersedes it.
 */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
			/* Sequence matched but destination did not:
			 * that larval state is not ours. */
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	/* Retire the superseded larval state outside the table lock. */
	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
805
/* Update an existing state from @x.  If the resident state is larval
 * (ACQ), @x simply replaces it; otherwise @x's encap/coaddr/selector/
 * lifetime data is copied into the resident state under its lock.
 * Returns 0, -ESRCH if no matching state, -EEXIST for kernel-owned
 * states, -EINVAL if the resident state is not VALID.
 */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* Larval state: hash @x in; x == NULL below signals
		 * "replace" so x1 is deleted after unlock. */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		/* Replacement path: retire the larval state. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	/* In-place update of the resident, valid state. */
	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-evaluate lifetimes against the new limits soon. */
		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
869
/* Check byte/packet lifetime limits on use of @x.  Stamps first-use
 * time, hard-expires the state (via its timer) when a hard limit is
 * reached, and signals soft expiry to key managers once.
 * Returns 0 if the state is usable, -EINVAL otherwise.
 */
int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		/* Let the timer handler perform the actual expiry. */
		mod_timer(&x->timer, jiffies);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
894
/* Ensure @skb has headroom for this state's transform header plus the
 * output device's link-layer reserve, expanding the head if needed.
 * Returns 0 or the pskb_expand_head() error.
 */
static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
		- skb_headroom(skb);

	if (nhead > 0)
		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

	/* Check tail too... */
	return 0;
}
906
/* Validate @x for output on @skb: lifetime limits first, then
 * headroom.  Returns 0 if usable, negative errno otherwise. */
int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int ret;

	ret = xfrm_state_check_expire(x);
	if (ret < 0)
		return ret;

	return xfrm_state_check_space(x, skb);
}
EXPORT_SYMBOL(xfrm_state_check);
917
918 struct xfrm_state *
919 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
920 unsigned short family)
921 {
922 struct xfrm_state *x;
923
924 spin_lock_bh(&xfrm_state_lock);
925 x = __xfrm_state_lookup(daddr, spi, proto, family);
926 spin_unlock_bh(&xfrm_state_lock);
927 return x;
928 }
929 EXPORT_SYMBOL(xfrm_state_lookup);
930
931 struct xfrm_state *
932 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
933 u8 proto, unsigned short family)
934 {
935 struct xfrm_state *x;
936
937 spin_lock_bh(&xfrm_state_lock);
938 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
939 spin_unlock_bh(&xfrm_state_lock);
940 return x;
941 }
942 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
943
944 struct xfrm_state *
945 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
946 xfrm_address_t *daddr, xfrm_address_t *saddr,
947 int create, unsigned short family)
948 {
949 struct xfrm_state *x;
950
951 spin_lock_bh(&xfrm_state_lock);
952 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
953 spin_unlock_bh(&xfrm_state_lock);
954
955 return x;
956 }
957 EXPORT_SYMBOL(xfrm_find_acq);
958
#ifdef CONFIG_XFRM_SUB_POLICY
/* Sort @n templates from @src into @dst via the family-specific
 * tmpl_sort hook (a no-op if the family provides none).  Returns 0
 * or -EAFNOSUPPORT for an unregistered family.
 */
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);
977
/* Sort @n states from @src into @dst via the family-specific
 * state_sort hook (a no-op if the family provides none).  Returns 0
 * or -EAFNOSUPPORT for an unregistered family.
 */
int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif
996
/* Silly enough, but I'm lazy to build resolution list */

/* Linear scan of the whole by-dst table for a larval (ACQ) state with
 * matching km sequence number.  Caller holds xfrm_state_lock.
 * Returns a held reference or NULL.
 */
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}
1017
1018 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1019 {
1020 struct xfrm_state *x;
1021
1022 spin_lock_bh(&xfrm_state_lock);
1023 x = __xfrm_find_acq_byseq(seq);
1024 spin_unlock_bh(&xfrm_state_lock);
1025 return x;
1026 }
1027 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1028
1029 u32 xfrm_get_acqseq(void)
1030 {
1031 u32 res;
1032 static u32 acqseq;
1033 static DEFINE_SPINLOCK(acqseq_lock);
1034
1035 spin_lock_bh(&acqseq_lock);
1036 res = (++acqseq ? : ++acqseq);
1037 spin_unlock_bh(&acqseq_lock);
1038 return res;
1039 }
1040 EXPORT_SYMBOL(xfrm_get_acqseq);
1041
/* Assign an unused SPI in [minspi, maxspi] (network byte order) to @x
 * and hash it into the by-SPI table.  No-op if @x already has an SPI;
 * may silently leave x->id.spi == 0 if no free SPI is found.
 */
void
xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi)
{
	unsigned int h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		/* Caller requested one specific SPI. */
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		/* Random probing; at most range-size attempts.
		 * NOTE(review): assumes minspi <= maxspi — an inverted
		 * range would underflow high-low+1; verify callers. */
		u32 spi = 0;
		u32 low = ntohl(minspi);
		u32 high = ntohl(maxspi);
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);
		wake_up(&km_waitq);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);
1081
/* Visit every state matching @proto, calling @func(x, remaining, data)
 * for each; the whole walk runs under xfrm_state_lock, so @func must
 * not sleep or re-take it.  Returns -ENOENT if nothing matched, else
 * 0 or the first non-zero value returned by @func.
 */
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	/* First pass: count matches so @func sees a countdown index. */
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto))
				count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}

	/* Second pass: visit; --count reaches 0 on the last match. */
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			err = func(x, --count, data);
			if (err)
				goto out;
		}
	}
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
1117
1118
/* Emit a replay-state (aevent) notification to key managers when
 * warranted, and snapshot the current replay counters.
 */
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated on of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		/* Too small a delta: defer unless a timeout was pending. */
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}

		break;

	case XFRM_REPLAY_TIMEOUT:
		/* Nothing changed since the last snapshot: re-defer. */
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}

		break;
	}

	/* Snapshot the counters we are about to report. */
	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	/* Re-arm the aging timer; clear the defer flag only if it was
	 * not already pending. */
	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
EXPORT_SYMBOL(xfrm_replay_notify);
1166
/* Replay-aging timer: periodically push replay state to key managers
 * while aevents are enabled, otherwise mark the update as deferred.
 * Timer context; takes x->lock.
 */
static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on())
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}
1182
/* Anti-replay acceptance test for inbound sequence number @seq
 * (network byte order).  Returns 0 if acceptable, -EINVAL if zero,
 * outside the window, or already seen; bumps the matching stats.
 */
int xfrm_replay_check(struct xfrm_state *x, u32 seq)
{
	u32 diff;

	seq = ntohl(seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	/* Ahead of the window: always fresh. */
	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= x->props.replay_window) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);
1208
1209 void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
1210 {
1211 u32 diff;
1212
1213 seq = ntohl(seq);
1214
1215 if (seq > x->replay.seq) {
1216 diff = seq - x->replay.seq;
1217 if (diff < x->props.replay_window)
1218 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1219 else
1220 x->replay.bitmap = 1;
1221 x->replay.seq = seq;
1222 } else {
1223 diff = x->replay.seq - seq;
1224 x->replay.bitmap |= (1U << diff);
1225 }
1226
1227 if (xfrm_aevent_is_on())
1228 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1229 }
1230 EXPORT_SYMBOL(xfrm_replay_advance);
1231
/* List of registered key managers (struct xfrm_mgr) and the rwlock that
 * guards it: notification paths walk the list under the read lock,
 * register/unregister take the write lock. */
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
1234
1235 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1236 {
1237 struct xfrm_mgr *km;
1238
1239 read_lock(&xfrm_km_lock);
1240 list_for_each_entry(km, &xfrm_km_list, list)
1241 if (km->notify_policy)
1242 km->notify_policy(xp, dir, c);
1243 read_unlock(&xfrm_km_lock);
1244 }
1245
1246 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1247 {
1248 struct xfrm_mgr *km;
1249 read_lock(&xfrm_km_lock);
1250 list_for_each_entry(km, &xfrm_km_list, list)
1251 if (km->notify)
1252 km->notify(x, c);
1253 read_unlock(&xfrm_km_lock);
1254 }
1255
1256 EXPORT_SYMBOL(km_policy_notify);
1257 EXPORT_SYMBOL(km_state_notify);
1258
1259 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1260 {
1261 struct km_event c;
1262
1263 c.data.hard = hard;
1264 c.pid = pid;
1265 c.event = XFRM_MSG_EXPIRE;
1266 km_state_notify(x, &c);
1267
1268 if (hard)
1269 wake_up(&km_waitq);
1270 }
1271
1272 EXPORT_SYMBOL(km_state_expired);
1273 /*
1274 * We send to all registered managers regardless of failure
1275 * We are happy with one success
1276 */
1277 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1278 {
1279 int err = -EINVAL, acqret;
1280 struct xfrm_mgr *km;
1281
1282 read_lock(&xfrm_km_lock);
1283 list_for_each_entry(km, &xfrm_km_list, list) {
1284 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1285 if (!acqret)
1286 err = acqret;
1287 }
1288 read_unlock(&xfrm_km_lock);
1289 return err;
1290 }
1291 EXPORT_SYMBOL(km_query);
1292
1293 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
1294 {
1295 int err = -EINVAL;
1296 struct xfrm_mgr *km;
1297
1298 read_lock(&xfrm_km_lock);
1299 list_for_each_entry(km, &xfrm_km_list, list) {
1300 if (km->new_mapping)
1301 err = km->new_mapping(x, ipaddr, sport);
1302 if (!err)
1303 break;
1304 }
1305 read_unlock(&xfrm_km_lock);
1306 return err;
1307 }
1308 EXPORT_SYMBOL(km_new_mapping);
1309
1310 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1311 {
1312 struct km_event c;
1313
1314 c.data.hard = hard;
1315 c.pid = pid;
1316 c.event = XFRM_MSG_POLEXPIRE;
1317 km_policy_notify(pol, dir, &c);
1318
1319 if (hard)
1320 wake_up(&km_waitq);
1321 }
1322 EXPORT_SYMBOL(km_policy_expired);
1323
1324 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1325 {
1326 int err = -EINVAL;
1327 int ret;
1328 struct xfrm_mgr *km;
1329
1330 read_lock(&xfrm_km_lock);
1331 list_for_each_entry(km, &xfrm_km_list, list) {
1332 if (km->report) {
1333 ret = km->report(proto, sel, addr);
1334 if (!ret)
1335 err = ret;
1336 }
1337 }
1338 read_unlock(&xfrm_km_lock);
1339 return err;
1340 }
1341 EXPORT_SYMBOL(km_report);
1342
/* Install a per-socket IPsec policy from a user-supplied blob
 * (presumably reached via the per-socket policy setsockopt path —
 * confirm against callers).  Each registered key manager is asked in
 * turn to compile the blob; the first success wins.  Returns 0 on
 * success or a negative errno. */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	/* Reject empty requests and cap the blob at one page. */
	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	/* On success compile_policy() sets err >= 0, where err doubles as
	 * the policy direction handed to xfrm_sk_policy_insert() below;
	 * a negative err means "try the next manager". */
	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		/* Attach the compiled policy to the socket, then drop the
		 * reference compile_policy() handed us. */
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
1382
/* Add key manager @km to the notification list.  Always succeeds. */
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
1391
/* Remove key manager @km from the notification list.  Always succeeds. */
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
1400
1401 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1402 {
1403 int err = 0;
1404 if (unlikely(afinfo == NULL))
1405 return -EINVAL;
1406 if (unlikely(afinfo->family >= NPROTO))
1407 return -EAFNOSUPPORT;
1408 write_lock_bh(&xfrm_state_afinfo_lock);
1409 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1410 err = -ENOBUFS;
1411 else
1412 xfrm_state_afinfo[afinfo->family] = afinfo;
1413 write_unlock_bh(&xfrm_state_afinfo_lock);
1414 return err;
1415 }
1416 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1417
1418 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1419 {
1420 int err = 0;
1421 if (unlikely(afinfo == NULL))
1422 return -EINVAL;
1423 if (unlikely(afinfo->family >= NPROTO))
1424 return -EAFNOSUPPORT;
1425 write_lock_bh(&xfrm_state_afinfo_lock);
1426 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1427 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1428 err = -EINVAL;
1429 else
1430 xfrm_state_afinfo[afinfo->family] = NULL;
1431 }
1432 write_unlock_bh(&xfrm_state_afinfo_lock);
1433 return err;
1434 }
1435 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1436
/* Look up the state ops for @family.  On success the afinfo read lock
 * is left HELD and the caller must release it with
 * xfrm_state_put_afinfo().  Returns NULL (lock released) for an
 * out-of-range or unregistered family. */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		/* Nothing registered: drop the lock before failing. */
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
1448
/* Release the read lock left held by a successful
 * xfrm_state_get_afinfo(); the @afinfo argument only documents the
 * pairing. */
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}
1453
1454 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1455 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1456 {
1457 if (x->tunnel) {
1458 struct xfrm_state *t = x->tunnel;
1459
1460 if (atomic_read(&t->tunnel_users) == 2)
1461 xfrm_state_delete(t);
1462 atomic_dec(&t->tunnel_users);
1463 xfrm_state_put(t);
1464 x->tunnel = NULL;
1465 }
1466 }
1467 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1468
1469 /*
1470 * This function is NOT optimal. For example, with ESP it will give an
1471 * MTU that's usually two bytes short of being optimal. However, it will
1472 * usually give an answer that's a multiple of 4 provided the input is
1473 * also a multiple of 4.
1474 */
1475 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1476 {
1477 int res = mtu;
1478
1479 res -= x->props.header_len;
1480
1481 for (;;) {
1482 int m = res;
1483
1484 if (m < 68)
1485 return 68;
1486
1487 spin_lock_bh(&x->lock);
1488 if (x->km.state == XFRM_STATE_VALID &&
1489 x->type && x->type->get_max_size)
1490 m = x->type->get_max_size(x, m);
1491 else
1492 m += x->props.header_len;
1493 spin_unlock_bh(&x->lock);
1494
1495 if (m <= mtu)
1496 break;
1497 res -= (m - mtu);
1498 }
1499
1500 return res;
1501 }
1502
1503 int xfrm_init_state(struct xfrm_state *x)
1504 {
1505 struct xfrm_state_afinfo *afinfo;
1506 int family = x->props.family;
1507 int err;
1508
1509 err = -EAFNOSUPPORT;
1510 afinfo = xfrm_state_get_afinfo(family);
1511 if (!afinfo)
1512 goto error;
1513
1514 err = 0;
1515 if (afinfo->init_flags)
1516 err = afinfo->init_flags(x);
1517
1518 xfrm_state_put_afinfo(afinfo);
1519
1520 if (err)
1521 goto error;
1522
1523 err = -EPROTONOSUPPORT;
1524 x->type = xfrm_get_type(x->id.proto, family);
1525 if (x->type == NULL)
1526 goto error;
1527
1528 err = x->type->init_state(x);
1529 if (err)
1530 goto error;
1531
1532 x->mode = xfrm_get_mode(x->props.mode, family);
1533 if (x->mode == NULL)
1534 goto error;
1535
1536 x->km.state = XFRM_STATE_VALID;
1537
1538 error:
1539 return err;
1540 }
1541
1542 EXPORT_SYMBOL(xfrm_init_state);
1543
/* Boot-time initialization: allocate the initial bydst/bysrc/byspi
 * hash tables (8 buckets each) and set up the state GC work item.
 * Allocation failure this early is fatal, hence the panic(). */
void __init xfrm_state_init(void)
{
	unsigned int sz;

	/* 8 buckets to start; xfrm_state_hmask is derived from the
	 * allocation size below so mask and table stay in sync. */
	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
}
1559
This page took 0.079635 seconds and 4 git commands to generate.