1 /* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/pfkeyv2.h>
23 #include <linux/ipsec.h>
24 #include <linux/init.h>
25 #include <linux/security.h>
26 #include <net/sock.h>
27 #include <net/xfrm.h>
28 #include <net/netlink.h>
29 #include <asm/uaccess.h>
30 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
31 #include <linux/in6.h>
32 #endif
33
34 static inline int aead_len(struct xfrm_algo_aead *alg)
35 {
36 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
37 }
38
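/*
 * Validate a single algorithm attribute (XFRMA_ALG_AUTH, _CRYPT or
 * _COMP): the attribute must be large enough to hold the key length it
 * claims to carry, and the algorithm name is force-NUL-terminated so
 * that later lookups by name are safe.
 */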
39 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
40 {
41 struct nlattr *rt = attrs[type];
42 struct xfrm_algo *algp;
43
44 if (!rt)
45 return 0;
46
47 algp = nla_data(rt);
48 if (nla_len(rt) < xfrm_alg_len(algp))
49 return -EINVAL;
50
51 switch (type) {
52 case XFRMA_ALG_AUTH:
53 case XFRMA_ALG_CRYPT:
54 case XFRMA_ALG_COMP:
55 break;
56
57 default:
58 return -EINVAL;
59 }
60
61 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
62 return 0;
63 }
64
65 static int verify_aead(struct nlattr **attrs)
66 {
67 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
68 struct xfrm_algo_aead *algp;
69
70 if (!rt)
71 return 0;
72
73 algp = nla_data(rt);
74 if (nla_len(rt) < aead_len(algp))
75 return -EINVAL;
76
77 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
78 return 0;
79 }
80
81 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
82 xfrm_address_t **addrp)
83 {
84 struct nlattr *rt = attrs[type];
85
86 if (rt && addrp)
87 *addrp = nla_data(rt);
88 }
89
90 static inline int verify_sec_ctx_len(struct nlattr **attrs)
91 {
92 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
93 struct xfrm_user_sec_ctx *uctx;
94
95 if (!rt)
96 return 0;
97
98 uctx = nla_data(rt);
99 if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
100 return -EINVAL;
101
102 return 0;
103 }
104
105
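/*
 * Sanity-check an XFRM_MSG_NEWSA/UPDSA request before any state is built.
 * Per-protocol attribute rules enforced below:
 *
 *	AH		needs XFRMA_ALG_AUTH; AEAD, CRYPT and COMP forbidden
 *	ESP		needs AUTH and/or CRYPT, or AEAD alone; COMP forbidden
 *	IPcomp		needs XFRMA_ALG_COMP only
 *	DSTOPTS/ROUTING	(IPv6) need XFRMA_COADDR, no algorithms, no
 *			encapsulation and no security context
 *
 * The individual algorithm attributes, the security context length and
 * finally the mode are then validated.
 */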
106 static int verify_newsa_info(struct xfrm_usersa_info *p,
107 struct nlattr **attrs)
108 {
109 int err;
110
111 err = -EINVAL;
112 switch (p->family) {
113 case AF_INET:
114 break;
115
116 case AF_INET6:
117 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
118 break;
119 #else
120 err = -EAFNOSUPPORT;
121 goto out;
122 #endif
123
124 default:
125 goto out;
126 }
127
128 err = -EINVAL;
129 switch (p->id.proto) {
130 case IPPROTO_AH:
131 if (!attrs[XFRMA_ALG_AUTH] ||
132 attrs[XFRMA_ALG_AEAD] ||
133 attrs[XFRMA_ALG_CRYPT] ||
134 attrs[XFRMA_ALG_COMP])
135 goto out;
136 break;
137
138 case IPPROTO_ESP:
139 if (attrs[XFRMA_ALG_COMP])
140 goto out;
141 if (!attrs[XFRMA_ALG_AUTH] &&
142 !attrs[XFRMA_ALG_CRYPT] &&
143 !attrs[XFRMA_ALG_AEAD])
144 goto out;
145 if ((attrs[XFRMA_ALG_AUTH] ||
146 attrs[XFRMA_ALG_CRYPT]) &&
147 attrs[XFRMA_ALG_AEAD])
148 goto out;
149 break;
150
151 case IPPROTO_COMP:
152 if (!attrs[XFRMA_ALG_COMP] ||
153 attrs[XFRMA_ALG_AEAD] ||
154 attrs[XFRMA_ALG_AUTH] ||
155 attrs[XFRMA_ALG_CRYPT])
156 goto out;
157 break;
158
159 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
160 case IPPROTO_DSTOPTS:
161 case IPPROTO_ROUTING:
162 if (attrs[XFRMA_ALG_COMP] ||
163 attrs[XFRMA_ALG_AUTH] ||
164 attrs[XFRMA_ALG_AEAD] ||
165 attrs[XFRMA_ALG_CRYPT] ||
166 attrs[XFRMA_ENCAP] ||
167 attrs[XFRMA_SEC_CTX] ||
168 !attrs[XFRMA_COADDR])
169 goto out;
170 break;
171 #endif
172
173 default:
174 goto out;
175 }
176
177 if ((err = verify_aead(attrs)))
178 goto out;
179 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
180 goto out;
181 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
182 goto out;
183 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
184 goto out;
185 if ((err = verify_sec_ctx_len(attrs)))
186 goto out;
187
188 err = -EINVAL;
189 switch (p->mode) {
190 case XFRM_MODE_TRANSPORT:
191 case XFRM_MODE_TUNNEL:
192 case XFRM_MODE_ROUTEOPTIMIZATION:
193 case XFRM_MODE_BEET:
194 break;
195
196 default:
197 goto out;
198 }
199
200 err = 0;
201
202 out:
203 return err;
204 }
205
206 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
207 struct xfrm_algo_desc *(*get_byname)(char *, int),
208 struct nlattr *rta)
209 {
210 struct xfrm_algo *p, *ualg;
211 struct xfrm_algo_desc *algo;
212
213 if (!rta)
214 return 0;
215
216 ualg = nla_data(rta);
217
218 algo = get_byname(ualg->alg_name, 1);
219 if (!algo)
220 return -ENOSYS;
221 *props = algo->desc.sadb_alg_id;
222
223 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
224 if (!p)
225 return -ENOMEM;
226
227 strcpy(p->alg_name, algo->name);
228 *algpp = p;
229 return 0;
230 }
231
232 static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
233 struct nlattr *rta)
234 {
235 struct xfrm_algo_aead *p, *ualg;
236 struct xfrm_algo_desc *algo;
237
238 if (!rta)
239 return 0;
240
241 ualg = nla_data(rta);
242
243 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
244 if (!algo)
245 return -ENOSYS;
246 *props = algo->desc.sadb_alg_id;
247
248 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
249 if (!p)
250 return -ENOMEM;
251
252 strcpy(p->alg_name, algo->name);
253 *algpp = p;
254 return 0;
255 }
256
257 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
258 {
259 int len = 0;
260
261 if (xfrm_ctx) {
262 len += sizeof(struct xfrm_user_sec_ctx);
263 len += xfrm_ctx->ctx_len;
264 }
265 return len;
266 }
267
268 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
269 {
270 memcpy(&x->id, &p->id, sizeof(x->id));
271 memcpy(&x->sel, &p->sel, sizeof(x->sel));
272 memcpy(&x->lft, &p->lft, sizeof(x->lft));
273 x->props.mode = p->mode;
274 x->props.replay_window = p->replay_window;
275 x->props.reqid = p->reqid;
276 x->props.family = p->family;
277 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
278 x->props.flags = p->flags;
279
280 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
281 x->sel.family = p->family;
282 }
283
284 /*
285 * Someday, when pfkey also has support, this code could be made
286 * shareable and moved to xfrm_state.c - JHS
287 *
288 */
289 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
290 {
291 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
292 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
293 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
294 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
295
296 if (rp) {
297 struct xfrm_replay_state *replay;
298 replay = nla_data(rp);
299 memcpy(&x->replay, replay, sizeof(*replay));
300 memcpy(&x->preplay, replay, sizeof(*replay));
301 }
302
303 if (lt) {
304 struct xfrm_lifetime_cur *ltime;
305 ltime = nla_data(lt);
306 x->curlft.bytes = ltime->bytes;
307 x->curlft.packets = ltime->packets;
308 x->curlft.add_time = ltime->add_time;
309 x->curlft.use_time = ltime->use_time;
310 }
311
312 if (et)
313 x->replay_maxage = nla_get_u32(et);
314
315 if (rt)
316 x->replay_maxdiff = nla_get_u32(rt);
317 }
318
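/*
 * Build a fully initialised xfrm_state from a validated xfrm_usersa_info
 * and its attributes: algorithms are resolved by name and duplicated,
 * optional NAT-T encapsulation, care-of address and security context are
 * attached, replay and lifetime defaults are set and then overridden by
 * XFRMA_REPLAY_VAL, XFRMA_LTIME_VAL and the threshold attributes.
 * Returns NULL and sets *errp on failure.
 */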
319 static struct xfrm_state *xfrm_state_construct(struct net *net,
320 struct xfrm_usersa_info *p,
321 struct nlattr **attrs,
322 int *errp)
323 {
324 struct xfrm_state *x = xfrm_state_alloc(net);
325 int err = -ENOMEM;
326
327 if (!x)
328 goto error_no_put;
329
330 copy_from_user_state(x, p);
331
332 if ((err = attach_aead(&x->aead, &x->props.ealgo,
333 attrs[XFRMA_ALG_AEAD])))
334 goto error;
335 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
336 xfrm_aalg_get_byname,
337 attrs[XFRMA_ALG_AUTH])))
338 goto error;
339 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
340 xfrm_ealg_get_byname,
341 attrs[XFRMA_ALG_CRYPT])))
342 goto error;
343 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
344 xfrm_calg_get_byname,
345 attrs[XFRMA_ALG_COMP])))
346 goto error;
347
348 if (attrs[XFRMA_ENCAP]) {
349 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
350 sizeof(*x->encap), GFP_KERNEL);
351 if (x->encap == NULL)
352 goto error;
353 }
354
355 if (attrs[XFRMA_COADDR]) {
356 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
357 sizeof(*x->coaddr), GFP_KERNEL);
358 if (x->coaddr == NULL)
359 goto error;
360 }
361
362 err = xfrm_init_state(x);
363 if (err)
364 goto error;
365
366 if (attrs[XFRMA_SEC_CTX] &&
367 security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
368 goto error;
369
370 x->km.seq = p->seq;
371 x->replay_maxdiff = sysctl_xfrm_aevent_rseqth;
372 /* sysctl_xfrm_aevent_etime is in 100ms units */
373 x->replay_maxage = (sysctl_xfrm_aevent_etime*HZ)/XFRM_AE_ETH_M;
374 x->preplay.bitmap = 0;
375 x->preplay.seq = x->replay.seq+x->replay_maxdiff;
376 x->preplay.oseq = x->replay.oseq +x->replay_maxdiff;
377
378 /* override default values from above */
379
380 xfrm_update_ae_params(x, attrs);
381
382 return x;
383
384 error:
385 x->km.state = XFRM_STATE_DEAD;
386 xfrm_state_put(x);
387 error_no_put:
388 *errp = err;
389 return NULL;
390 }
391
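/*
 * XFRM_MSG_NEWSA/XFRM_MSG_UPDSA handler: verify the request, construct
 * the state, then add it (NEWSA) or update the existing one (UPDSA).
 * The result is audited and, on success, broadcast to key managers via
 * km_state_notify().
 */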
392 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
393 struct nlattr **attrs)
394 {
395 struct net *net = sock_net(skb->sk);
396 struct xfrm_usersa_info *p = nlmsg_data(nlh);
397 struct xfrm_state *x;
398 int err;
399 struct km_event c;
400 uid_t loginuid = NETLINK_CB(skb).loginuid;
401 u32 sessionid = NETLINK_CB(skb).sessionid;
402 u32 sid = NETLINK_CB(skb).sid;
403
404 err = verify_newsa_info(p, attrs);
405 if (err)
406 return err;
407
408 x = xfrm_state_construct(net, p, attrs, &err);
409 if (!x)
410 return err;
411
412 xfrm_state_hold(x);
413 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
414 err = xfrm_state_add(x);
415 else
416 err = xfrm_state_update(x);
417
418 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
419
420 if (err < 0) {
421 x->km.state = XFRM_STATE_DEAD;
422 __xfrm_state_put(x);
423 goto out;
424 }
425
426 c.seq = nlh->nlmsg_seq;
427 c.pid = nlh->nlmsg_pid;
428 c.event = nlh->nlmsg_type;
429
430 km_state_notify(x, &c);
431 out:
432 xfrm_state_put(x);
433 return err;
434 }
435
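/*
 * Resolve the SA a request refers to.  For SPI-carrying protocols
 * (AH, ESP, IPcomp) the lookup is by (daddr, SPI, proto, family); for
 * everything else the XFRMA_SRCADDR attribute is required and the lookup
 * is by address pair.
 */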
436 static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
437 struct xfrm_usersa_id *p,
438 struct nlattr **attrs,
439 int *errp)
440 {
441 struct xfrm_state *x = NULL;
442 int err;
443
444 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
445 err = -ESRCH;
446 x = xfrm_state_lookup(net, &p->daddr, p->spi, p->proto, p->family);
447 } else {
448 xfrm_address_t *saddr = NULL;
449
450 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
451 if (!saddr) {
452 err = -EINVAL;
453 goto out;
454 }
455
456 err = -ESRCH;
457 x = xfrm_state_lookup_byaddr(net, &p->daddr, saddr,
458 p->proto, p->family);
459 }
460
461 out:
462 if (!x && errp)
463 *errp = err;
464 return x;
465 }
466
467 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
468 struct nlattr **attrs)
469 {
470 struct net *net = sock_net(skb->sk);
471 struct xfrm_state *x;
472 int err = -ESRCH;
473 struct km_event c;
474 struct xfrm_usersa_id *p = nlmsg_data(nlh);
475 uid_t loginuid = NETLINK_CB(skb).loginuid;
476 u32 sessionid = NETLINK_CB(skb).sessionid;
477 u32 sid = NETLINK_CB(skb).sid;
478
479 x = xfrm_user_state_lookup(net, p, attrs, &err);
480 if (x == NULL)
481 return err;
482
483 if ((err = security_xfrm_state_delete(x)) != 0)
484 goto out;
485
486 if (xfrm_state_kern(x)) {
487 err = -EPERM;
488 goto out;
489 }
490
491 err = xfrm_state_delete(x);
492
493 if (err < 0)
494 goto out;
495
496 c.seq = nlh->nlmsg_seq;
497 c.pid = nlh->nlmsg_pid;
498 c.event = nlh->nlmsg_type;
499 km_state_notify(x, &c);
500
501 out:
502 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
503 xfrm_state_put(x);
504 return err;
505 }
506
507 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
508 {
509 memcpy(&p->id, &x->id, sizeof(p->id));
510 memcpy(&p->sel, &x->sel, sizeof(p->sel));
511 memcpy(&p->lft, &x->lft, sizeof(p->lft));
512 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
513 memcpy(&p->stats, &x->stats, sizeof(p->stats));
514 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
515 p->mode = x->props.mode;
516 p->replay_window = x->props.replay_window;
517 p->reqid = x->props.reqid;
518 p->family = x->props.family;
519 p->flags = x->props.flags;
520 p->seq = x->km.seq;
521 }
522
523 struct xfrm_dump_info {
524 struct sk_buff *in_skb;
525 struct sk_buff *out_skb;
526 u32 nlmsg_seq;
527 u16 nlmsg_flags;
528 };
529
530 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
531 {
532 struct xfrm_user_sec_ctx *uctx;
533 struct nlattr *attr;
534 int ctx_size = sizeof(*uctx) + s->ctx_len;
535
536 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
537 if (attr == NULL)
538 return -EMSGSIZE;
539
540 uctx = nla_data(attr);
541 uctx->exttype = XFRMA_SEC_CTX;
542 uctx->len = ctx_size;
543 uctx->ctx_doi = s->ctx_doi;
544 uctx->ctx_alg = s->ctx_alg;
545 uctx->ctx_len = s->ctx_len;
546 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
547
548 return 0;
549 }
550
551 /* Don't change this without updating xfrm_sa_len! */
552 static int copy_to_user_state_extra(struct xfrm_state *x,
553 struct xfrm_usersa_info *p,
554 struct sk_buff *skb)
555 {
556 copy_to_user_state(x, p);
557
558 if (x->coaddr)
559 NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
560
561 if (x->lastused)
562 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
563
564 if (x->aead)
565 NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
566 if (x->aalg)
567 NLA_PUT(skb, XFRMA_ALG_AUTH, xfrm_alg_len(x->aalg), x->aalg);
568 if (x->ealg)
569 NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
570 if (x->calg)
571 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
572
573 if (x->encap)
574 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
575
576 if (x->security && copy_sec_ctx(x->security, skb) < 0)
577 goto nla_put_failure;
578
579 return 0;
580
581 nla_put_failure:
582 return -EMSGSIZE;
583 }
584
585 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
586 {
587 struct xfrm_dump_info *sp = ptr;
588 struct sk_buff *in_skb = sp->in_skb;
589 struct sk_buff *skb = sp->out_skb;
590 struct xfrm_usersa_info *p;
591 struct nlmsghdr *nlh;
592 int err;
593
594 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
595 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
596 if (nlh == NULL)
597 return -EMSGSIZE;
598
599 p = nlmsg_data(nlh);
600
601 err = copy_to_user_state_extra(x, p, skb);
602 if (err)
603 goto nla_put_failure;
604
605 nlmsg_end(skb, nlh);
606 return 0;
607
608 nla_put_failure:
609 nlmsg_cancel(skb, nlh);
610 return err;
611 }
612
613 static int xfrm_dump_sa_done(struct netlink_callback *cb)
614 {
615 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
616 xfrm_state_walk_done(walk);
617 return 0;
618 }
619
620 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
621 {
622 struct net *net = sock_net(skb->sk);
623 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
624 struct xfrm_dump_info info;
625
626 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
627 sizeof(cb->args) - sizeof(cb->args[0]));
628
629 info.in_skb = cb->skb;
630 info.out_skb = skb;
631 info.nlmsg_seq = cb->nlh->nlmsg_seq;
632 info.nlmsg_flags = NLM_F_MULTI;
633
634 if (!cb->args[0]) {
635 cb->args[0] = 1;
636 xfrm_state_walk_init(walk, 0);
637 }
638
639 (void) xfrm_state_walk(net, walk, dump_one_state, &info);
640
641 return skb->len;
642 }
643
644 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
645 struct xfrm_state *x, u32 seq)
646 {
647 struct xfrm_dump_info info;
648 struct sk_buff *skb;
649
650 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
651 if (!skb)
652 return ERR_PTR(-ENOMEM);
653
654 info.in_skb = in_skb;
655 info.out_skb = skb;
656 info.nlmsg_seq = seq;
657 info.nlmsg_flags = 0;
658
659 if (dump_one_state(x, 0, &info)) {
660 kfree_skb(skb);
661 return NULL;
662 }
663
664 return skb;
665 }
666
667 static inline size_t xfrm_spdinfo_msgsize(void)
668 {
669 return NLMSG_ALIGN(4)
670 + nla_total_size(sizeof(struct xfrmu_spdinfo))
671 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
672 }
673
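/*
 * Fill an XFRM_MSG_NEWSPDINFO reply: a u32 flags word as the message
 * payload, followed by XFRMA_SPD_INFO (per-direction policy counts) and
 * XFRMA_SPD_HINFO (policy hash table counts) attributes.
 */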
674 static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
675 {
676 struct xfrmk_spdinfo si;
677 struct xfrmu_spdinfo spc;
678 struct xfrmu_spdhinfo sph;
679 struct nlmsghdr *nlh;
680 u32 *f;
681
682 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
683 if (nlh == NULL) /* shouldn't really happen ... */
684 return -EMSGSIZE;
685
686 f = nlmsg_data(nlh);
687 *f = flags;
688 xfrm_spd_getinfo(&si);
689 spc.incnt = si.incnt;
690 spc.outcnt = si.outcnt;
691 spc.fwdcnt = si.fwdcnt;
692 spc.inscnt = si.inscnt;
693 spc.outscnt = si.outscnt;
694 spc.fwdscnt = si.fwdscnt;
695 sph.spdhcnt = si.spdhcnt;
696 sph.spdhmcnt = si.spdhmcnt;
697
698 NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
699 NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
700
701 return nlmsg_end(skb, nlh);
702
703 nla_put_failure:
704 nlmsg_cancel(skb, nlh);
705 return -EMSGSIZE;
706 }
707
708 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
709 struct nlattr **attrs)
710 {
711 struct net *net = sock_net(skb->sk);
712 struct sk_buff *r_skb;
713 u32 *flags = nlmsg_data(nlh);
714 u32 spid = NETLINK_CB(skb).pid;
715 u32 seq = nlh->nlmsg_seq;
716
717 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
718 if (r_skb == NULL)
719 return -ENOMEM;
720
721 if (build_spdinfo(r_skb, spid, seq, *flags) < 0)
722 BUG();
723
724 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
725 }
726
727 static inline size_t xfrm_sadinfo_msgsize(void)
728 {
729 return NLMSG_ALIGN(4)
730 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
731 + nla_total_size(4); /* XFRMA_SAD_CNT */
732 }
733
734 static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
735 {
736 struct xfrmk_sadinfo si;
737 struct xfrmu_sadhinfo sh;
738 struct nlmsghdr *nlh;
739 u32 *f;
740
741 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
742 if (nlh == NULL) /* shouldn't really happen ... */
743 return -EMSGSIZE;
744
745 f = nlmsg_data(nlh);
746 *f = flags;
747 xfrm_sad_getinfo(&si);
748
749 sh.sadhmcnt = si.sadhmcnt;
750 sh.sadhcnt = si.sadhcnt;
751
752 NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
753 NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
754
755 return nlmsg_end(skb, nlh);
756
757 nla_put_failure:
758 nlmsg_cancel(skb, nlh);
759 return -EMSGSIZE;
760 }
761
762 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
763 struct nlattr **attrs)
764 {
765 struct net *net = sock_net(skb->sk);
766 struct sk_buff *r_skb;
767 u32 *flags = nlmsg_data(nlh);
768 u32 spid = NETLINK_CB(skb).pid;
769 u32 seq = nlh->nlmsg_seq;
770
771 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
772 if (r_skb == NULL)
773 return -ENOMEM;
774
775 if (build_sadinfo(r_skb, spid, seq, *flags) < 0)
776 BUG();
777
778 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
779 }
780
781 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
782 struct nlattr **attrs)
783 {
784 struct net *net = sock_net(skb->sk);
785 struct xfrm_usersa_id *p = nlmsg_data(nlh);
786 struct xfrm_state *x;
787 struct sk_buff *resp_skb;
788 int err = -ESRCH;
789
790 x = xfrm_user_state_lookup(net, p, attrs, &err);
791 if (x == NULL)
792 goto out_noput;
793
794 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
795 if (IS_ERR(resp_skb)) {
796 err = PTR_ERR(resp_skb);
797 } else {
798 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
799 }
800 xfrm_state_put(x);
801 out_noput:
802 return err;
803 }
804
805 static int verify_userspi_info(struct xfrm_userspi_info *p)
806 {
807 switch (p->info.id.proto) {
808 case IPPROTO_AH:
809 case IPPROTO_ESP:
810 break;
811
812 case IPPROTO_COMP:
813 /* IPCOMP spi is 16-bits. */
814 if (p->max >= 0x10000)
815 return -EINVAL;
816 break;
817
818 default:
819 return -EINVAL;
820 }
821
822 if (p->min > p->max)
823 return -EINVAL;
824
825 return 0;
826 }
827
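/*
 * XFRM_MSG_ALLOCSPI handler: locate the matching larval (acquire) state,
 * by sequence number if one was given, otherwise by the (mode, reqid,
 * proto, addresses) tuple, allocate an SPI from the requested [min, max]
 * range and unicast the resulting SA back to the caller.
 */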
828 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
829 struct nlattr **attrs)
830 {
831 struct net *net = sock_net(skb->sk);
832 struct xfrm_state *x;
833 struct xfrm_userspi_info *p;
834 struct sk_buff *resp_skb;
835 xfrm_address_t *daddr;
836 int family;
837 int err;
838
839 p = nlmsg_data(nlh);
840 err = verify_userspi_info(p);
841 if (err)
842 goto out_noput;
843
844 family = p->info.family;
845 daddr = &p->info.id.daddr;
846
847 x = NULL;
848 if (p->info.seq) {
849 x = xfrm_find_acq_byseq(net, p->info.seq);
850 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
851 xfrm_state_put(x);
852 x = NULL;
853 }
854 }
855
856 if (!x)
857 x = xfrm_find_acq(net, p->info.mode, p->info.reqid,
858 p->info.id.proto, daddr,
859 &p->info.saddr, 1,
860 family);
861 err = -ENOENT;
862 if (x == NULL)
863 goto out_noput;
864
865 err = xfrm_alloc_spi(x, p->min, p->max);
866 if (err)
867 goto out;
868
869 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
870 if (IS_ERR(resp_skb)) {
871 err = PTR_ERR(resp_skb);
872 goto out;
873 }
874
875 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
876
877 out:
878 xfrm_state_put(x);
879 out_noput:
880 return err;
881 }
882
883 static int verify_policy_dir(u8 dir)
884 {
885 switch (dir) {
886 case XFRM_POLICY_IN:
887 case XFRM_POLICY_OUT:
888 case XFRM_POLICY_FWD:
889 break;
890
891 default:
892 return -EINVAL;
893 }
894
895 return 0;
896 }
897
898 static int verify_policy_type(u8 type)
899 {
900 switch (type) {
901 case XFRM_POLICY_TYPE_MAIN:
902 #ifdef CONFIG_XFRM_SUB_POLICY
903 case XFRM_POLICY_TYPE_SUB:
904 #endif
905 break;
906
907 default:
908 return -EINVAL;
909 }
910
911 return 0;
912 }
913
914 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
915 {
916 switch (p->share) {
917 case XFRM_SHARE_ANY:
918 case XFRM_SHARE_SESSION:
919 case XFRM_SHARE_USER:
920 case XFRM_SHARE_UNIQUE:
921 break;
922
923 default:
924 return -EINVAL;
925 }
926
927 switch (p->action) {
928 case XFRM_POLICY_ALLOW:
929 case XFRM_POLICY_BLOCK:
930 break;
931
932 default:
933 return -EINVAL;
934 }
935
936 switch (p->sel.family) {
937 case AF_INET:
938 break;
939
940 case AF_INET6:
941 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
942 break;
943 #else
944 return -EAFNOSUPPORT;
945 #endif
946
947 default:
948 return -EINVAL;
949 }
950
951 return verify_policy_dir(p->dir);
952 }
953
954 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
955 {
956 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
957 struct xfrm_user_sec_ctx *uctx;
958
959 if (!rt)
960 return 0;
961
962 uctx = nla_data(rt);
963 return security_xfrm_policy_alloc(&pol->security, uctx);
964 }
965
966 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
967 int nr)
968 {
969 int i;
970
971 xp->xfrm_nr = nr;
972 for (i = 0; i < nr; i++, ut++) {
973 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
974
975 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
976 memcpy(&t->saddr, &ut->saddr,
977 sizeof(xfrm_address_t));
978 t->reqid = ut->reqid;
979 t->mode = ut->mode;
980 t->share = ut->share;
981 t->optional = ut->optional;
982 t->aalgos = ut->aalgos;
983 t->ealgos = ut->ealgos;
984 t->calgos = ut->calgos;
985 /* If all masks are ~0, then we allow all algorithms. */
986 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
987 t->encap_family = ut->family;
988 }
989 }
990
991 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
992 {
993 int i;
994
995 if (nr > XFRM_MAX_DEPTH)
996 return -EINVAL;
997
998 for (i = 0; i < nr; i++) {
999 /* We never validated the ut->family value, so many
1000 * applications simply leave it at zero. The check was
1001 * never made and ut->family was ignored because all
1002 * templates could be assumed to have the same family as
1003 * the policy itself. Now that we will have ipv4-in-ipv6
1004 * and ipv6-in-ipv4 tunnels, this is no longer true.
1005 */
1006 if (!ut[i].family)
1007 ut[i].family = family;
1008
1009 switch (ut[i].family) {
1010 case AF_INET:
1011 break;
1012 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1013 case AF_INET6:
1014 break;
1015 #endif
1016 default:
1017 return -EINVAL;
1018 }
1019 }
1020
1021 return 0;
1022 }
1023
1024 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
1025 {
1026 struct nlattr *rt = attrs[XFRMA_TMPL];
1027
1028 if (!rt) {
1029 pol->xfrm_nr = 0;
1030 } else {
1031 struct xfrm_user_tmpl *utmpl = nla_data(rt);
1032 int nr = nla_len(rt) / sizeof(*utmpl);
1033 int err;
1034
1035 err = validate_tmpl(nr, utmpl, pol->family);
1036 if (err)
1037 return err;
1038
1039 copy_templates(pol, utmpl, nr);
1040 }
1041 return 0;
1042 }
1043
1044 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1045 {
1046 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1047 struct xfrm_userpolicy_type *upt;
1048 u8 type = XFRM_POLICY_TYPE_MAIN;
1049 int err;
1050
1051 if (rt) {
1052 upt = nla_data(rt);
1053 type = upt->type;
1054 }
1055
1056 err = verify_policy_type(type);
1057 if (err)
1058 return err;
1059
1060 *tp = type;
1061 return 0;
1062 }
1063
1064 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1065 {
1066 xp->priority = p->priority;
1067 xp->index = p->index;
1068 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1069 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1070 xp->action = p->action;
1071 xp->flags = p->flags;
1072 xp->family = p->sel.family;
1073 /* XXX xp->share = p->share; */
1074 }
1075
1076 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1077 {
1078 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1079 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1080 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1081 p->priority = xp->priority;
1082 p->index = xp->index;
1083 p->sel.family = xp->family;
1084 p->dir = dir;
1085 p->action = xp->action;
1086 p->flags = xp->flags;
1087 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1088 }
1089
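/*
 * Allocate a policy and populate it from the userspace description:
 * selector, lifetimes, action and flags first, then the policy type
 * (main/sub), the template vector and an optional security context.
 * Returns NULL and sets *errp on failure.
 */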
1090 static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
1091 {
1092 struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
1093 int err;
1094
1095 if (!xp) {
1096 *errp = -ENOMEM;
1097 return NULL;
1098 }
1099
1100 copy_from_user_policy(xp, p);
1101
1102 err = copy_from_user_policy_type(&xp->type, attrs);
1103 if (err)
1104 goto error;
1105
1106 if (!(err = copy_from_user_tmpl(xp, attrs)))
1107 err = copy_from_user_sec_ctx(xp, attrs);
1108 if (err)
1109 goto error;
1110
1111 return xp;
1112 error:
1113 *errp = err;
1114 xp->walk.dead = 1;
1115 xfrm_policy_destroy(xp);
1116 return NULL;
1117 }
1118
1119 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1120 struct nlattr **attrs)
1121 {
1122 struct net *net = sock_net(skb->sk);
1123 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1124 struct xfrm_policy *xp;
1125 struct km_event c;
1126 int err;
1127 int excl;
1128 uid_t loginuid = NETLINK_CB(skb).loginuid;
1129 u32 sessionid = NETLINK_CB(skb).sessionid;
1130 u32 sid = NETLINK_CB(skb).sid;
1131
1132 err = verify_newpolicy_info(p);
1133 if (err)
1134 return err;
1135 err = verify_sec_ctx_len(attrs);
1136 if (err)
1137 return err;
1138
1139 xp = xfrm_policy_construct(net, p, attrs, &err);
1140 if (!xp)
1141 return err;
1142
1143 /* shouldn't excl be based on nlh flags??
1144 * Aha! this is anti-netlink really, i.e. more pfkey-derived:
1145 * in netlink excl is a flag and you wouldn't need
1146 * a type XFRM_MSG_UPDPOLICY - JHS */
1147 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1148 err = xfrm_policy_insert(p->dir, xp, excl);
1149 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
1150
1151 if (err) {
1152 security_xfrm_policy_free(xp->security);
1153 kfree(xp);
1154 return err;
1155 }
1156
1157 c.event = nlh->nlmsg_type;
1158 c.seq = nlh->nlmsg_seq;
1159 c.pid = nlh->nlmsg_pid;
1160 km_policy_notify(xp, p->dir, &c);
1161
1162 xfrm_pol_put(xp);
1163
1164 return 0;
1165 }
1166
1167 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1168 {
1169 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1170 int i;
1171
1172 if (xp->xfrm_nr == 0)
1173 return 0;
1174
1175 for (i = 0; i < xp->xfrm_nr; i++) {
1176 struct xfrm_user_tmpl *up = &vec[i];
1177 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1178
1179 memcpy(&up->id, &kp->id, sizeof(up->id));
1180 up->family = kp->encap_family;
1181 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1182 up->reqid = kp->reqid;
1183 up->mode = kp->mode;
1184 up->share = kp->share;
1185 up->optional = kp->optional;
1186 up->aalgos = kp->aalgos;
1187 up->ealgos = kp->ealgos;
1188 up->calgos = kp->calgos;
1189 }
1190
1191 return nla_put(skb, XFRMA_TMPL,
1192 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1193 }
1194
1195 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1196 {
1197 if (x->security) {
1198 return copy_sec_ctx(x->security, skb);
1199 }
1200 return 0;
1201 }
1202
1203 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1204 {
1205 if (xp->security) {
1206 return copy_sec_ctx(xp->security, skb);
1207 }
1208 return 0;
1209 }
1210 static inline size_t userpolicy_type_attrsize(void)
1211 {
1212 #ifdef CONFIG_XFRM_SUB_POLICY
1213 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1214 #else
1215 return 0;
1216 #endif
1217 }
1218
1219 #ifdef CONFIG_XFRM_SUB_POLICY
1220 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1221 {
1222 struct xfrm_userpolicy_type upt = {
1223 .type = type,
1224 };
1225
1226 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1227 }
1228
1229 #else
1230 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1231 {
1232 return 0;
1233 }
1234 #endif
1235
1236 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1237 {
1238 struct xfrm_dump_info *sp = ptr;
1239 struct xfrm_userpolicy_info *p;
1240 struct sk_buff *in_skb = sp->in_skb;
1241 struct sk_buff *skb = sp->out_skb;
1242 struct nlmsghdr *nlh;
1243
1244 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1245 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1246 if (nlh == NULL)
1247 return -EMSGSIZE;
1248
1249 p = nlmsg_data(nlh);
1250 copy_to_user_policy(xp, p, dir);
1251 if (copy_to_user_tmpl(xp, skb) < 0)
1252 goto nlmsg_failure;
1253 if (copy_to_user_sec_ctx(xp, skb))
1254 goto nlmsg_failure;
1255 if (copy_to_user_policy_type(xp->type, skb) < 0)
1256 goto nlmsg_failure;
1257
1258 nlmsg_end(skb, nlh);
1259 return 0;
1260
1261 nlmsg_failure:
1262 nlmsg_cancel(skb, nlh);
1263 return -EMSGSIZE;
1264 }
1265
1266 static int xfrm_dump_policy_done(struct netlink_callback *cb)
1267 {
1268 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1269
1270 xfrm_policy_walk_done(walk);
1271 return 0;
1272 }
1273
1274 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1275 {
1276 struct net *net = sock_net(skb->sk);
1277 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1278 struct xfrm_dump_info info;
1279
1280 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1281 sizeof(cb->args) - sizeof(cb->args[0]));
1282
1283 info.in_skb = cb->skb;
1284 info.out_skb = skb;
1285 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1286 info.nlmsg_flags = NLM_F_MULTI;
1287
1288 if (!cb->args[0]) {
1289 cb->args[0] = 1;
1290 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1291 }
1292
1293 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1294
1295 return skb->len;
1296 }
1297
1298 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1299 struct xfrm_policy *xp,
1300 int dir, u32 seq)
1301 {
1302 struct xfrm_dump_info info;
1303 struct sk_buff *skb;
1304
1305 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1306 if (!skb)
1307 return ERR_PTR(-ENOMEM);
1308
1309 info.in_skb = in_skb;
1310 info.out_skb = skb;
1311 info.nlmsg_seq = seq;
1312 info.nlmsg_flags = 0;
1313
1314 if (dump_one_policy(xp, dir, 0, &info) < 0) {
1315 kfree_skb(skb);
1316 return NULL;
1317 }
1318
1319 return skb;
1320 }
1321
1322 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1323 struct nlattr **attrs)
1324 {
1325 struct net *net = sock_net(skb->sk);
1326 struct xfrm_policy *xp;
1327 struct xfrm_userpolicy_id *p;
1328 u8 type = XFRM_POLICY_TYPE_MAIN;
1329 int err;
1330 struct km_event c;
1331 int delete;
1332
1333 p = nlmsg_data(nlh);
1334 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1335
1336 err = copy_from_user_policy_type(&type, attrs);
1337 if (err)
1338 return err;
1339
1340 err = verify_policy_dir(p->dir);
1341 if (err)
1342 return err;
1343
1344 if (p->index)
1345 xp = xfrm_policy_byid(net, type, p->dir, p->index, delete, &err);
1346 else {
1347 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1348 struct xfrm_sec_ctx *ctx;
1349
1350 err = verify_sec_ctx_len(attrs);
1351 if (err)
1352 return err;
1353
1354 ctx = NULL;
1355 if (rt) {
1356 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1357
1358 err = security_xfrm_policy_alloc(&ctx, uctx);
1359 if (err)
1360 return err;
1361 }
1362 xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx,
1363 delete, &err);
1364 security_xfrm_policy_free(ctx);
1365 }
1366 if (xp == NULL)
1367 return -ENOENT;
1368
1369 if (!delete) {
1370 struct sk_buff *resp_skb;
1371
1372 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1373 if (IS_ERR(resp_skb)) {
1374 err = PTR_ERR(resp_skb);
1375 } else {
1376 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
1377 NETLINK_CB(skb).pid);
1378 }
1379 } else {
1380 uid_t loginuid = NETLINK_CB(skb).loginuid;
1381 u32 sessionid = NETLINK_CB(skb).sessionid;
1382 u32 sid = NETLINK_CB(skb).sid;
1383
1384 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
1385 sid);
1386
1387 if (err != 0)
1388 goto out;
1389
1390 c.data.byid = p->index;
1391 c.event = nlh->nlmsg_type;
1392 c.seq = nlh->nlmsg_seq;
1393 c.pid = nlh->nlmsg_pid;
1394 km_policy_notify(xp, p->dir, &c);
1395 }
1396
1397 out:
1398 xfrm_pol_put(xp);
1399 return err;
1400 }
1401
1402 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1403 struct nlattr **attrs)
1404 {
1405 struct net *net = sock_net(skb->sk);
1406 struct km_event c;
1407 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1408 struct xfrm_audit audit_info;
1409 int err;
1410
1411 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1412 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1413 audit_info.secid = NETLINK_CB(skb).sid;
1414 err = xfrm_state_flush(net, p->proto, &audit_info);
1415 if (err)
1416 return err;
1417 c.data.proto = p->proto;
1418 c.event = nlh->nlmsg_type;
1419 c.seq = nlh->nlmsg_seq;
1420 c.pid = nlh->nlmsg_pid;
1421 c.net = net;
1422 km_state_notify(NULL, &c);
1423
1424 return 0;
1425 }
1426
1427 static inline size_t xfrm_aevent_msgsize(void)
1428 {
1429 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1430 + nla_total_size(sizeof(struct xfrm_replay_state))
1431 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1432 + nla_total_size(4) /* XFRM_AE_RTHR */
1433 + nla_total_size(4); /* XFRM_AE_ETHR */
1434 }
1435
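/*
 * Fill an XFRM_MSG_NEWAE message for the given state: SA identity,
 * current replay state and lifetime counters, plus the replay and expiry
 * thresholds when XFRM_AE_RTHR or XFRM_AE_ETHR is set in the event flags.
 */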
1436 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1437 {
1438 struct xfrm_aevent_id *id;
1439 struct nlmsghdr *nlh;
1440
1441 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1442 if (nlh == NULL)
1443 return -EMSGSIZE;
1444
1445 id = nlmsg_data(nlh);
1446 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr));
1447 id->sa_id.spi = x->id.spi;
1448 id->sa_id.family = x->props.family;
1449 id->sa_id.proto = x->id.proto;
1450 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr));
1451 id->reqid = x->props.reqid;
1452 id->flags = c->data.aevent;
1453
1454 NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
1455 NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1456
1457 if (id->flags & XFRM_AE_RTHR)
1458 NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1459
1460 if (id->flags & XFRM_AE_ETHR)
1461 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
1462 x->replay_maxage * 10 / HZ);
1463
1464 return nlmsg_end(skb, nlh);
1465
1466 nla_put_failure:
1467 nlmsg_cancel(skb, nlh);
1468 return -EMSGSIZE;
1469 }
1470
1471 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1472 struct nlattr **attrs)
1473 {
1474 struct net *net = sock_net(skb->sk);
1475 struct xfrm_state *x;
1476 struct sk_buff *r_skb;
1477 int err;
1478 struct km_event c;
1479 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1480 struct xfrm_usersa_id *id = &p->sa_id;
1481
1482 r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
1483 if (r_skb == NULL)
1484 return -ENOMEM;
1485
1486 x = xfrm_state_lookup(net, &id->daddr, id->spi, id->proto, id->family);
1487 if (x == NULL) {
1488 kfree_skb(r_skb);
1489 return -ESRCH;
1490 }
1491
1492 /*
1493 * XXX: is this lock really needed - none of the other
1494 * get handlers take it (the concern is things getting updated
1495 * while we are still reading) - jhs
1496 */
1497 spin_lock_bh(&x->lock);
1498 c.data.aevent = p->flags;
1499 c.seq = nlh->nlmsg_seq;
1500 c.pid = nlh->nlmsg_pid;
1501
1502 if (build_aevent(r_skb, x, &c) < 0)
1503 BUG();
1504 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid);
1505 spin_unlock_bh(&x->lock);
1506 xfrm_state_put(x);
1507 return err;
1508 }
1509
1510 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1511 struct nlattr **attrs)
1512 {
1513 struct net *net = sock_net(skb->sk);
1514 struct xfrm_state *x;
1515 struct km_event c;
1516 int err = -EINVAL;
1517 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1518 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1519 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
1520
1521 if (!lt && !rp)
1522 return err;
1523
1524 /* pedantic mode - thou shalt sayeth replaceth */
1525 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1526 return err;
1527
1528 x = xfrm_state_lookup(net, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1529 if (x == NULL)
1530 return -ESRCH;
1531
1532 if (x->km.state != XFRM_STATE_VALID)
1533 goto out;
1534
1535 spin_lock_bh(&x->lock);
1536 xfrm_update_ae_params(x, attrs);
1537 spin_unlock_bh(&x->lock);
1538
1539 c.event = nlh->nlmsg_type;
1540 c.seq = nlh->nlmsg_seq;
1541 c.pid = nlh->nlmsg_pid;
1542 c.data.aevent = XFRM_AE_CU;
1543 km_state_notify(x, &c);
1544 err = 0;
1545 out:
1546 xfrm_state_put(x);
1547 return err;
1548 }
1549
1550 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1551 struct nlattr **attrs)
1552 {
1553 struct net *net = sock_net(skb->sk);
1554 struct km_event c;
1555 u8 type = XFRM_POLICY_TYPE_MAIN;
1556 int err;
1557 struct xfrm_audit audit_info;
1558
1559 err = copy_from_user_policy_type(&type, attrs);
1560 if (err)
1561 return err;
1562
1563 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1564 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1565 audit_info.secid = NETLINK_CB(skb).sid;
1566 err = xfrm_policy_flush(net, type, &audit_info);
1567 if (err)
1568 return err;
1569 c.data.type = type;
1570 c.event = nlh->nlmsg_type;
1571 c.seq = nlh->nlmsg_seq;
1572 c.pid = nlh->nlmsg_pid;
1573 c.net = net;
1574 km_policy_notify(NULL, 0, &c);
1575 return 0;
1576 }
1577
1578 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1579 struct nlattr **attrs)
1580 {
1581 struct net *net = sock_net(skb->sk);
1582 struct xfrm_policy *xp;
1583 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
1584 struct xfrm_userpolicy_info *p = &up->pol;
1585 u8 type = XFRM_POLICY_TYPE_MAIN;
1586 int err = -ENOENT;
1587
1588 err = copy_from_user_policy_type(&type, attrs);
1589 if (err)
1590 return err;
1591
1592 if (p->index)
1593 xp = xfrm_policy_byid(net, type, p->dir, p->index, 0, &err);
1594 else {
1595 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1596 struct xfrm_sec_ctx *ctx;
1597
1598 err = verify_sec_ctx_len(attrs);
1599 if (err)
1600 return err;
1601
1602 ctx = NULL;
1603 if (rt) {
1604 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1605
1606 err = security_xfrm_policy_alloc(&ctx, uctx);
1607 if (err)
1608 return err;
1609 }
1610 xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx, 0, &err);
1611 security_xfrm_policy_free(ctx);
1612 }
1613 if (xp == NULL)
1614 return -ENOENT;
1615
1616 read_lock(&xp->lock);
1617 if (xp->walk.dead) {
1618 read_unlock(&xp->lock);
1619 goto out;
1620 }
1621
1622 read_unlock(&xp->lock);
1623 err = 0;
1624 if (up->hard) {
1625 uid_t loginuid = NETLINK_CB(skb).loginuid;
1626 u32 sessionid = NETLINK_CB(skb).sessionid;
1627 u32 sid = NETLINK_CB(skb).sid;
1628 xfrm_policy_delete(xp, p->dir);
1629 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
1630
1631 } else {
1632 // reset the timers here?
1633 printk("Dont know what to do with soft policy expire\n");
1634 }
1635 km_policy_expired(xp, p->dir, up->hard, current->pid);
1636
1637 out:
1638 xfrm_pol_put(xp);
1639 return err;
1640 }
1641
1642 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1643 struct nlattr **attrs)
1644 {
1645 struct net *net = sock_net(skb->sk);
1646 struct xfrm_state *x;
1647 int err;
1648 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1649 struct xfrm_usersa_info *p = &ue->state;
1650
1651 x = xfrm_state_lookup(net, &p->id.daddr, p->id.spi, p->id.proto, p->family);
1652
1653 err = -ENOENT;
1654 if (x == NULL)
1655 return err;
1656
1657 spin_lock_bh(&x->lock);
1658 err = -EINVAL;
1659 if (x->km.state != XFRM_STATE_VALID)
1660 goto out;
1661 km_state_expired(x, ue->hard, current->pid);
1662
1663 if (ue->hard) {
1664 uid_t loginuid = NETLINK_CB(skb).loginuid;
1665 u32 sessionid = NETLINK_CB(skb).sessionid;
1666 u32 sid = NETLINK_CB(skb).sid;
1667 __xfrm_state_delete(x);
1668 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
1669 }
1670 err = 0;
1671 out:
1672 spin_unlock_bh(&x->lock);
1673 xfrm_state_put(x);
1674 return err;
1675 }
1676
1677 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1678 struct nlattr **attrs)
1679 {
1680 struct net *net = sock_net(skb->sk);
1681 struct xfrm_policy *xp;
1682 struct xfrm_user_tmpl *ut;
1683 int i;
1684 struct nlattr *rt = attrs[XFRMA_TMPL];
1685
1686 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
1687 struct xfrm_state *x = xfrm_state_alloc(net);
1688 int err = -ENOMEM;
1689
1690 if (!x)
1691 return err;
1692
1693 err = verify_newpolicy_info(&ua->policy);
1694 if (err) {
1695 printk("BAD policy passed\n");
1696 kfree(x);
1697 return err;
1698 }
1699
1700 /* build an XP */
1701 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
1702 if (!xp) {
1703 kfree(x);
1704 return err;
1705 }
1706
1707 memcpy(&x->id, &ua->id, sizeof(ua->id));
1708 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1709 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1710
1711 ut = nla_data(rt);
1712 /* extract the templates and for each call km_query */
1713 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
1714 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1715 memcpy(&x->id, &t->id, sizeof(x->id));
1716 x->props.mode = t->mode;
1717 x->props.reqid = t->reqid;
1718 x->props.family = ut->family;
1719 t->aalgos = ua->aalgos;
1720 t->ealgos = ua->ealgos;
1721 t->calgos = ua->calgos;
1722 err = km_query(x, t, xp);
1723
1724 }
1725
1726 kfree(x);
1727 kfree(xp);
1728
1729 return 0;
1730 }
1731
1732 #ifdef CONFIG_XFRM_MIGRATE
1733 static int copy_from_user_migrate(struct xfrm_migrate *ma,
1734 struct xfrm_kmaddress *k,
1735 struct nlattr **attrs, int *num)
1736 {
1737 struct nlattr *rt = attrs[XFRMA_MIGRATE];
1738 struct xfrm_user_migrate *um;
1739 int i, num_migrate;
1740
1741 if (k != NULL) {
1742 struct xfrm_user_kmaddress *uk;
1743
1744 uk = nla_data(attrs[XFRMA_KMADDRESS]);
1745 memcpy(&k->local, &uk->local, sizeof(k->local));
1746 memcpy(&k->remote, &uk->remote, sizeof(k->remote));
1747 k->family = uk->family;
1748 k->reserved = uk->reserved;
1749 }
1750
1751 um = nla_data(rt);
1752 num_migrate = nla_len(rt) / sizeof(*um);
1753
1754 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
1755 return -EINVAL;
1756
1757 for (i = 0; i < num_migrate; i++, um++, ma++) {
1758 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
1759 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
1760 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
1761 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
1762
1763 ma->proto = um->proto;
1764 ma->mode = um->mode;
1765 ma->reqid = um->reqid;
1766
1767 ma->old_family = um->old_family;
1768 ma->new_family = um->new_family;
1769 }
1770
1771 *num = i;
1772 return 0;
1773 }
1774
1775 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1776 struct nlattr **attrs)
1777 {
1778 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
1779 struct xfrm_migrate m[XFRM_MAX_DEPTH];
1780 struct xfrm_kmaddress km, *kmp;
1781 u8 type;
1782 int err;
1783 int n = 0;
1784
1785 if (attrs[XFRMA_MIGRATE] == NULL)
1786 return -EINVAL;
1787
1788 kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
1789
1790 err = copy_from_user_policy_type(&type, attrs);
1791 if (err)
1792 return err;
1793
1794 err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
1795 if (err)
1796 return err;
1797
1798 if (!n)
1799 return 0;
1800
1801 xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp);
1802
1803 return 0;
1804 }
1805 #else
1806 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1807 struct nlattr **attrs)
1808 {
1809 return -ENOPROTOOPT;
1810 }
1811 #endif
1812
1813 #ifdef CONFIG_XFRM_MIGRATE
1814 static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
1815 {
1816 struct xfrm_user_migrate um;
1817
1818 memset(&um, 0, sizeof(um));
1819 um.proto = m->proto;
1820 um.mode = m->mode;
1821 um.reqid = m->reqid;
1822 um.old_family = m->old_family;
1823 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
1824 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
1825 um.new_family = m->new_family;
1826 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
1827 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
1828
1829 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
1830 }
1831
1832 static int copy_to_user_kmaddress(struct xfrm_kmaddress *k, struct sk_buff *skb)
1833 {
1834 struct xfrm_user_kmaddress uk;
1835
1836 memset(&uk, 0, sizeof(uk));
1837 uk.family = k->family;
1838 uk.reserved = k->reserved;
1839 memcpy(&uk.local, &k->local, sizeof(uk.local));
1840 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
1841
1842 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
1843 }
1844
1845 static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
1846 {
1847 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
1848 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
1849 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
1850 + userpolicy_type_attrsize();
1851 }
1852
1853 static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
1854 int num_migrate, struct xfrm_kmaddress *k,
1855 struct xfrm_selector *sel, u8 dir, u8 type)
1856 {
1857 struct xfrm_migrate *mp;
1858 struct xfrm_userpolicy_id *pol_id;
1859 struct nlmsghdr *nlh;
1860 int i;
1861
1862 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
1863 if (nlh == NULL)
1864 return -EMSGSIZE;
1865
1866 pol_id = nlmsg_data(nlh);
1867 /* copy data from selector, dir, and type to the pol_id */
1868 memset(pol_id, 0, sizeof(*pol_id));
1869 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
1870 pol_id->dir = dir;
1871
1872 if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0))
1873 goto nlmsg_failure;
1874
1875 if (copy_to_user_policy_type(type, skb) < 0)
1876 goto nlmsg_failure;
1877
1878 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
1879 if (copy_to_user_migrate(mp, skb) < 0)
1880 goto nlmsg_failure;
1881 }
1882
1883 return nlmsg_end(skb, nlh);
1884 nlmsg_failure:
1885 nlmsg_cancel(skb, nlh);
1886 return -EMSGSIZE;
1887 }
1888
1889 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1890 struct xfrm_migrate *m, int num_migrate,
1891 struct xfrm_kmaddress *k)
1892 {
1893 struct net *net = &init_net;
1894 struct sk_buff *skb;
1895
1896 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
1897 if (skb == NULL)
1898 return -ENOMEM;
1899
1900 /* build migrate */
1901 if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
1902 BUG();
1903
1904 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
1905 }
1906 #else
1907 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1908 struct xfrm_migrate *m, int num_migrate,
1909 struct xfrm_kmaddress *k)
1910 {
1911 return -ENOPROTOOPT;
1912 }
1913 #endif
1914
1915 #define XMSGSIZE(type) sizeof(struct type)
1916
1917 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
1918 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1919 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1920 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1921 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1922 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1923 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1924 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
1925 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
1926 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
1927 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1928 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1929 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
1930 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
1931 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
1932 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1933 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1934 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
1935 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1936 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
1937 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
1938 };
1939
1940 #undef XMSGSIZE
1941
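/*
 * Netlink attribute policy: structured attributes are validated by
 * minimum length, scalar attributes by type.  nlmsg_parse() applies this
 * table in xfrm_user_rcv_msg() before any handler runs.
 */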
1942 static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
1943 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
1944 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
1945 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
1946 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
1947 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
1948 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
1949 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
1950 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
1951 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
1952 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
1953 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
1954 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
1955 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
1956 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
1957 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
1958 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
1959 };
1960
1961 static struct xfrm_link {
1962 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
1963 int (*dump)(struct sk_buff *, struct netlink_callback *);
1964 int (*done)(struct netlink_callback *);
1965 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
1966 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1967 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
1968 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
1969 .dump = xfrm_dump_sa,
1970 .done = xfrm_dump_sa_done },
1971 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1972 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
1973 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
1974 .dump = xfrm_dump_policy,
1975 .done = xfrm_dump_policy_done },
1976 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
1977 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
1978 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
1979 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1980 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1981 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
1982 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
1983 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
1984 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
1985 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
1986 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
1987 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
1988 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
1989 };
1990
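/*
 * Top-level NETLINK_XFRM dispatcher.  Every operation, including GET,
 * requires CAP_NET_ADMIN.  GETSA/GETPOLICY with NLM_F_DUMP start a
 * netlink dump; all other messages are attribute-parsed against
 * xfrma_policy and handed to the matching ->doit() from xfrm_dispatch.
 *
 * Rough, untested sketch of the userspace side of e.g. a GETSPDINFO
 * request (the reply is the XFRM_MSG_NEWSPDINFO built above):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
 *	struct { struct nlmsghdr n; __u32 flags; } req = {
 *		.n.nlmsg_len   = NLMSG_LENGTH(sizeof(__u32)),
 *		.n.nlmsg_type  = XFRM_MSG_GETSPDINFO,
 *		.n.nlmsg_flags = NLM_F_REQUEST,
 *	};
 *	send(fd, &req, req.n.nlmsg_len, 0);
 */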
1991 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1992 {
1993 struct net *net = sock_net(skb->sk);
1994 struct nlattr *attrs[XFRMA_MAX+1];
1995 struct xfrm_link *link;
1996 int type, err;
1997
1998 type = nlh->nlmsg_type;
1999 if (type > XFRM_MSG_MAX)
2000 return -EINVAL;
2001
2002 type -= XFRM_MSG_BASE;
2003 link = &xfrm_dispatch[type];
2004
2005 /* All operations require privileges, even GET */
2006 if (security_netlink_recv(skb, CAP_NET_ADMIN))
2007 return -EPERM;
2008
2009 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
2010 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
2011 (nlh->nlmsg_flags & NLM_F_DUMP)) {
2012 if (link->dump == NULL)
2013 return -EINVAL;
2014
2015 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, link->dump, link->done);
2016 }
2017
2018 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
2019 xfrma_policy);
2020 if (err < 0)
2021 return err;
2022
2023 if (link->doit == NULL)
2024 return -EINVAL;
2025
2026 return link->doit(skb, nlh, attrs);
2027 }
2028
2029 static void xfrm_netlink_rcv(struct sk_buff *skb)
2030 {
2031 mutex_lock(&xfrm_cfg_mutex);
2032 netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
2033 mutex_unlock(&xfrm_cfg_mutex);
2034 }
2035
2036 static inline size_t xfrm_expire_msgsize(void)
2037 {
2038 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire));
2039 }
2040
2041 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
2042 {
2043 struct xfrm_user_expire *ue;
2044 struct nlmsghdr *nlh;
2045
2046 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
2047 if (nlh == NULL)
2048 return -EMSGSIZE;
2049
2050 ue = nlmsg_data(nlh);
2051 copy_to_user_state(x, &ue->state);
2052 ue->hard = (c->data.hard != 0) ? 1 : 0;
2053
2054 return nlmsg_end(skb, nlh);
2055 }
2056
2057 static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
2058 {
2059 struct net *net = xs_net(x);
2060 struct sk_buff *skb;
2061
2062 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
2063 if (skb == NULL)
2064 return -ENOMEM;
2065
2066 if (build_expire(skb, x, c) < 0)
2067 BUG();
2068
2069 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2070 }
2071
2072 static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
2073 {
2074 struct net *net = xs_net(x);
2075 struct sk_buff *skb;
2076
2077 skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
2078 if (skb == NULL)
2079 return -ENOMEM;
2080
2081 if (build_aevent(skb, x, c) < 0)
2082 BUG();
2083
2084 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
2085 }
2086
2087 static int xfrm_notify_sa_flush(struct km_event *c)
2088 {
2089 struct net *net = c->net;
2090 struct xfrm_usersa_flush *p;
2091 struct nlmsghdr *nlh;
2092 struct sk_buff *skb;
2093 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
2094
2095 skb = nlmsg_new(len, GFP_ATOMIC);
2096 if (skb == NULL)
2097 return -ENOMEM;
2098
2099 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
2100 if (nlh == NULL) {
2101 kfree_skb(skb);
2102 return -EMSGSIZE;
2103 }
2104
2105 p = nlmsg_data(nlh);
2106 p->proto = c->data.proto;
2107
2108 nlmsg_end(skb, nlh);
2109
2110 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2111 }
2112
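/*
 * Attribute space needed to dump one SA; this is the mirror of
 * copy_to_user_state_extra() and must be kept in sync with it.
 */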
2113 static inline size_t xfrm_sa_len(struct xfrm_state *x)
2114 {
2115 size_t l = 0;
2116 if (x->aead)
2117 l += nla_total_size(aead_len(x->aead));
2118 if (x->aalg)
2119 l += nla_total_size(xfrm_alg_len(x->aalg));
2120 if (x->ealg)
2121 l += nla_total_size(xfrm_alg_len(x->ealg));
2122 if (x->calg)
2123 l += nla_total_size(sizeof(*x->calg));
2124 if (x->encap)
2125 l += nla_total_size(sizeof(*x->encap));
2126 if (x->security)
2127 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
2128 x->security->ctx_len);
2129 if (x->coaddr)
2130 l += nla_total_size(sizeof(*x->coaddr));
2131
2132 /* Must count x->lastused as it may become non-zero behind our back. */
2133 l += nla_total_size(sizeof(u64));
2134
2135 return l;
2136 }
2137
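/* SA add/update/delete notifications.  NEWSA and UPDSA carry a full
 * xfrm_usersa_info header; DELSA instead uses the smaller xfrm_usersa_id
 * header and nests the full state as an XFRMA_SA attribute, which is why
 * the DELSA case below adds nla_total_size(headlen) to the allocation.
 */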
2138 static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
2139 {
2140 struct net *net = xs_net(x);
2141 struct xfrm_usersa_info *p;
2142 struct xfrm_usersa_id *id;
2143 struct nlmsghdr *nlh;
2144 struct sk_buff *skb;
2145 int len = xfrm_sa_len(x);
2146 int headlen;
2147
2148 headlen = sizeof(*p);
2149 if (c->event == XFRM_MSG_DELSA) {
2150 len += nla_total_size(headlen);
2151 headlen = sizeof(*id);
2152 }
2153 len += NLMSG_ALIGN(headlen);
2154
2155 skb = nlmsg_new(len, GFP_ATOMIC);
2156 if (skb == NULL)
2157 return -ENOMEM;
2158
2159 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2160 if (nlh == NULL)
2161 goto nla_put_failure;
2162
2163 p = nlmsg_data(nlh);
2164 if (c->event == XFRM_MSG_DELSA) {
2165 struct nlattr *attr;
2166
2167 id = nlmsg_data(nlh);
2168 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2169 id->spi = x->id.spi;
2170 id->family = x->props.family;
2171 id->proto = x->id.proto;
2172
2173 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2174 if (attr == NULL)
2175 goto nla_put_failure;
2176
2177 p = nla_data(attr);
2178 }
2179
2180 if (copy_to_user_state_extra(x, p, skb))
2181 goto nla_put_failure;
2182
2183 nlmsg_end(skb, nlh);
2184
2185 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2186
2187 nla_put_failure:
2188 /* Somebody screwed up with xfrm_sa_len! */
2189 WARN_ON(1);
2190 kfree_skb(skb);
2191 return -1;
2192 }
2193
2194 static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
2195 {
2196
2197 switch (c->event) {
2198 case XFRM_MSG_EXPIRE:
2199 return xfrm_exp_state_notify(x, c);
2200 case XFRM_MSG_NEWAE:
2201 return xfrm_aevent_state_notify(x, c);
2202 case XFRM_MSG_DELSA:
2203 case XFRM_MSG_UPDSA:
2204 case XFRM_MSG_NEWSA:
2205 return xfrm_notify_sa(x, c);
2206 case XFRM_MSG_FLUSHSA:
2207 return xfrm_notify_sa_flush(c);
2208 default:
2209 			printk(KERN_ERR "xfrm_user: Unknown SA event %d\n", c->event);
2210 break;
2211 }
2212
2213 return 0;
2214
2215 }
2216
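/* XFRM_MSG_ACQUIRE: ask user space key managers (e.g. an IKE daemon) to
 * negotiate an SA for a larval state.  The message carries the state id,
 * selector, the triggering policy and its templates, plus the algorithm
 * masks taken from the matching template.
 */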
2217 static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2218 struct xfrm_policy *xp)
2219 {
2220 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2221 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2222 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2223 + userpolicy_type_attrsize();
2224 }
2225
2226 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2227 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2228 int dir)
2229 {
2230 struct xfrm_user_acquire *ua;
2231 struct nlmsghdr *nlh;
2232 __u32 seq = xfrm_get_acqseq();
2233
2234 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2235 if (nlh == NULL)
2236 return -EMSGSIZE;
2237
2238 ua = nlmsg_data(nlh);
2239 memcpy(&ua->id, &x->id, sizeof(ua->id));
2240 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2241 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2242 copy_to_user_policy(xp, &ua->policy, dir);
2243 ua->aalgos = xt->aalgos;
2244 ua->ealgos = xt->ealgos;
2245 ua->calgos = xt->calgos;
2246 ua->seq = x->km.seq = seq;
2247
2248 if (copy_to_user_tmpl(xp, skb) < 0)
2249 goto nlmsg_failure;
2250 if (copy_to_user_state_sec_ctx(x, skb))
2251 goto nlmsg_failure;
2252 if (copy_to_user_policy_type(xp->type, skb) < 0)
2253 goto nlmsg_failure;
2254
2255 return nlmsg_end(skb, nlh);
2256
2257 nlmsg_failure:
2258 nlmsg_cancel(skb, nlh);
2259 return -EMSGSIZE;
2260 }
2261
2262 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2263 struct xfrm_policy *xp, int dir)
2264 {
2265 struct net *net = xs_net(x);
2266 struct sk_buff *skb;
2267
2268 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2269 if (skb == NULL)
2270 return -ENOMEM;
2271
2272 if (build_acquire(skb, x, xt, xp, dir) < 0)
2273 BUG();
2274
2275 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2276 }
2277
2278 /* User gives us xfrm_user_policy_info followed by an array of 0
2279 * or more templates.
2280 */
2281 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2282 u8 *data, int len, int *dir)
2283 {
2284 struct net *net = sock_net(sk);
2285 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2286 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2287 struct xfrm_policy *xp;
2288 int nr;
2289
2290 switch (sk->sk_family) {
2291 case AF_INET:
2292 if (opt != IP_XFRM_POLICY) {
2293 *dir = -EOPNOTSUPP;
2294 return NULL;
2295 }
2296 break;
2297 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2298 case AF_INET6:
2299 if (opt != IPV6_XFRM_POLICY) {
2300 *dir = -EOPNOTSUPP;
2301 return NULL;
2302 }
2303 break;
2304 #endif
2305 default:
2306 *dir = -EINVAL;
2307 return NULL;
2308 }
2309
2310 *dir = -EINVAL;
2311
2312 if (len < sizeof(*p) ||
2313 verify_newpolicy_info(p))
2314 return NULL;
2315
2316 nr = ((len - sizeof(*p)) / sizeof(*ut));
2317 if (validate_tmpl(nr, ut, p->sel.family))
2318 return NULL;
2319
2320 if (p->dir > XFRM_POLICY_OUT)
2321 return NULL;
2322
2323 xp = xfrm_policy_alloc(net, GFP_KERNEL);
2324 if (xp == NULL) {
2325 *dir = -ENOBUFS;
2326 return NULL;
2327 }
2328
2329 copy_from_user_policy(xp, p);
2330 xp->type = XFRM_POLICY_TYPE_MAIN;
2331 copy_templates(xp, ut, nr);
2332
2333 *dir = p->dir;
2334
2335 return xp;
2336 }
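/* A minimal sketch (not part of this file) of the buffer layout the
 * function above expects from user space, installed with
 * setsockopt(IP_XFRM_POLICY) on an already-open AF_INET socket "fd"
 * (the variable name is only illustrative):
 *
 *	struct {
 *		struct xfrm_userpolicy_info info;
 *		struct xfrm_user_tmpl tmpl[1];
 *	} pol = { };
 *
 *	pol.info.sel.family = AF_INET;
 *	pol.info.dir = XFRM_POLICY_OUT;
 *	pol.info.action = XFRM_POLICY_ALLOW;
 *	pol.tmpl[0].family = AF_INET;
 *	pol.tmpl[0].id.proto = IPPROTO_ESP;
 *	pol.tmpl[0].mode = XFRM_MODE_TRANSPORT;
 *
 *	setsockopt(fd, SOL_IP, IP_XFRM_POLICY, &pol, sizeof(pol));
 */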
2337
2338 static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2339 {
2340 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2341 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2342 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2343 + userpolicy_type_attrsize();
2344 }
2345
2346 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2347 int dir, struct km_event *c)
2348 {
2349 struct xfrm_user_polexpire *upe;
2350 struct nlmsghdr *nlh;
2351 int hard = c->data.hard;
2352
2353 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2354 if (nlh == NULL)
2355 return -EMSGSIZE;
2356
2357 upe = nlmsg_data(nlh);
2358 copy_to_user_policy(xp, &upe->pol, dir);
2359 if (copy_to_user_tmpl(xp, skb) < 0)
2360 goto nlmsg_failure;
2361 if (copy_to_user_sec_ctx(xp, skb))
2362 goto nlmsg_failure;
2363 if (copy_to_user_policy_type(xp->type, skb) < 0)
2364 goto nlmsg_failure;
2365 upe->hard = !!hard;
2366
2367 return nlmsg_end(skb, nlh);
2368
2369 nlmsg_failure:
2370 nlmsg_cancel(skb, nlh);
2371 return -EMSGSIZE;
2372 }
2373
2374 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2375 {
2376 struct net *net = xp_net(xp);
2377 struct sk_buff *skb;
2378
2379 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2380 if (skb == NULL)
2381 return -ENOMEM;
2382
2383 if (build_polexpire(skb, xp, dir, c) < 0)
2384 BUG();
2385
2386 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2387 }
2388
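/* Policy add/update/delete notifications, mirroring xfrm_notify_sa():
 * DELPOLICY uses an xfrm_userpolicy_id header and nests the full
 * xfrm_userpolicy_info as an XFRMA_POLICY attribute.
 */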
2389 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
2390 {
2391 struct net *net = xp_net(xp);
2392 struct xfrm_userpolicy_info *p;
2393 struct xfrm_userpolicy_id *id;
2394 struct nlmsghdr *nlh;
2395 struct sk_buff *skb;
2396 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2397 int headlen;
2398
2399 headlen = sizeof(*p);
2400 if (c->event == XFRM_MSG_DELPOLICY) {
2401 len += nla_total_size(headlen);
2402 headlen = sizeof(*id);
2403 }
2404 len += userpolicy_type_attrsize();
2405 len += NLMSG_ALIGN(headlen);
2406
2407 skb = nlmsg_new(len, GFP_ATOMIC);
2408 if (skb == NULL)
2409 return -ENOMEM;
2410
2411 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2412 if (nlh == NULL)
2413 goto nlmsg_failure;
2414
2415 p = nlmsg_data(nlh);
2416 if (c->event == XFRM_MSG_DELPOLICY) {
2417 struct nlattr *attr;
2418
2419 id = nlmsg_data(nlh);
2420 memset(id, 0, sizeof(*id));
2421 id->dir = dir;
2422 if (c->data.byid)
2423 id->index = xp->index;
2424 else
2425 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2426
2427 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2428 if (attr == NULL)
2429 goto nlmsg_failure;
2430
2431 p = nla_data(attr);
2432 }
2433
2434 copy_to_user_policy(xp, p, dir);
2435 if (copy_to_user_tmpl(xp, skb) < 0)
2436 goto nlmsg_failure;
2437 if (copy_to_user_policy_type(xp->type, skb) < 0)
2438 goto nlmsg_failure;
2439
2440 nlmsg_end(skb, nlh);
2441
2442 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2443
2444 nlmsg_failure:
2445 kfree_skb(skb);
2446 return -1;
2447 }
2448
2449 static int xfrm_notify_policy_flush(struct km_event *c)
2450 {
2451 struct net *net = c->net;
2452 struct nlmsghdr *nlh;
2453 struct sk_buff *skb;
2454
2455 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2456 if (skb == NULL)
2457 return -ENOMEM;
2458
2459 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2460 if (nlh == NULL)
2461 goto nlmsg_failure;
2462 if (copy_to_user_policy_type(c->data.type, skb) < 0)
2463 goto nlmsg_failure;
2464
2465 nlmsg_end(skb, nlh);
2466
2467 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2468
2469 nlmsg_failure:
2470 kfree_skb(skb);
2471 return -1;
2472 }
2473
2474 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2475 {
2476
2477 switch (c->event) {
2478 case XFRM_MSG_NEWPOLICY:
2479 case XFRM_MSG_UPDPOLICY:
2480 case XFRM_MSG_DELPOLICY:
2481 return xfrm_notify_policy(xp, dir, c);
2482 case XFRM_MSG_FLUSHPOLICY:
2483 return xfrm_notify_policy_flush(c);
2484 case XFRM_MSG_POLEXPIRE:
2485 return xfrm_exp_policy_notify(xp, dir, c);
2486 default:
2487 		printk(KERN_ERR "xfrm_user: Unknown Policy event %d\n", c->event);
2488 }
2489
2490 return 0;
2491
2492 }
2493
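/* XFRM_MSG_REPORT: used by the km_report() hook (e.g. from Mobile IPv6) to
 * push a protocol/selector report to XFRMNLGRP_REPORT listeners; an
 * optional address is attached as an XFRMA_COADDR attribute.
 */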
2494 static inline size_t xfrm_report_msgsize(void)
2495 {
2496 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
2497 }
2498
2499 static int build_report(struct sk_buff *skb, u8 proto,
2500 struct xfrm_selector *sel, xfrm_address_t *addr)
2501 {
2502 struct xfrm_user_report *ur;
2503 struct nlmsghdr *nlh;
2504
2505 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
2506 if (nlh == NULL)
2507 return -EMSGSIZE;
2508
2509 ur = nlmsg_data(nlh);
2510 ur->proto = proto;
2511 memcpy(&ur->sel, sel, sizeof(ur->sel));
2512
2513 if (addr)
2514 NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
2515
2516 return nlmsg_end(skb, nlh);
2517
2518 nla_put_failure:
2519 nlmsg_cancel(skb, nlh);
2520 return -EMSGSIZE;
2521 }
2522
2523 static int xfrm_send_report(u8 proto, struct xfrm_selector *sel,
2524 xfrm_address_t *addr)
2525 {
2526 struct net *net = &init_net;
2527 struct sk_buff *skb;
2528
2529 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
2530 if (skb == NULL)
2531 return -ENOMEM;
2532
2533 if (build_report(skb, proto, sel, addr) < 0)
2534 BUG();
2535
2536 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2537 }
2538
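/* XFRM_MSG_MAPPING: report a NAT traversal address/port change for a
 * UDP-encapsulated ESP state so user space can update its peer mapping.
 * Only ESP states that have encapsulation configured generate these.
 */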
2539 static inline size_t xfrm_mapping_msgsize(void)
2540 {
2541 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
2542 }
2543
2544 static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
2545 xfrm_address_t *new_saddr, __be16 new_sport)
2546 {
2547 struct xfrm_user_mapping *um;
2548 struct nlmsghdr *nlh;
2549
2550 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
2551 if (nlh == NULL)
2552 return -EMSGSIZE;
2553
2554 um = nlmsg_data(nlh);
2555
2556 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
2557 um->id.spi = x->id.spi;
2558 um->id.family = x->props.family;
2559 um->id.proto = x->id.proto;
2560 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
2561 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
2562 um->new_sport = new_sport;
2563 um->old_sport = x->encap->encap_sport;
2564 um->reqid = x->props.reqid;
2565
2566 return nlmsg_end(skb, nlh);
2567 }
2568
2569 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
2570 __be16 sport)
2571 {
2572 struct net *net = xs_net(x);
2573 struct sk_buff *skb;
2574
2575 if (x->id.proto != IPPROTO_ESP)
2576 return -EINVAL;
2577
2578 if (!x->encap)
2579 return -EINVAL;
2580
2581 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
2582 if (skb == NULL)
2583 return -ENOMEM;
2584
2585 if (build_mapping(skb, x, ipaddr, sport) < 0)
2586 BUG();
2587
2588 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
2589 }
2590
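/* Register this netlink interface as a key manager so the xfrm core can
 * deliver acquire, expire, policy, report and mapping events through it.
 */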
2591 static struct xfrm_mgr netlink_mgr = {
2592 .id = "netlink",
2593 .notify = xfrm_send_state_notify,
2594 .acquire = xfrm_send_acquire,
2595 .compile_policy = xfrm_compile_policy,
2596 .notify_policy = xfrm_send_policy_notify,
2597 .report = xfrm_send_report,
2598 .migrate = xfrm_send_migrate,
2599 .new_mapping = xfrm_send_mapping,
2600 };
2601
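/* Per-namespace setup: each struct net gets its own NETLINK_XFRM kernel
 * socket.  Teardown clears the pointer first and waits for an RCU grace
 * period before releasing the socket, so concurrent users see either the
 * live socket or NULL, never a freed one.
 */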
2602 static int __net_init xfrm_user_net_init(struct net *net)
2603 {
2604 struct sock *nlsk;
2605
2606 nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX,
2607 xfrm_netlink_rcv, NULL, THIS_MODULE);
2608 if (nlsk == NULL)
2609 return -ENOMEM;
2610 rcu_assign_pointer(net->xfrm.nlsk, nlsk);
2611 return 0;
2612 }
2613
2614 static void __net_exit xfrm_user_net_exit(struct net *net)
2615 {
2616 struct sock *nlsk = net->xfrm.nlsk;
2617
2618 rcu_assign_pointer(net->xfrm.nlsk, NULL);
2619 synchronize_rcu();
2620 netlink_kernel_release(nlsk);
2621 }
2622
2623 static struct pernet_operations xfrm_user_net_ops = {
2624 .init = xfrm_user_net_init,
2625 .exit = xfrm_user_net_exit,
2626 };
2627
2628 static int __init xfrm_user_init(void)
2629 {
2630 int rv;
2631
2632 printk(KERN_INFO "Initializing XFRM netlink socket\n");
2633
2634 rv = register_pernet_subsys(&xfrm_user_net_ops);
2635 if (rv < 0)
2636 return rv;
2637 rv = xfrm_register_km(&netlink_mgr);
2638 if (rv < 0)
2639 unregister_pernet_subsys(&xfrm_user_net_ops);
2640 return rv;
2641 }
2642
2643 static void __exit xfrm_user_exit(void)
2644 {
2645 xfrm_unregister_km(&netlink_mgr);
2646 unregister_pernet_subsys(&xfrm_user_net_ops);
2647 }
2648
2649 module_init(xfrm_user_init);
2650 module_exit(xfrm_user_exit);
2651 MODULE_LICENSE("GPL");
2652 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
2653