[XFRM]: xfrm audit calls
[deliverable/linux.git] net/xfrm/xfrm_user.c
1 /* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/pfkeyv2.h>
23 #include <linux/ipsec.h>
24 #include <linux/init.h>
25 #include <linux/security.h>
26 #include <net/sock.h>
27 #include <net/xfrm.h>
28 #include <net/netlink.h>
29 #include <asm/uaccess.h>
30 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
31 #include <linux/in6.h>
32 #endif
33
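/* Total size of an xfrm_algo attribute: the fixed struct plus the
* variable-length key, converted from bits to bytes (rounded up). */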
34 static inline int alg_len(struct xfrm_algo *alg)
35 {
36 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
37 }
38
39 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
40 {
41 struct nlattr *rt = attrs[type];
42 struct xfrm_algo *algp;
43
44 if (!rt)
45 return 0;
46
47 algp = nla_data(rt);
48 if (nla_len(rt) < alg_len(algp))
49 return -EINVAL;
50
51 switch (type) {
52 case XFRMA_ALG_AUTH:
53 if (!algp->alg_key_len &&
54 strcmp(algp->alg_name, "digest_null") != 0)
55 return -EINVAL;
56 break;
57
58 case XFRMA_ALG_CRYPT:
59 if (!algp->alg_key_len &&
60 strcmp(algp->alg_name, "cipher_null") != 0)
61 return -EINVAL;
62 break;
63
64 case XFRMA_ALG_COMP:
65 /* Zero length keys are legal. */
66 break;
67
68 default:
69 return -EINVAL;
70 }
71
72 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
73 return 0;
74 }
75
76 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
77 xfrm_address_t **addrp)
78 {
79 struct nlattr *rt = attrs[type];
80
81 if (rt && addrp)
82 *addrp = nla_data(rt);
83 }
84
85 static inline int verify_sec_ctx_len(struct nlattr **attrs)
86 {
87 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
88 struct xfrm_user_sec_ctx *uctx;
89
90 if (!rt)
91 return 0;
92
93 uctx = nla_data(rt);
94 if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
95 return -EINVAL;
96
97 return 0;
98 }
99
100
101 static int verify_newsa_info(struct xfrm_usersa_info *p,
102 struct nlattr **attrs)
103 {
104 int err;
105
106 err = -EINVAL;
107 switch (p->family) {
108 case AF_INET:
109 break;
110
111 case AF_INET6:
112 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
113 break;
114 #else
115 err = -EAFNOSUPPORT;
116 goto out;
117 #endif
118
119 default:
120 goto out;
121 }
122
123 err = -EINVAL;
124 switch (p->id.proto) {
125 case IPPROTO_AH:
126 if (!attrs[XFRMA_ALG_AUTH] ||
127 attrs[XFRMA_ALG_CRYPT] ||
128 attrs[XFRMA_ALG_COMP])
129 goto out;
130 break;
131
132 case IPPROTO_ESP:
133 if ((!attrs[XFRMA_ALG_AUTH] &&
134 !attrs[XFRMA_ALG_CRYPT]) ||
135 attrs[XFRMA_ALG_COMP])
136 goto out;
137 break;
138
139 case IPPROTO_COMP:
140 if (!attrs[XFRMA_ALG_COMP] ||
141 attrs[XFRMA_ALG_AUTH] ||
142 attrs[XFRMA_ALG_CRYPT])
143 goto out;
144 break;
145
146 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
147 case IPPROTO_DSTOPTS:
148 case IPPROTO_ROUTING:
149 if (attrs[XFRMA_ALG_COMP] ||
150 attrs[XFRMA_ALG_AUTH] ||
151 attrs[XFRMA_ALG_CRYPT] ||
152 attrs[XFRMA_ENCAP] ||
153 attrs[XFRMA_SEC_CTX] ||
154 !attrs[XFRMA_COADDR])
155 goto out;
156 break;
157 #endif
158
159 default:
160 goto out;
161 }
162
163 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
164 goto out;
165 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
166 goto out;
167 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
168 goto out;
169 if ((err = verify_sec_ctx_len(attrs)))
170 goto out;
171
172 err = -EINVAL;
173 switch (p->mode) {
174 case XFRM_MODE_TRANSPORT:
175 case XFRM_MODE_TUNNEL:
176 case XFRM_MODE_ROUTEOPTIMIZATION:
177 case XFRM_MODE_BEET:
178 break;
179
180 default:
181 goto out;
182 }
183
184 err = 0;
185
186 out:
187 return err;
188 }
189
190 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
191 struct xfrm_algo_desc *(*get_byname)(char *, int),
192 struct nlattr *rta)
193 {
194 struct xfrm_algo *p, *ualg;
195 struct xfrm_algo_desc *algo;
196
197 if (!rta)
198 return 0;
199
200 ualg = nla_data(rta);
201
202 algo = get_byname(ualg->alg_name, 1);
203 if (!algo)
204 return -ENOSYS;
205 *props = algo->desc.sadb_alg_id;
206
207 p = kmemdup(ualg, alg_len(ualg), GFP_KERNEL);
208 if (!p)
209 return -ENOMEM;
210
211 strcpy(p->alg_name, algo->name);
212 *algpp = p;
213 return 0;
214 }
215
216 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
217 {
218 int len = 0;
219
220 if (xfrm_ctx) {
221 len += sizeof(struct xfrm_user_sec_ctx);
222 len += xfrm_ctx->ctx_len;
223 }
224 return len;
225 }
226
227 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
228 {
229 memcpy(&x->id, &p->id, sizeof(x->id));
230 memcpy(&x->sel, &p->sel, sizeof(x->sel));
231 memcpy(&x->lft, &p->lft, sizeof(x->lft));
232 x->props.mode = p->mode;
233 x->props.replay_window = p->replay_window;
234 x->props.reqid = p->reqid;
235 x->props.family = p->family;
236 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
237 x->props.flags = p->flags;
238
239 /*
240 * Set inner address family if the KM left it as zero.
241 * See comment in validate_tmpl.
242 */
243 if (!x->sel.family)
244 x->sel.family = p->family;
245 }
246
247 /*
248 * Someday, when pfkey also has support, this code could be made
249 * shareable and moved to xfrm_state.c. - JHS
250 *
251 */
252 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
253 {
254 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
255 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
256 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
257 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
258
259 if (rp) {
260 struct xfrm_replay_state *replay;
261 replay = nla_data(rp);
262 memcpy(&x->replay, replay, sizeof(*replay));
263 memcpy(&x->preplay, replay, sizeof(*replay));
264 }
265
266 if (lt) {
267 struct xfrm_lifetime_cur *ltime;
268 ltime = nla_data(lt);
269 x->curlft.bytes = ltime->bytes;
270 x->curlft.packets = ltime->packets;
271 x->curlft.add_time = ltime->add_time;
272 x->curlft.use_time = ltime->use_time;
273 }
274
275 if (et)
276 x->replay_maxage = nla_get_u32(et);
277
278 if (rt)
279 x->replay_maxdiff = nla_get_u32(rt);
280 }
281
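/* Build a new xfrm_state from the userspace SA description and its
* attributes. On failure the half-constructed state is marked dead,
* released, and *errp carries the error. */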
282 static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
283 struct nlattr **attrs,
284 int *errp)
285 {
286 struct xfrm_state *x = xfrm_state_alloc();
287 int err = -ENOMEM;
288
289 if (!x)
290 goto error_no_put;
291
292 copy_from_user_state(x, p);
293
294 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
295 xfrm_aalg_get_byname,
296 attrs[XFRMA_ALG_AUTH])))
297 goto error;
298 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
299 xfrm_ealg_get_byname,
300 attrs[XFRMA_ALG_CRYPT])))
301 goto error;
302 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
303 xfrm_calg_get_byname,
304 attrs[XFRMA_ALG_COMP])))
305 goto error;
306
307 if (attrs[XFRMA_ENCAP]) {
308 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
309 sizeof(*x->encap), GFP_KERNEL);
310 if (x->encap == NULL)
311 goto error;
312 }
313
314 if (attrs[XFRMA_COADDR]) {
315 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
316 sizeof(*x->coaddr), GFP_KERNEL);
317 if (x->coaddr == NULL)
318 goto error;
319 }
320
321 err = xfrm_init_state(x);
322 if (err)
323 goto error;
324
325 if (attrs[XFRMA_SEC_CTX] &&
326 security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
327 goto error;
328
329 x->km.seq = p->seq;
330 x->replay_maxdiff = sysctl_xfrm_aevent_rseqth;
331 /* sysctl_xfrm_aevent_etime is in 100ms units */
332 x->replay_maxage = (sysctl_xfrm_aevent_etime * HZ) / XFRM_AE_ETH_M;
333 x->preplay.bitmap = 0;
334 x->preplay.seq = x->replay.seq + x->replay_maxdiff;
335 x->preplay.oseq = x->replay.oseq + x->replay_maxdiff;
336
337 /* override default values from above */
338
339 xfrm_update_ae_params(x, attrs);
340
341 return x;
342
343 error:
344 x->km.state = XFRM_STATE_DEAD;
345 xfrm_state_put(x);
346 error_no_put:
347 *errp = err;
348 return NULL;
349 }
350
351 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
352 struct nlattr **attrs)
353 {
354 struct xfrm_usersa_info *p = nlmsg_data(nlh);
355 struct xfrm_state *x;
356 int err;
357 struct km_event c;
358
359 err = verify_newsa_info(p, attrs);
360 if (err)
361 return err;
362
363 x = xfrm_state_construct(p, attrs, &err);
364 if (!x)
365 return err;
366
367 xfrm_state_hold(x);
368 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
369 err = xfrm_state_add(x);
370 else
371 err = xfrm_state_update(x);
372
373 xfrm_audit_state_add(x, err ? 0 : 1, NETLINK_CB(skb).loginuid,
374 NETLINK_CB(skb).sid);
375
376 if (err < 0) {
377 x->km.state = XFRM_STATE_DEAD;
378 __xfrm_state_put(x);
379 goto out;
380 }
381
382 c.seq = nlh->nlmsg_seq;
383 c.pid = nlh->nlmsg_pid;
384 c.event = nlh->nlmsg_type;
385
386 km_state_notify(x, &c);
387 out:
388 xfrm_state_put(x);
389 return err;
390 }
391
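/* Resolve the SA named by a usersa_id: by (daddr, spi, proto) for
* SPI-carrying protocols, otherwise by source/destination address. */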
392 static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p,
393 struct nlattr **attrs,
394 int *errp)
395 {
396 struct xfrm_state *x = NULL;
397 int err;
398
399 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
400 err = -ESRCH;
401 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
402 } else {
403 xfrm_address_t *saddr = NULL;
404
405 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
406 if (!saddr) {
407 err = -EINVAL;
408 goto out;
409 }
410
411 err = -ESRCH;
412 x = xfrm_state_lookup_byaddr(&p->daddr, saddr, p->proto,
413 p->family);
414 }
415
416 out:
417 if (!x && errp)
418 *errp = err;
419 return x;
420 }
421
422 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
423 struct nlattr **attrs)
424 {
425 struct xfrm_state *x;
426 int err = -ESRCH;
427 struct km_event c;
428 struct xfrm_usersa_id *p = nlmsg_data(nlh);
429
430 x = xfrm_user_state_lookup(p, attrs, &err);
431 if (x == NULL)
432 return err;
433
434 if ((err = security_xfrm_state_delete(x)) != 0)
435 goto out;
436
437 if (xfrm_state_kern(x)) {
438 err = -EPERM;
439 goto out;
440 }
441
442 err = xfrm_state_delete(x);
443
444 if (err < 0)
445 goto out;
446
447 c.seq = nlh->nlmsg_seq;
448 c.pid = nlh->nlmsg_pid;
449 c.event = nlh->nlmsg_type;
450 km_state_notify(x, &c);
451
452 out:
453 xfrm_audit_state_delete(x, err ? 0 : 1, NETLINK_CB(skb).loginuid,
454 NETLINK_CB(skb).sid);
455 xfrm_state_put(x);
456 return err;
457 }
458
459 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
460 {
461 memcpy(&p->id, &x->id, sizeof(p->id));
462 memcpy(&p->sel, &x->sel, sizeof(p->sel));
463 memcpy(&p->lft, &x->lft, sizeof(p->lft));
464 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
465 memcpy(&p->stats, &x->stats, sizeof(p->stats));
466 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
467 p->mode = x->props.mode;
468 p->replay_window = x->props.replay_window;
469 p->reqid = x->props.reqid;
470 p->family = x->props.family;
471 p->flags = x->props.flags;
472 p->seq = x->km.seq;
473 }
474
475 struct xfrm_dump_info {
476 struct sk_buff *in_skb;
477 struct sk_buff *out_skb;
478 u32 nlmsg_seq;
479 u16 nlmsg_flags;
480 int start_idx;
481 int this_idx;
482 };
483
484 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
485 {
486 int ctx_size = sizeof(struct xfrm_sec_ctx) + s->ctx_len;
487 struct xfrm_user_sec_ctx *uctx;
488 struct nlattr *attr;
489
490 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
491 if (attr == NULL)
492 return -EMSGSIZE;
493
494 uctx = nla_data(attr);
495 uctx->exttype = XFRMA_SEC_CTX;
496 uctx->len = ctx_size;
497 uctx->ctx_doi = s->ctx_doi;
498 uctx->ctx_alg = s->ctx_alg;
499 uctx->ctx_len = s->ctx_len;
500 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
501
502 return 0;
503 }
504
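/* Emit one SA as an XFRM_MSG_NEWSA message into the dump skb;
* entries before start_idx are counted but skipped. */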
505 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
506 {
507 struct xfrm_dump_info *sp = ptr;
508 struct sk_buff *in_skb = sp->in_skb;
509 struct sk_buff *skb = sp->out_skb;
510 struct xfrm_usersa_info *p;
511 struct nlmsghdr *nlh;
512
513 if (sp->this_idx < sp->start_idx)
514 goto out;
515
516 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
517 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
518 if (nlh == NULL)
519 return -EMSGSIZE;
520
521 p = nlmsg_data(nlh);
522 copy_to_user_state(x, p);
523
524 if (x->aalg)
525 NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg);
526 if (x->ealg)
527 NLA_PUT(skb, XFRMA_ALG_CRYPT, alg_len(x->ealg), x->ealg);
528 if (x->calg)
529 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
530
531 if (x->encap)
532 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
533
534 if (x->security && copy_sec_ctx(x->security, skb) < 0)
535 goto nla_put_failure;
536
537 if (x->coaddr)
538 NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
539
540 if (x->lastused)
541 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
542
543 nlmsg_end(skb, nlh);
544 out:
545 sp->this_idx++;
546 return 0;
547
548 nla_put_failure:
549 nlmsg_cancel(skb, nlh);
550 return -EMSGSIZE;
551 }
552
553 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
554 {
555 struct xfrm_dump_info info;
556
557 info.in_skb = cb->skb;
558 info.out_skb = skb;
559 info.nlmsg_seq = cb->nlh->nlmsg_seq;
560 info.nlmsg_flags = NLM_F_MULTI;
561 info.this_idx = 0;
562 info.start_idx = cb->args[0];
563 (void) xfrm_state_walk(0, dump_one_state, &info);
564 cb->args[0] = info.this_idx;
565
566 return skb->len;
567 }
568
569 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
570 struct xfrm_state *x, u32 seq)
571 {
572 struct xfrm_dump_info info;
573 struct sk_buff *skb;
574
575 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
576 if (!skb)
577 return ERR_PTR(-ENOMEM);
578
579 info.in_skb = in_skb;
580 info.out_skb = skb;
581 info.nlmsg_seq = seq;
582 info.nlmsg_flags = 0;
583 info.this_idx = info.start_idx = 0;
584
585 if (dump_one_state(x, 0, &info)) {
586 kfree_skb(skb);
587 return NULL;
588 }
589
590 return skb;
591 }
592
593 static inline size_t xfrm_spdinfo_msgsize(void)
594 {
595 return NLMSG_ALIGN(4)
596 + nla_total_size(sizeof(struct xfrmu_spdinfo))
597 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
598 }
599
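/* Fill an XFRM_MSG_NEWSPDINFO reply: echo the request flags and attach
* the SPD counters and hash-table statistics as attributes. */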
600 static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
601 {
602 struct xfrmk_spdinfo si;
603 struct xfrmu_spdinfo spc;
604 struct xfrmu_spdhinfo sph;
605 struct nlmsghdr *nlh;
606 u32 *f;
607
608 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
609 if (nlh == NULL) /* shouldn't really happen ... */
610 return -EMSGSIZE;
611
612 f = nlmsg_data(nlh);
613 *f = flags;
614 xfrm_spd_getinfo(&si);
615 spc.incnt = si.incnt;
616 spc.outcnt = si.outcnt;
617 spc.fwdcnt = si.fwdcnt;
618 spc.inscnt = si.inscnt;
619 spc.outscnt = si.outscnt;
620 spc.fwdscnt = si.fwdscnt;
621 sph.spdhcnt = si.spdhcnt;
622 sph.spdhmcnt = si.spdhmcnt;
623
624 NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
625 NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
626
627 return nlmsg_end(skb, nlh);
628
629 nla_put_failure:
630 nlmsg_cancel(skb, nlh);
631 return -EMSGSIZE;
632 }
633
634 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
635 struct nlattr **attrs)
636 {
637 struct sk_buff *r_skb;
638 u32 *flags = nlmsg_data(nlh);
639 u32 spid = NETLINK_CB(skb).pid;
640 u32 seq = nlh->nlmsg_seq;
641
642 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
643 if (r_skb == NULL)
644 return -ENOMEM;
645
646 if (build_spdinfo(r_skb, spid, seq, *flags) < 0)
647 BUG();
648
649 return nlmsg_unicast(xfrm_nl, r_skb, spid);
650 }
651
652 static inline size_t xfrm_sadinfo_msgsize(void)
653 {
654 return NLMSG_ALIGN(4)
655 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
656 + nla_total_size(4); /* XFRMA_SAD_CNT */
657 }
658
659 static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
660 {
661 struct xfrmk_sadinfo si;
662 struct xfrmu_sadhinfo sh;
663 struct nlmsghdr *nlh;
664 u32 *f;
665
666 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
667 if (nlh == NULL) /* shouldn't really happen ... */
668 return -EMSGSIZE;
669
670 f = nlmsg_data(nlh);
671 *f = flags;
672 xfrm_sad_getinfo(&si);
673
674 sh.sadhmcnt = si.sadhmcnt;
675 sh.sadhcnt = si.sadhcnt;
676
677 NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
678 NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
679
680 return nlmsg_end(skb, nlh);
681
682 nla_put_failure:
683 nlmsg_cancel(skb, nlh);
684 return -EMSGSIZE;
685 }
686
687 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
688 struct nlattr **attrs)
689 {
690 struct sk_buff *r_skb;
691 u32 *flags = nlmsg_data(nlh);
692 u32 spid = NETLINK_CB(skb).pid;
693 u32 seq = nlh->nlmsg_seq;
694
695 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
696 if (r_skb == NULL)
697 return -ENOMEM;
698
699 if (build_sadinfo(r_skb, spid, seq, *flags) < 0)
700 BUG();
701
702 return nlmsg_unicast(xfrm_nl, r_skb, spid);
703 }
704
705 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
706 struct nlattr **attrs)
707 {
708 struct xfrm_usersa_id *p = nlmsg_data(nlh);
709 struct xfrm_state *x;
710 struct sk_buff *resp_skb;
711 int err = -ESRCH;
712
713 x = xfrm_user_state_lookup(p, attrs, &err);
714 if (x == NULL)
715 goto out_noput;
716
717 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
718 if (IS_ERR(resp_skb)) {
719 err = PTR_ERR(resp_skb);
720 } else {
721 err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid);
722 }
723 xfrm_state_put(x);
724 out_noput:
725 return err;
726 }
727
728 static int verify_userspi_info(struct xfrm_userspi_info *p)
729 {
730 switch (p->info.id.proto) {
731 case IPPROTO_AH:
732 case IPPROTO_ESP:
733 break;
734
735 case IPPROTO_COMP:
736 /* IPCOMP spi is 16-bits. */
737 if (p->max >= 0x10000)
738 return -EINVAL;
739 break;
740
741 default:
742 return -EINVAL;
743 }
744
745 if (p->min > p->max)
746 return -EINVAL;
747
748 return 0;
749 }
750
751 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
752 struct nlattr **attrs)
753 {
754 struct xfrm_state *x;
755 struct xfrm_userspi_info *p;
756 struct sk_buff *resp_skb;
757 xfrm_address_t *daddr;
758 int family;
759 int err;
760
761 p = nlmsg_data(nlh);
762 err = verify_userspi_info(p);
763 if (err)
764 goto out_noput;
765
766 family = p->info.family;
767 daddr = &p->info.id.daddr;
768
769 x = NULL;
770 if (p->info.seq) {
771 x = xfrm_find_acq_byseq(p->info.seq);
772 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
773 xfrm_state_put(x);
774 x = NULL;
775 }
776 }
777
778 if (!x)
779 x = xfrm_find_acq(p->info.mode, p->info.reqid,
780 p->info.id.proto, daddr,
781 &p->info.saddr, 1,
782 family);
783 err = -ENOENT;
784 if (x == NULL)
785 goto out_noput;
786
787 resp_skb = ERR_PTR(-ENOENT);
788
789 spin_lock_bh(&x->lock);
790 if (x->km.state != XFRM_STATE_DEAD) {
791 xfrm_alloc_spi(x, htonl(p->min), htonl(p->max));
792 if (x->id.spi)
793 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
794 }
795 spin_unlock_bh(&x->lock);
796
797 if (IS_ERR(resp_skb)) {
798 err = PTR_ERR(resp_skb);
799 goto out;
800 }
801
802 err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid);
803
804 out:
805 xfrm_state_put(x);
806 out_noput:
807 return err;
808 }
809
810 static int verify_policy_dir(u8 dir)
811 {
812 switch (dir) {
813 case XFRM_POLICY_IN:
814 case XFRM_POLICY_OUT:
815 case XFRM_POLICY_FWD:
816 break;
817
818 default:
819 return -EINVAL;
820 }
821
822 return 0;
823 }
824
825 static int verify_policy_type(u8 type)
826 {
827 switch (type) {
828 case XFRM_POLICY_TYPE_MAIN:
829 #ifdef CONFIG_XFRM_SUB_POLICY
830 case XFRM_POLICY_TYPE_SUB:
831 #endif
832 break;
833
834 default:
835 return -EINVAL;
836 }
837
838 return 0;
839 }
840
841 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
842 {
843 switch (p->share) {
844 case XFRM_SHARE_ANY:
845 case XFRM_SHARE_SESSION:
846 case XFRM_SHARE_USER:
847 case XFRM_SHARE_UNIQUE:
848 break;
849
850 default:
851 return -EINVAL;
852 }
853
854 switch (p->action) {
855 case XFRM_POLICY_ALLOW:
856 case XFRM_POLICY_BLOCK:
857 break;
858
859 default:
860 return -EINVAL;
861 }
862
863 switch (p->sel.family) {
864 case AF_INET:
865 break;
866
867 case AF_INET6:
868 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
869 break;
870 #else
871 return -EAFNOSUPPORT;
872 #endif
873
874 default:
875 return -EINVAL;
876 }
877
878 return verify_policy_dir(p->dir);
879 }
880
881 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
882 {
883 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
884 struct xfrm_user_sec_ctx *uctx;
885
886 if (!rt)
887 return 0;
888
889 uctx = nla_data(rt);
890 return security_xfrm_policy_alloc(pol, uctx);
891 }
892
893 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
894 int nr)
895 {
896 int i;
897
898 xp->xfrm_nr = nr;
899 for (i = 0; i < nr; i++, ut++) {
900 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
901
902 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
903 memcpy(&t->saddr, &ut->saddr,
904 sizeof(xfrm_address_t));
905 t->reqid = ut->reqid;
906 t->mode = ut->mode;
907 t->share = ut->share;
908 t->optional = ut->optional;
909 t->aalgos = ut->aalgos;
910 t->ealgos = ut->ealgos;
911 t->calgos = ut->calgos;
912 t->encap_family = ut->family;
913 }
914 }
915
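/* Bound the template count and give each template a default family
* (that of the policy) when userspace left it unset. */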
916 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
917 {
918 int i;
919
920 if (nr > XFRM_MAX_DEPTH)
921 return -EINVAL;
922
923 for (i = 0; i < nr; i++) {
924 /* We never validated the ut->family value, so many
925 * applications simply leave it at zero. The check was
926 * never made and ut->family was ignored because all
927 * templates could be assumed to have the same family as
928 * the policy itself. Now that we will have ipv4-in-ipv6
929 * and ipv6-in-ipv4 tunnels, this is no longer true.
930 */
931 if (!ut[i].family)
932 ut[i].family = family;
933
934 switch (ut[i].family) {
935 case AF_INET:
936 break;
937 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
938 case AF_INET6:
939 break;
940 #endif
941 default:
942 return -EINVAL;
943 }
944 }
945
946 return 0;
947 }
948
949 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
950 {
951 struct nlattr *rt = attrs[XFRMA_TMPL];
952
953 if (!rt) {
954 pol->xfrm_nr = 0;
955 } else {
956 struct xfrm_user_tmpl *utmpl = nla_data(rt);
957 int nr = nla_len(rt) / sizeof(*utmpl);
958 int err;
959
960 err = validate_tmpl(nr, utmpl, pol->family);
961 if (err)
962 return err;
963
964 copy_templates(pol, utmpl, nr);
965 }
966 return 0;
967 }
968
969 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
970 {
971 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
972 struct xfrm_userpolicy_type *upt;
973 u8 type = XFRM_POLICY_TYPE_MAIN;
974 int err;
975
976 if (rt) {
977 upt = nla_data(rt);
978 type = upt->type;
979 }
980
981 err = verify_policy_type(type);
982 if (err)
983 return err;
984
985 *tp = type;
986 return 0;
987 }
988
989 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
990 {
991 xp->priority = p->priority;
992 xp->index = p->index;
993 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
994 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
995 xp->action = p->action;
996 xp->flags = p->flags;
997 xp->family = p->sel.family;
998 /* XXX xp->share = p->share; */
999 }
1000
1001 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1002 {
1003 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1004 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1005 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1006 p->priority = xp->priority;
1007 p->index = xp->index;
1008 p->sel.family = xp->family;
1009 p->dir = dir;
1010 p->action = xp->action;
1011 p->flags = xp->flags;
1012 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1013 }
1014
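/* Allocate an xfrm_policy and fill it from the userspace description;
* on error the partially built policy is freed and *errp is set. */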
1015 static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
1016 {
1017 struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
1018 int err;
1019
1020 if (!xp) {
1021 *errp = -ENOMEM;
1022 return NULL;
1023 }
1024
1025 copy_from_user_policy(xp, p);
1026
1027 err = copy_from_user_policy_type(&xp->type, attrs);
1028 if (err)
1029 goto error;
1030
1031 if (!(err = copy_from_user_tmpl(xp, attrs)))
1032 err = copy_from_user_sec_ctx(xp, attrs);
1033 if (err)
1034 goto error;
1035
1036 return xp;
1037 error:
1038 *errp = err;
1039 kfree(xp);
1040 return NULL;
1041 }
1042
1043 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1044 struct nlattr **attrs)
1045 {
1046 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1047 struct xfrm_policy *xp;
1048 struct km_event c;
1049 int err;
1050 int excl;
1051
1052 err = verify_newpolicy_info(p);
1053 if (err)
1054 return err;
1055 err = verify_sec_ctx_len(attrs);
1056 if (err)
1057 return err;
1058
1059 xp = xfrm_policy_construct(p, attrs, &err);
1060 if (!xp)
1061 return err;
1062
1063 /* Shouldn't excl be based on nlh flags??
1064 * Aha! This is really anti-netlink, i.e. more pfkey-derived;
1065 * in netlink excl is a flag and you wouldn't need
1066 * a separate type XFRM_MSG_UPDPOLICY - JHS */
1067 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1068 err = xfrm_policy_insert(p->dir, xp, excl);
1069 xfrm_audit_policy_add(xp, err ? 0 : 1, NETLINK_CB(skb).loginuid,
1070 NETLINK_CB(skb).sid);
1071
1072 if (err) {
1073 security_xfrm_policy_free(xp);
1074 kfree(xp);
1075 return err;
1076 }
1077
1078 c.event = nlh->nlmsg_type;
1079 c.seq = nlh->nlmsg_seq;
1080 c.pid = nlh->nlmsg_pid;
1081 km_policy_notify(xp, p->dir, &c);
1082
1083 xfrm_pol_put(xp);
1084
1085 return 0;
1086 }
1087
1088 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1089 {
1090 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1091 int i;
1092
1093 if (xp->xfrm_nr == 0)
1094 return 0;
1095
1096 for (i = 0; i < xp->xfrm_nr; i++) {
1097 struct xfrm_user_tmpl *up = &vec[i];
1098 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1099
1100 memcpy(&up->id, &kp->id, sizeof(up->id));
1101 up->family = kp->encap_family;
1102 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1103 up->reqid = kp->reqid;
1104 up->mode = kp->mode;
1105 up->share = kp->share;
1106 up->optional = kp->optional;
1107 up->aalgos = kp->aalgos;
1108 up->ealgos = kp->ealgos;
1109 up->calgos = kp->calgos;
1110 }
1111
1112 return nla_put(skb, XFRMA_TMPL,
1113 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1114 }
1115
1116 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1117 {
1118 if (x->security) {
1119 return copy_sec_ctx(x->security, skb);
1120 }
1121 return 0;
1122 }
1123
1124 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1125 {
1126 if (xp->security) {
1127 return copy_sec_ctx(xp->security, skb);
1128 }
1129 return 0;
1130 }
1131 static inline size_t userpolicy_type_attrsize(void)
1132 {
1133 #ifdef CONFIG_XFRM_SUB_POLICY
1134 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1135 #else
1136 return 0;
1137 #endif
1138 }
1139
1140 #ifdef CONFIG_XFRM_SUB_POLICY
1141 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1142 {
1143 struct xfrm_userpolicy_type upt = {
1144 .type = type,
1145 };
1146
1147 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1148 }
1149
1150 #else
1151 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1152 {
1153 return 0;
1154 }
1155 #endif
1156
1157 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1158 {
1159 struct xfrm_dump_info *sp = ptr;
1160 struct xfrm_userpolicy_info *p;
1161 struct sk_buff *in_skb = sp->in_skb;
1162 struct sk_buff *skb = sp->out_skb;
1163 struct nlmsghdr *nlh;
1164
1165 if (sp->this_idx < sp->start_idx)
1166 goto out;
1167
1168 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1169 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1170 if (nlh == NULL)
1171 return -EMSGSIZE;
1172
1173 p = nlmsg_data(nlh);
1174 copy_to_user_policy(xp, p, dir);
1175 if (copy_to_user_tmpl(xp, skb) < 0)
1176 goto nlmsg_failure;
1177 if (copy_to_user_sec_ctx(xp, skb))
1178 goto nlmsg_failure;
1179 if (copy_to_user_policy_type(xp->type, skb) < 0)
1180 goto nlmsg_failure;
1181
1182 nlmsg_end(skb, nlh);
1183 out:
1184 sp->this_idx++;
1185 return 0;
1186
1187 nlmsg_failure:
1188 nlmsg_cancel(skb, nlh);
1189 return -EMSGSIZE;
1190 }
1191
1192 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1193 {
1194 struct xfrm_dump_info info;
1195
1196 info.in_skb = cb->skb;
1197 info.out_skb = skb;
1198 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1199 info.nlmsg_flags = NLM_F_MULTI;
1200 info.this_idx = 0;
1201 info.start_idx = cb->args[0];
1202 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_MAIN, dump_one_policy, &info);
1203 #ifdef CONFIG_XFRM_SUB_POLICY
1204 (void) xfrm_policy_walk(XFRM_POLICY_TYPE_SUB, dump_one_policy, &info);
1205 #endif
1206 cb->args[0] = info.this_idx;
1207
1208 return skb->len;
1209 }
1210
1211 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1212 struct xfrm_policy *xp,
1213 int dir, u32 seq)
1214 {
1215 struct xfrm_dump_info info;
1216 struct sk_buff *skb;
1217
1218 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1219 if (!skb)
1220 return ERR_PTR(-ENOMEM);
1221
1222 info.in_skb = in_skb;
1223 info.out_skb = skb;
1224 info.nlmsg_seq = seq;
1225 info.nlmsg_flags = 0;
1226 info.this_idx = info.start_idx = 0;
1227
1228 if (dump_one_policy(xp, dir, 0, &info) < 0) {
1229 kfree_skb(skb);
1230 return NULL;
1231 }
1232
1233 return skb;
1234 }
1235
1236 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1237 struct nlattr **attrs)
1238 {
1239 struct xfrm_policy *xp;
1240 struct xfrm_userpolicy_id *p;
1241 u8 type = XFRM_POLICY_TYPE_MAIN;
1242 int err;
1243 struct km_event c;
1244 int delete;
1245
1246 p = nlmsg_data(nlh);
1247 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1248
1249 err = copy_from_user_policy_type(&type, attrs);
1250 if (err)
1251 return err;
1252
1253 err = verify_policy_dir(p->dir);
1254 if (err)
1255 return err;
1256
1257 if (p->index)
1258 xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err);
1259 else {
1260 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1261 struct xfrm_policy tmp;
1262
1263 err = verify_sec_ctx_len(attrs);
1264 if (err)
1265 return err;
1266
1267 memset(&tmp, 0, sizeof(struct xfrm_policy));
1268 if (rt) {
1269 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1270
1271 if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
1272 return err;
1273 }
1274 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
1275 delete, &err);
1276 security_xfrm_policy_free(&tmp);
1277 }
1278 if (xp == NULL)
1279 return -ENOENT;
1280
1281 if (!delete) {
1282 struct sk_buff *resp_skb;
1283
1284 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1285 if (IS_ERR(resp_skb)) {
1286 err = PTR_ERR(resp_skb);
1287 } else {
1288 err = nlmsg_unicast(xfrm_nl, resp_skb,
1289 NETLINK_CB(skb).pid);
1290 }
1291 } else {
1292 xfrm_audit_policy_delete(xp, err ? 0 : 1,
1293 NETLINK_CB(skb).loginuid,
1294 NETLINK_CB(skb).sid);
1295
1296 if (err != 0)
1297 goto out;
1298
1299 c.data.byid = p->index;
1300 c.event = nlh->nlmsg_type;
1301 c.seq = nlh->nlmsg_seq;
1302 c.pid = nlh->nlmsg_pid;
1303 km_policy_notify(xp, p->dir, &c);
1304 }
1305
1306 out:
1307 xfrm_pol_put(xp);
1308 return err;
1309 }
1310
1311 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1312 struct nlattr **attrs)
1313 {
1314 struct km_event c;
1315 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1316 struct xfrm_audit audit_info;
1317 int err;
1318
1319 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1320 audit_info.secid = NETLINK_CB(skb).sid;
1321 err = xfrm_state_flush(p->proto, &audit_info);
1322 if (err)
1323 return err;
1324 c.data.proto = p->proto;
1325 c.event = nlh->nlmsg_type;
1326 c.seq = nlh->nlmsg_seq;
1327 c.pid = nlh->nlmsg_pid;
1328 km_state_notify(NULL, &c);
1329
1330 return 0;
1331 }
1332
1333 static inline size_t xfrm_aevent_msgsize(void)
1334 {
1335 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1336 + nla_total_size(sizeof(struct xfrm_replay_state))
1337 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1338 + nla_total_size(4) /* XFRM_AE_RTHR */
1339 + nla_total_size(4); /* XFRM_AE_ETHR */
1340 }
1341
1342 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1343 {
1344 struct xfrm_aevent_id *id;
1345 struct nlmsghdr *nlh;
1346
1347 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1348 if (nlh == NULL)
1349 return -EMSGSIZE;
1350
1351 id = nlmsg_data(nlh);
1352 memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
1353 id->sa_id.spi = x->id.spi;
1354 id->sa_id.family = x->props.family;
1355 id->sa_id.proto = x->id.proto;
1356 memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
1357 id->reqid = x->props.reqid;
1358 id->flags = c->data.aevent;
1359
1360 NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
1361 NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1362
1363 if (id->flags & XFRM_AE_RTHR)
1364 NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1365
1366 if (id->flags & XFRM_AE_ETHR)
1367 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
1368 x->replay_maxage * 10 / HZ);
1369
1370 return nlmsg_end(skb, nlh);
1371
1372 nla_put_failure:
1373 nlmsg_cancel(skb, nlh);
1374 return -EMSGSIZE;
1375 }
1376
1377 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1378 struct nlattr **attrs)
1379 {
1380 struct xfrm_state *x;
1381 struct sk_buff *r_skb;
1382 int err;
1383 struct km_event c;
1384 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1385 struct xfrm_usersa_id *id = &p->sa_id;
1386
1387 r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
1388 if (r_skb == NULL)
1389 return -ENOMEM;
1390
1391 x = xfrm_state_lookup(&id->daddr, id->spi, id->proto, id->family);
1392 if (x == NULL) {
1393 kfree_skb(r_skb);
1394 return -ESRCH;
1395 }
1396
1397 /*
1398 * XXX: is this lock really needed? None of the other
1399 * handlers take it (the concern is the state being updated
1400 * while we are still reading it) - jhs
1401 */
1402 spin_lock_bh(&x->lock);
1403 c.data.aevent = p->flags;
1404 c.seq = nlh->nlmsg_seq;
1405 c.pid = nlh->nlmsg_pid;
1406
1407 if (build_aevent(r_skb, x, &c) < 0)
1408 BUG();
1409 err = nlmsg_unicast(xfrm_nl, r_skb, NETLINK_CB(skb).pid);
1410 spin_unlock_bh(&x->lock);
1411 xfrm_state_put(x);
1412 return err;
1413 }
1414
1415 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1416 struct nlattr **attrs)
1417 {
1418 struct xfrm_state *x;
1419 struct km_event c;
1420 int err = -EINVAL;
1421 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1422 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1423 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
1424
1425 if (!lt && !rp)
1426 return err;
1427
1428 /* pedantic mode - thou shalt sayeth replaceth */
1429 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1430 return err;
1431
1432 x = xfrm_state_lookup(&p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1433 if (x == NULL)
1434 return -ESRCH;
1435
1436 if (x->km.state != XFRM_STATE_VALID)
1437 goto out;
1438
1439 spin_lock_bh(&x->lock);
1440 xfrm_update_ae_params(x, attrs);
1441 spin_unlock_bh(&x->lock);
1442
1443 c.event = nlh->nlmsg_type;
1444 c.seq = nlh->nlmsg_seq;
1445 c.pid = nlh->nlmsg_pid;
1446 c.data.aevent = XFRM_AE_CU;
1447 km_state_notify(x, &c);
1448 err = 0;
1449 out:
1450 xfrm_state_put(x);
1451 return err;
1452 }
1453
1454 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1455 struct nlattr **attrs)
1456 {
1457 struct km_event c;
1458 u8 type = XFRM_POLICY_TYPE_MAIN;
1459 int err;
1460 struct xfrm_audit audit_info;
1461
1462 err = copy_from_user_policy_type(&type, attrs);
1463 if (err)
1464 return err;
1465
1466 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1467 audit_info.secid = NETLINK_CB(skb).sid;
1468 err = xfrm_policy_flush(type, &audit_info);
1469 if (err)
1470 return err;
1471 c.data.type = type;
1472 c.event = nlh->nlmsg_type;
1473 c.seq = nlh->nlmsg_seq;
1474 c.pid = nlh->nlmsg_pid;
1475 km_policy_notify(NULL, 0, &c);
1476 return 0;
1477 }
1478
1479 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1480 struct nlattr **attrs)
1481 {
1482 struct xfrm_policy *xp;
1483 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
1484 struct xfrm_userpolicy_info *p = &up->pol;
1485 u8 type = XFRM_POLICY_TYPE_MAIN;
1486 int err = -ENOENT;
1487
1488 err = copy_from_user_policy_type(&type, attrs);
1489 if (err)
1490 return err;
1491
1492 if (p->index)
1493 xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err);
1494 else {
1495 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1496 struct xfrm_policy tmp;
1497
1498 err = verify_sec_ctx_len(attrs);
1499 if (err)
1500 return err;
1501
1502 memset(&tmp, 0, sizeof(struct xfrm_policy));
1503 if (rt) {
1504 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1505
1506 if ((err = security_xfrm_policy_alloc(&tmp, uctx)))
1507 return err;
1508 }
1509 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security,
1510 0, &err);
1511 security_xfrm_policy_free(&tmp);
1512 }
1513
1514 if (xp == NULL)
1515 return -ENOENT;
1516 read_lock(&xp->lock);
1517 if (xp->dead) {
1518 read_unlock(&xp->lock);
1519 goto out;
1520 }
1521
1522 read_unlock(&xp->lock);
1523 err = 0;
1524 if (up->hard) {
1525 xfrm_policy_delete(xp, p->dir);
1526 xfrm_audit_policy_delete(xp, 1, NETLINK_CB(skb).loginuid,
1527 NETLINK_CB(skb).sid);
1528
1529 } else {
1530 // reset the timers here?
1531 printk("Don't know what to do with soft policy expire\n");
1532 }
1533 km_policy_expired(xp, p->dir, up->hard, current->pid);
1534
1535 out:
1536 xfrm_pol_put(xp);
1537 return err;
1538 }
1539
1540 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1541 struct nlattr **attrs)
1542 {
1543 struct xfrm_state *x;
1544 int err;
1545 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1546 struct xfrm_usersa_info *p = &ue->state;
1547
1548 x = xfrm_state_lookup(&p->id.daddr, p->id.spi, p->id.proto, p->family);
1549
1550 err = -ENOENT;
1551 if (x == NULL)
1552 return err;
1553
1554 spin_lock_bh(&x->lock);
1555 err = -EINVAL;
1556 if (x->km.state != XFRM_STATE_VALID)
1557 goto out;
1558 km_state_expired(x, ue->hard, current->pid);
1559
1560 if (ue->hard) {
1561 __xfrm_state_delete(x);
1562 xfrm_audit_state_delete(x, 1, NETLINK_CB(skb).loginuid,
1563 NETLINK_CB(skb).sid);
1564 }
1565 err = 0;
1566 out:
1567 spin_unlock_bh(&x->lock);
1568 xfrm_state_put(x);
1569 return err;
1570 }
1571
1572 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1573 struct nlattr **attrs)
1574 {
1575 struct xfrm_policy *xp;
1576 struct xfrm_user_tmpl *ut;
1577 int i;
1578 struct nlattr *rt = attrs[XFRMA_TMPL];
1579
1580 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
1581 struct xfrm_state *x = xfrm_state_alloc();
1582 int err = -ENOMEM;
1583
1584 if (!x)
1585 return err;
1586
1587 err = verify_newpolicy_info(&ua->policy);
1588 if (err) {
1589 printk("BAD policy passed\n");
1590 kfree(x);
1591 return err;
1592 }
1593
1594 /* build an XP */
1595 xp = xfrm_policy_construct(&ua->policy, attrs, &err);
1596 if (!xp) {
1597 kfree(x);
1598 return err;
1599 }
1600
1601 memcpy(&x->id, &ua->id, sizeof(ua->id));
1602 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1603 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1604
1605 ut = nla_data(rt);
1606 /* extract the templates and for each call km_key */
1607 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
1608 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1609 memcpy(&x->id, &t->id, sizeof(x->id));
1610 x->props.mode = t->mode;
1611 x->props.reqid = t->reqid;
1612 x->props.family = ut->family;
1613 t->aalgos = ua->aalgos;
1614 t->ealgos = ua->ealgos;
1615 t->calgos = ua->calgos;
1616 err = km_query(x, t, xp);
1617
1618 }
1619
1620 kfree(x);
1621 kfree(xp);
1622
1623 return 0;
1624 }
1625
1626 #ifdef CONFIG_XFRM_MIGRATE
1627 static int copy_from_user_migrate(struct xfrm_migrate *ma,
1628 struct nlattr **attrs, int *num)
1629 {
1630 struct nlattr *rt = attrs[XFRMA_MIGRATE];
1631 struct xfrm_user_migrate *um;
1632 int i, num_migrate;
1633
1634 um = nla_data(rt);
1635 num_migrate = nla_len(rt) / sizeof(*um);
1636
1637 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
1638 return -EINVAL;
1639
1640 for (i = 0; i < num_migrate; i++, um++, ma++) {
1641 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
1642 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
1643 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
1644 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
1645
1646 ma->proto = um->proto;
1647 ma->mode = um->mode;
1648 ma->reqid = um->reqid;
1649
1650 ma->old_family = um->old_family;
1651 ma->new_family = um->new_family;
1652 }
1653
1654 *num = i;
1655 return 0;
1656 }
1657
1658 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1659 struct nlattr **attrs)
1660 {
1661 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
1662 struct xfrm_migrate m[XFRM_MAX_DEPTH];
1663 u8 type;
1664 int err;
1665 int n = 0;
1666
1667 if (attrs[XFRMA_MIGRATE] == NULL)
1668 return -EINVAL;
1669
1670 err = copy_from_user_policy_type(&type, attrs);
1671 if (err)
1672 return err;
1673
1674 err = copy_from_user_migrate((struct xfrm_migrate *)m,
1675 attrs, &n);
1676 if (err)
1677 return err;
1678
1679 if (!n)
1680 return 0;
1681
1682 xfrm_migrate(&pi->sel, pi->dir, type, m, n);
1683
1684 return 0;
1685 }
1686 #else
1687 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1688 struct nlattr **attrs)
1689 {
1690 return -ENOPROTOOPT;
1691 }
1692 #endif
1693
1694 #ifdef CONFIG_XFRM_MIGRATE
1695 static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
1696 {
1697 struct xfrm_user_migrate um;
1698
1699 memset(&um, 0, sizeof(um));
1700 um.proto = m->proto;
1701 um.mode = m->mode;
1702 um.reqid = m->reqid;
1703 um.old_family = m->old_family;
1704 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
1705 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
1706 um.new_family = m->new_family;
1707 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
1708 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
1709
1710 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
1711 }
1712
1713 static inline size_t xfrm_migrate_msgsize(int num_migrate)
1714 {
1715 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
1716 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
1717 + userpolicy_type_attrsize();
1718 }
1719
1720 static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
1721 int num_migrate, struct xfrm_selector *sel,
1722 u8 dir, u8 type)
1723 {
1724 struct xfrm_migrate *mp;
1725 struct xfrm_userpolicy_id *pol_id;
1726 struct nlmsghdr *nlh;
1727 int i;
1728
1729 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
1730 if (nlh == NULL)
1731 return -EMSGSIZE;
1732
1733 pol_id = nlmsg_data(nlh);
1734 /* copy data from selector, dir, and type to the pol_id */
1735 memset(pol_id, 0, sizeof(*pol_id));
1736 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
1737 pol_id->dir = dir;
1738
1739 if (copy_to_user_policy_type(type, skb) < 0)
1740 goto nlmsg_failure;
1741
1742 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
1743 if (copy_to_user_migrate(mp, skb) < 0)
1744 goto nlmsg_failure;
1745 }
1746
1747 return nlmsg_end(skb, nlh);
1748 nlmsg_failure:
1749 nlmsg_cancel(skb, nlh);
1750 return -EMSGSIZE;
1751 }
1752
1753 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1754 struct xfrm_migrate *m, int num_migrate)
1755 {
1756 struct sk_buff *skb;
1757
1758 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate), GFP_ATOMIC);
1759 if (skb == NULL)
1760 return -ENOMEM;
1761
1762 /* build migrate */
1763 if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0)
1764 BUG();
1765
1766 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
1767 }
1768 #else
1769 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1770 struct xfrm_migrate *m, int num_migrate)
1771 {
1772 return -ENOPROTOOPT;
1773 }
1774 #endif
1775
1776 #define XMSGSIZE(type) sizeof(struct type)
1777
1778 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
1779 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1780 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1781 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1782 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1783 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1784 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1785 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
1786 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
1787 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
1788 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1789 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1790 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
1791 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
1792 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
1793 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1794 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1795 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
1796 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1797 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
1798 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
1799 };
1800
1801 #undef XMSGSIZE
1802
1803 static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
1804 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
1805 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
1806 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
1807 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
1808 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
1809 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
1810 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
1811 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
1812 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
1813 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
1814 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
1815 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
1816 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
1817 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
1818 };
1819
1820 static struct xfrm_link {
1821 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
1822 int (*dump)(struct sk_buff *, struct netlink_callback *);
1823 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
1824 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1825 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
1826 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
1827 .dump = xfrm_dump_sa },
1828 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1829 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
1830 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
1831 .dump = xfrm_dump_policy },
1832 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
1833 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
1834 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
1835 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1836 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1837 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
1838 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
1839 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
1840 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
1841 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
1842 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
1843 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
1844 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
1845 };
1846
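/* Top-level netlink request handler: require CAP_NET_ADMIN, start a
* dump for GETSA/GETPOLICY requests carrying NLM_F_DUMP, otherwise
* parse attributes against xfrma_policy and call the doit handler. */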
1847 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1848 {
1849 struct nlattr *attrs[XFRMA_MAX+1];
1850 struct xfrm_link *link;
1851 int type, err;
1852
1853 type = nlh->nlmsg_type;
1854 if (type > XFRM_MSG_MAX)
1855 return -EINVAL;
1856
1857 type -= XFRM_MSG_BASE;
1858 link = &xfrm_dispatch[type];
1859
1860 /* All operations require privileges, even GET */
1861 if (security_netlink_recv(skb, CAP_NET_ADMIN))
1862 return -EPERM;
1863
1864 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
1865 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
1866 (nlh->nlmsg_flags & NLM_F_DUMP)) {
1867 if (link->dump == NULL)
1868 return -EINVAL;
1869
1870 return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL);
1871 }
1872
1873 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
1874 xfrma_policy);
1875 if (err < 0)
1876 return err;
1877
1878 if (link->doit == NULL)
1879 return -EINVAL;
1880
1881 return link->doit(skb, nlh, attrs);
1882 }
1883
1884 static void xfrm_netlink_rcv(struct sock *sk, int len)
1885 {
1886 unsigned int qlen = 0;
1887
1888 do {
1889 mutex_lock(&xfrm_cfg_mutex);
1890 netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
1891 mutex_unlock(&xfrm_cfg_mutex);
1892
1893 } while (qlen);
1894 }
1895
1896 static inline size_t xfrm_expire_msgsize(void)
1897 {
1898 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire));
1899 }
1900
1901 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1902 {
1903 struct xfrm_user_expire *ue;
1904 struct nlmsghdr *nlh;
1905
1906 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
1907 if (nlh == NULL)
1908 return -EMSGSIZE;
1909
1910 ue = nlmsg_data(nlh);
1911 copy_to_user_state(x, &ue->state);
1912 ue->hard = (c->data.hard != 0) ? 1 : 0;
1913
1914 return nlmsg_end(skb, nlh);
1915 }
1916
1917 static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
1918 {
1919 struct sk_buff *skb;
1920
1921 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
1922 if (skb == NULL)
1923 return -ENOMEM;
1924
1925 if (build_expire(skb, x, c) < 0)
1926 BUG();
1927
1928 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
1929 }
1930
1931 static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
1932 {
1933 struct sk_buff *skb;
1934
1935 skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
1936 if (skb == NULL)
1937 return -ENOMEM;
1938
1939 if (build_aevent(skb, x, c) < 0)
1940 BUG();
1941
1942 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
1943 }
1944
1945 static int xfrm_notify_sa_flush(struct km_event *c)
1946 {
1947 struct xfrm_usersa_flush *p;
1948 struct nlmsghdr *nlh;
1949 struct sk_buff *skb;
1950 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
1951
1952 skb = nlmsg_new(len, GFP_ATOMIC);
1953 if (skb == NULL)
1954 return -ENOMEM;
1955
1956 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
1957 if (nlh == NULL) {
1958 kfree_skb(skb);
1959 return -EMSGSIZE;
1960 }
1961
1962 p = nlmsg_data(nlh);
1963 p->proto = c->data.proto;
1964
1965 nlmsg_end(skb, nlh);
1966
1967 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
1968 }
1969
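/* Attribute space needed to describe this SA in a notification:
* algorithms and encapsulation template, excluding the base header. */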
1970 static inline size_t xfrm_sa_len(struct xfrm_state *x)
1971 {
1972 size_t l = 0;
1973 if (x->aalg)
1974 l += nla_total_size(alg_len(x->aalg));
1975 if (x->ealg)
1976 l += nla_total_size(alg_len(x->ealg));
1977 if (x->calg)
1978 l += nla_total_size(sizeof(*x->calg));
1979 if (x->encap)
1980 l += nla_total_size(sizeof(*x->encap));
1981
1982 return l;
1983 }
1984
1985 static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
1986 {
1987 struct xfrm_usersa_info *p;
1988 struct xfrm_usersa_id *id;
1989 struct nlmsghdr *nlh;
1990 struct sk_buff *skb;
1991 int len = xfrm_sa_len(x);
1992 int headlen;
1993
1994 headlen = sizeof(*p);
1995 if (c->event == XFRM_MSG_DELSA) {
1996 len += nla_total_size(headlen);
1997 headlen = sizeof(*id);
1998 }
1999 len += NLMSG_ALIGN(headlen);
2000
2001 skb = nlmsg_new(len, GFP_ATOMIC);
2002 if (skb == NULL)
2003 return -ENOMEM;
2004
2005 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2006 if (nlh == NULL)
2007 goto nla_put_failure;
2008
2009 p = nlmsg_data(nlh);
2010 if (c->event == XFRM_MSG_DELSA) {
2011 struct nlattr *attr;
2012
2013 id = nlmsg_data(nlh);
2014 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2015 id->spi = x->id.spi;
2016 id->family = x->props.family;
2017 id->proto = x->id.proto;
2018
2019 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2020 if (attr == NULL)
2021 goto nla_put_failure;
2022
2023 p = nla_data(attr);
2024 }
2025
2026 copy_to_user_state(x, p);
2027
2028 if (x->aalg)
2029 NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg);
2030 if (x->ealg)
2031 NLA_PUT(skb, XFRMA_ALG_CRYPT, alg_len(x->ealg), x->ealg);
2032 if (x->calg)
2033 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
2034
2035 if (x->encap)
2036 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
2037
2038 nlmsg_end(skb, nlh);
2039
2040 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2041
2042 nla_put_failure:
2043 kfree_skb(skb);
2044 return -1;
2045 }
2046
2047 static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
2048 {
2049
2050 switch (c->event) {
2051 case XFRM_MSG_EXPIRE:
2052 return xfrm_exp_state_notify(x, c);
2053 case XFRM_MSG_NEWAE:
2054 return xfrm_aevent_state_notify(x, c);
2055 case XFRM_MSG_DELSA:
2056 case XFRM_MSG_UPDSA:
2057 case XFRM_MSG_NEWSA:
2058 return xfrm_notify_sa(x, c);
2059 case XFRM_MSG_FLUSHSA:
2060 return xfrm_notify_sa_flush(c);
2061 default:
2062 printk("xfrm_user: Unknown SA event %d\n", c->event);
2063 break;
2064 }
2065
2066 return 0;
2067
2068 }
2069
2070 static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2071 struct xfrm_policy *xp)
2072 {
2073 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2074 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2075 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2076 + userpolicy_type_attrsize();
2077 }
2078
2079 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2080 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2081 int dir)
2082 {
2083 struct xfrm_user_acquire *ua;
2084 struct nlmsghdr *nlh;
2085 __u32 seq = xfrm_get_acqseq();
2086
2087 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2088 if (nlh == NULL)
2089 return -EMSGSIZE;
2090
2091 ua = nlmsg_data(nlh);
2092 memcpy(&ua->id, &x->id, sizeof(ua->id));
2093 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2094 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2095 copy_to_user_policy(xp, &ua->policy, dir);
2096 ua->aalgos = xt->aalgos;
2097 ua->ealgos = xt->ealgos;
2098 ua->calgos = xt->calgos;
2099 ua->seq = x->km.seq = seq;
2100
2101 if (copy_to_user_tmpl(xp, skb) < 0)
2102 goto nlmsg_failure;
2103 if (copy_to_user_state_sec_ctx(x, skb))
2104 goto nlmsg_failure;
2105 if (copy_to_user_policy_type(xp->type, skb) < 0)
2106 goto nlmsg_failure;
2107
2108 return nlmsg_end(skb, nlh);
2109
2110 nlmsg_failure:
2111 nlmsg_cancel(skb, nlh);
2112 return -EMSGSIZE;
2113 }
2114
2115 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2116 struct xfrm_policy *xp, int dir)
2117 {
2118 struct sk_buff *skb;
2119
2120 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2121 if (skb == NULL)
2122 return -ENOMEM;
2123
2124 if (build_acquire(skb, x, xt, xp, dir) < 0)
2125 BUG();
2126
2127 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2128 }
2129
2130 /* User gives us xfrm_user_policy_info followed by an array of 0
2131 * or more templates.
2132 */
2133 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2134 u8 *data, int len, int *dir)
2135 {
2136 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2137 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2138 struct xfrm_policy *xp;
2139 int nr;
2140
2141 switch (sk->sk_family) {
2142 case AF_INET:
2143 if (opt != IP_XFRM_POLICY) {
2144 *dir = -EOPNOTSUPP;
2145 return NULL;
2146 }
2147 break;
2148 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2149 case AF_INET6:
2150 if (opt != IPV6_XFRM_POLICY) {
2151 *dir = -EOPNOTSUPP;
2152 return NULL;
2153 }
2154 break;
2155 #endif
2156 default:
2157 *dir = -EINVAL;
2158 return NULL;
2159 }
2160
2161 *dir = -EINVAL;
2162
2163 if (len < sizeof(*p) ||
2164 verify_newpolicy_info(p))
2165 return NULL;
2166
2167 nr = ((len - sizeof(*p)) / sizeof(*ut));
2168 if (validate_tmpl(nr, ut, p->sel.family))
2169 return NULL;
2170
2171 if (p->dir > XFRM_POLICY_OUT)
2172 return NULL;
2173
2174 xp = xfrm_policy_alloc(GFP_KERNEL);
2175 if (xp == NULL) {
2176 *dir = -ENOBUFS;
2177 return NULL;
2178 }
2179
2180 copy_from_user_policy(xp, p);
2181 xp->type = XFRM_POLICY_TYPE_MAIN;
2182 copy_templates(xp, ut, nr);
2183
2184 *dir = p->dir;
2185
2186 return xp;
2187 }
2188
2189 static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2190 {
2191 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2192 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2193 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2194 + userpolicy_type_attrsize();
2195 }
2196
2197 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2198 int dir, struct km_event *c)
2199 {
2200 struct xfrm_user_polexpire *upe;
2201 struct nlmsghdr *nlh;
2202 int hard = c->data.hard;
2203
2204 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2205 if (nlh == NULL)
2206 return -EMSGSIZE;
2207
2208 upe = nlmsg_data(nlh);
2209 copy_to_user_policy(xp, &upe->pol, dir);
2210 if (copy_to_user_tmpl(xp, skb) < 0)
2211 goto nlmsg_failure;
2212 if (copy_to_user_sec_ctx(xp, skb))
2213 goto nlmsg_failure;
2214 if (copy_to_user_policy_type(xp->type, skb) < 0)
2215 goto nlmsg_failure;
2216 upe->hard = !!hard;
2217
2218 return nlmsg_end(skb, nlh);
2219
2220 nlmsg_failure:
2221 nlmsg_cancel(skb, nlh);
2222 return -EMSGSIZE;
2223 }
2224
2225 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2226 {
2227 struct sk_buff *skb;
2228
2229 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2230 if (skb == NULL)
2231 return -ENOMEM;
2232
2233 if (build_polexpire(skb, xp, dir, c) < 0)
2234 BUG();
2235
2236 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2237 }
2238
2239 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
2240 {
2241 struct xfrm_userpolicy_info *p;
2242 struct xfrm_userpolicy_id *id;
2243 struct nlmsghdr *nlh;
2244 struct sk_buff *skb;
2245 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2246 int headlen;
2247
2248 headlen = sizeof(*p);
2249 if (c->event == XFRM_MSG_DELPOLICY) {
2250 len += nla_total_size(headlen);
2251 headlen = sizeof(*id);
2252 }
2253 len += userpolicy_type_attrsize();
2254 len += NLMSG_ALIGN(headlen);
2255
2256 skb = nlmsg_new(len, GFP_ATOMIC);
2257 if (skb == NULL)
2258 return -ENOMEM;
2259
2260 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2261 if (nlh == NULL)
2262 goto nlmsg_failure;
2263
2264 p = nlmsg_data(nlh);
2265 if (c->event == XFRM_MSG_DELPOLICY) {
2266 struct nlattr *attr;
2267
2268 id = nlmsg_data(nlh);
2269 memset(id, 0, sizeof(*id));
2270 id->dir = dir;
2271 if (c->data.byid)
2272 id->index = xp->index;
2273 else
2274 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2275
2276 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2277 if (attr == NULL)
2278 goto nlmsg_failure;
2279
2280 p = nla_data(attr);
2281 }
2282
2283 copy_to_user_policy(xp, p, dir);
2284 if (copy_to_user_tmpl(xp, skb) < 0)
2285 goto nlmsg_failure;
2286 if (copy_to_user_policy_type(xp->type, skb) < 0)
2287 goto nlmsg_failure;
2288
2289 nlmsg_end(skb, nlh);
2290
2291 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2292
2293 nlmsg_failure:
2294 kfree_skb(skb);
2295 return -1;
2296 }
2297
2298 static int xfrm_notify_policy_flush(struct km_event *c)
2299 {
2300 struct nlmsghdr *nlh;
2301 struct sk_buff *skb;
2302
2303 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2304 if (skb == NULL)
2305 return -ENOMEM;
2306
2307 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2308 if (nlh == NULL)
2309 goto nlmsg_failure;
2310 if (copy_to_user_policy_type(c->data.type, skb) < 0)
2311 goto nlmsg_failure;
2312
2313 nlmsg_end(skb, nlh);
2314
2315 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2316
2317 nlmsg_failure:
2318 kfree_skb(skb);
2319 return -1;
2320 }
2321
2322 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2323 {
2324
2325 switch (c->event) {
2326 case XFRM_MSG_NEWPOLICY:
2327 case XFRM_MSG_UPDPOLICY:
2328 case XFRM_MSG_DELPOLICY:
2329 return xfrm_notify_policy(xp, dir, c);
2330 case XFRM_MSG_FLUSHPOLICY:
2331 return xfrm_notify_policy_flush(c);
2332 case XFRM_MSG_POLEXPIRE:
2333 return xfrm_exp_policy_notify(xp, dir, c);
2334 default:
2335 printk("xfrm_user: Unknown Policy event %d\n", c->event);
2336 }
2337
2338 return 0;
2339
2340 }
2341
2342 static inline size_t xfrm_report_msgsize(void)
2343 {
2344 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
2345 }
2346
2347 static int build_report(struct sk_buff *skb, u8 proto,
2348 struct xfrm_selector *sel, xfrm_address_t *addr)
2349 {
2350 struct xfrm_user_report *ur;
2351 struct nlmsghdr *nlh;
2352
2353 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
2354 if (nlh == NULL)
2355 return -EMSGSIZE;
2356
2357 ur = nlmsg_data(nlh);
2358 ur->proto = proto;
2359 memcpy(&ur->sel, sel, sizeof(ur->sel));
2360
2361 if (addr)
2362 NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
2363
2364 return nlmsg_end(skb, nlh);
2365
2366 nla_put_failure:
2367 nlmsg_cancel(skb, nlh);
2368 return -EMSGSIZE;
2369 }
2370
2371 static int xfrm_send_report(u8 proto, struct xfrm_selector *sel,
2372 xfrm_address_t *addr)
2373 {
2374 struct sk_buff *skb;
2375
2376 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
2377 if (skb == NULL)
2378 return -ENOMEM;
2379
2380 if (build_report(skb, proto, sel, addr) < 0)
2381 BUG();
2382
2383 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2384 }
2385
2386 static struct xfrm_mgr netlink_mgr = {
2387 .id = "netlink",
2388 .notify = xfrm_send_state_notify,
2389 .acquire = xfrm_send_acquire,
2390 .compile_policy = xfrm_compile_policy,
2391 .notify_policy = xfrm_send_policy_notify,
2392 .report = xfrm_send_report,
2393 .migrate = xfrm_send_migrate,
2394 };
2395
2396 static int __init xfrm_user_init(void)
2397 {
2398 struct sock *nlsk;
2399
2400 printk(KERN_INFO "Initializing XFRM netlink socket\n");
2401
2402 nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX,
2403 xfrm_netlink_rcv, NULL, THIS_MODULE);
2404 if (nlsk == NULL)
2405 return -ENOMEM;
2406 rcu_assign_pointer(xfrm_nl, nlsk);
2407
2408 xfrm_register_km(&netlink_mgr);
2409
2410 return 0;
2411 }
2412
2413 static void __exit xfrm_user_exit(void)
2414 {
2415 struct sock *nlsk = xfrm_nl;
2416
2417 xfrm_unregister_km(&netlink_mgr);
2418 rcu_assign_pointer(xfrm_nl, NULL);
2419 synchronize_rcu();
2420 sock_release(nlsk->sk_socket);
2421 }
2422
2423 module_init(xfrm_user_init);
2424 module_exit(xfrm_user_exit);
2425 MODULE_LICENSE("GPL");
2426 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
2427