1 /* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/pfkeyv2.h>
23 #include <linux/ipsec.h>
24 #include <linux/init.h>
25 #include <linux/security.h>
26 #include <net/sock.h>
27 #include <net/xfrm.h>
28 #include <net/netlink.h>
29 #include <net/ah.h>
30 #include <asm/uaccess.h>
31 #if IS_ENABLED(CONFIG_IPV6)
32 #include <linux/in6.h>
33 #endif
34
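/* Sanity-check a single algorithm attribute: the payload must be large
 * enough for the declared key length, the attribute type must be one of
 * the basic algorithm types, and the name is forcibly NUL-terminated.
 */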
35 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
36 {
37 struct nlattr *rt = attrs[type];
38 struct xfrm_algo *algp;
39
40 if (!rt)
41 return 0;
42
43 algp = nla_data(rt);
44 if (nla_len(rt) < xfrm_alg_len(algp))
45 return -EINVAL;
46
47 switch (type) {
48 case XFRMA_ALG_AUTH:
49 case XFRMA_ALG_CRYPT:
50 case XFRMA_ALG_COMP:
51 break;
52
53 default:
54 return -EINVAL;
55 }
56
57 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
58 return 0;
59 }
60
61 static int verify_auth_trunc(struct nlattr **attrs)
62 {
63 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
64 struct xfrm_algo_auth *algp;
65
66 if (!rt)
67 return 0;
68
69 algp = nla_data(rt);
70 if (nla_len(rt) < xfrm_alg_auth_len(algp))
71 return -EINVAL;
72
73 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
74 return 0;
75 }
76
77 static int verify_aead(struct nlattr **attrs)
78 {
79 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
80 struct xfrm_algo_aead *algp;
81
82 if (!rt)
83 return 0;
84
85 algp = nla_data(rt);
86 if (nla_len(rt) < aead_len(algp))
87 return -EINVAL;
88
89 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
90 return 0;
91 }
92
93 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
94 xfrm_address_t **addrp)
95 {
96 struct nlattr *rt = attrs[type];
97
98 if (rt && addrp)
99 *addrp = nla_data(rt);
100 }
101
102 static inline int verify_sec_ctx_len(struct nlattr **attrs)
103 {
104 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
105 struct xfrm_user_sec_ctx *uctx;
106
107 if (!rt)
108 return 0;
109
110 uctx = nla_data(rt);
111 if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
112 return -EINVAL;
113
114 return 0;
115 }
116
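/* Validate the optional XFRMA_REPLAY_ESN_VAL attribute: ESN states must
 * supply it with a sane bitmap length, and it is only accepted for ESP
 * with the legacy replay_window left at zero.
 */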
117 static inline int verify_replay(struct xfrm_usersa_info *p,
118 struct nlattr **attrs)
119 {
120 struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
121 struct xfrm_replay_state_esn *rs;
122
123 if (p->flags & XFRM_STATE_ESN) {
124 if (!rt)
125 return -EINVAL;
126
127 rs = nla_data(rt);
128
129 if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
130 return -EINVAL;
131
132 if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
133 nla_len(rt) != sizeof(*rs))
134 return -EINVAL;
135 }
136
137 if (!rt)
138 return 0;
139
140 if (p->id.proto != IPPROTO_ESP)
141 return -EINVAL;
142
143 if (p->replay_window != 0)
144 return -EINVAL;
145
146 return 0;
147 }
148
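/* Validate a new SA request from userspace: check the address family,
 * make sure the attribute combination is valid for the protocol (AH,
 * ESP, IPcomp, or the IPv6 DSTOPTS/ROUTING protocols), verify each
 * supplied algorithm and the security context, then check the mode.
 */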
149 static int verify_newsa_info(struct xfrm_usersa_info *p,
150 struct nlattr **attrs)
151 {
152 int err;
153
154 err = -EINVAL;
155 switch (p->family) {
156 case AF_INET:
157 break;
158
159 case AF_INET6:
160 #if IS_ENABLED(CONFIG_IPV6)
161 break;
162 #else
163 err = -EAFNOSUPPORT;
164 goto out;
165 #endif
166
167 default:
168 goto out;
169 }
170
171 err = -EINVAL;
172 switch (p->id.proto) {
173 case IPPROTO_AH:
174 if ((!attrs[XFRMA_ALG_AUTH] &&
175 !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
176 attrs[XFRMA_ALG_AEAD] ||
177 attrs[XFRMA_ALG_CRYPT] ||
178 attrs[XFRMA_ALG_COMP] ||
179 attrs[XFRMA_TFCPAD] ||
180 (ntohl(p->id.spi) >= 0x10000))
181
182 goto out;
183 break;
184
185 case IPPROTO_ESP:
186 if (attrs[XFRMA_ALG_COMP])
187 goto out;
188 if (!attrs[XFRMA_ALG_AUTH] &&
189 !attrs[XFRMA_ALG_AUTH_TRUNC] &&
190 !attrs[XFRMA_ALG_CRYPT] &&
191 !attrs[XFRMA_ALG_AEAD])
192 goto out;
193 if ((attrs[XFRMA_ALG_AUTH] ||
194 attrs[XFRMA_ALG_AUTH_TRUNC] ||
195 attrs[XFRMA_ALG_CRYPT]) &&
196 attrs[XFRMA_ALG_AEAD])
197 goto out;
198 if (attrs[XFRMA_TFCPAD] &&
199 p->mode != XFRM_MODE_TUNNEL)
200 goto out;
201 break;
202
203 case IPPROTO_COMP:
204 if (!attrs[XFRMA_ALG_COMP] ||
205 attrs[XFRMA_ALG_AEAD] ||
206 attrs[XFRMA_ALG_AUTH] ||
207 attrs[XFRMA_ALG_AUTH_TRUNC] ||
208 attrs[XFRMA_ALG_CRYPT] ||
209 attrs[XFRMA_TFCPAD])
210 goto out;
211 break;
212
213 #if IS_ENABLED(CONFIG_IPV6)
214 case IPPROTO_DSTOPTS:
215 case IPPROTO_ROUTING:
216 if (attrs[XFRMA_ALG_COMP] ||
217 attrs[XFRMA_ALG_AUTH] ||
218 attrs[XFRMA_ALG_AUTH_TRUNC] ||
219 attrs[XFRMA_ALG_AEAD] ||
220 attrs[XFRMA_ALG_CRYPT] ||
221 attrs[XFRMA_ENCAP] ||
222 attrs[XFRMA_SEC_CTX] ||
223 attrs[XFRMA_TFCPAD] ||
224 !attrs[XFRMA_COADDR])
225 goto out;
226 break;
227 #endif
228
229 default:
230 goto out;
231 }
232
233 if ((err = verify_aead(attrs)))
234 goto out;
235 if ((err = verify_auth_trunc(attrs)))
236 goto out;
237 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
238 goto out;
239 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
240 goto out;
241 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
242 goto out;
243 if ((err = verify_sec_ctx_len(attrs)))
244 goto out;
245 if ((err = verify_replay(p, attrs)))
246 goto out;
247
248 err = -EINVAL;
249 switch (p->mode) {
250 case XFRM_MODE_TRANSPORT:
251 case XFRM_MODE_TUNNEL:
252 case XFRM_MODE_ROUTEOPTIMIZATION:
253 case XFRM_MODE_BEET:
254 break;
255
256 default:
257 goto out;
258 }
259
260 err = 0;
261
262 out:
263 return err;
264 }
265
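/* Copy a simple algorithm attribute into a freshly allocated xfrm_algo,
 * resolving the algorithm descriptor by name through the supplied lookup
 * function and recording its pfkey algorithm id in *props.
 */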
266 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
267 struct xfrm_algo_desc *(*get_byname)(const char *, int),
268 struct nlattr *rta)
269 {
270 struct xfrm_algo *p, *ualg;
271 struct xfrm_algo_desc *algo;
272
273 if (!rta)
274 return 0;
275
276 ualg = nla_data(rta);
277
278 algo = get_byname(ualg->alg_name, 1);
279 if (!algo)
280 return -ENOSYS;
281 *props = algo->desc.sadb_alg_id;
282
283 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
284 if (!p)
285 return -ENOMEM;
286
287 strcpy(p->alg_name, algo->name);
288 *algpp = p;
289 return 0;
290 }
291
292 static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
293 struct nlattr *rta)
294 {
295 struct xfrm_algo *ualg;
296 struct xfrm_algo_auth *p;
297 struct xfrm_algo_desc *algo;
298
299 if (!rta)
300 return 0;
301
302 ualg = nla_data(rta);
303
304 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
305 if (!algo)
306 return -ENOSYS;
307 *props = algo->desc.sadb_alg_id;
308
309 p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
310 if (!p)
311 return -ENOMEM;
312
313 strcpy(p->alg_name, algo->name);
314 p->alg_key_len = ualg->alg_key_len;
315 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
316 memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
317
318 *algpp = p;
319 return 0;
320 }
321
322 static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
323 struct nlattr *rta)
324 {
325 struct xfrm_algo_auth *p, *ualg;
326 struct xfrm_algo_desc *algo;
327
328 if (!rta)
329 return 0;
330
331 ualg = nla_data(rta);
332
333 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
334 if (!algo)
335 return -ENOSYS;
336 if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
337 ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
338 return -EINVAL;
339 *props = algo->desc.sadb_alg_id;
340
341 p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
342 if (!p)
343 return -ENOMEM;
344
345 strcpy(p->alg_name, algo->name);
346 if (!p->alg_trunc_len)
347 p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
348
349 *algpp = p;
350 return 0;
351 }
352
353 static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
354 struct nlattr *rta)
355 {
356 struct xfrm_algo_aead *p, *ualg;
357 struct xfrm_algo_desc *algo;
358
359 if (!rta)
360 return 0;
361
362 ualg = nla_data(rta);
363
364 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
365 if (!algo)
366 return -ENOSYS;
367 *props = algo->desc.sadb_alg_id;
368
369 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
370 if (!p)
371 return -ENOMEM;
372
373 strcpy(p->alg_name, algo->name);
374 *algpp = p;
375 return 0;
376 }
377
378 static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
379 struct nlattr *rp)
380 {
381 struct xfrm_replay_state_esn *up;
382 int ulen;
383
384 if (!replay_esn || !rp)
385 return 0;
386
387 up = nla_data(rp);
388 ulen = xfrm_replay_state_esn_len(up);
389
390 if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
391 return -EINVAL;
392
393 return 0;
394 }
395
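/* Allocate the kernel's replay and pre-replay ESN state from the
 * user-supplied attribute, copying either the full structure or only the
 * fixed header depending on how much the user actually sent.
 */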
396 static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
397 struct xfrm_replay_state_esn **preplay_esn,
398 struct nlattr *rta)
399 {
400 struct xfrm_replay_state_esn *p, *pp, *up;
401 int klen, ulen;
402
403 if (!rta)
404 return 0;
405
406 up = nla_data(rta);
407 klen = xfrm_replay_state_esn_len(up);
408 ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
409
410 p = kzalloc(klen, GFP_KERNEL);
411 if (!p)
412 return -ENOMEM;
413
414 pp = kzalloc(klen, GFP_KERNEL);
415 if (!pp) {
416 kfree(p);
417 return -ENOMEM;
418 }
419
420 memcpy(p, up, ulen);
421 memcpy(pp, up, ulen);
422
423 *replay_esn = p;
424 *preplay_esn = pp;
425
426 return 0;
427 }
428
429 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
430 {
431 int len = 0;
432
433 if (xfrm_ctx) {
434 len += sizeof(struct xfrm_user_sec_ctx);
435 len += xfrm_ctx->ctx_len;
436 }
437 return len;
438 }
439
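/* Fill in an xfrm_state from the fixed part of a usersa_info message. */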
440 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
441 {
442 memcpy(&x->id, &p->id, sizeof(x->id));
443 memcpy(&x->sel, &p->sel, sizeof(x->sel));
444 memcpy(&x->lft, &p->lft, sizeof(x->lft));
445 x->props.mode = p->mode;
446 x->props.replay_window = min_t(unsigned int, p->replay_window,
447 sizeof(x->replay.bitmap) * 8);
448 x->props.reqid = p->reqid;
449 x->props.family = p->family;
450 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
451 x->props.flags = p->flags;
452
453 if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
454 x->sel.family = p->family;
455 }
456
457 /*
458  * Someday, when pfkey also has support, this code could be made
459  * shareable and moved to xfrm_state.c - JHS
460 *
461 */
462 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
463 int update_esn)
464 {
465 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
466 struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
467 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
468 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
469 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
470
471 if (re) {
472 struct xfrm_replay_state_esn *replay_esn;
473 replay_esn = nla_data(re);
474 memcpy(x->replay_esn, replay_esn,
475 xfrm_replay_state_esn_len(replay_esn));
476 memcpy(x->preplay_esn, replay_esn,
477 xfrm_replay_state_esn_len(replay_esn));
478 }
479
480 if (rp) {
481 struct xfrm_replay_state *replay;
482 replay = nla_data(rp);
483 memcpy(&x->replay, replay, sizeof(*replay));
484 memcpy(&x->preplay, replay, sizeof(*replay));
485 }
486
487 if (lt) {
488 struct xfrm_lifetime_cur *ltime;
489 ltime = nla_data(lt);
490 x->curlft.bytes = ltime->bytes;
491 x->curlft.packets = ltime->packets;
492 x->curlft.add_time = ltime->add_time;
493 x->curlft.use_time = ltime->use_time;
494 }
495
496 if (et)
497 x->replay_maxage = nla_get_u32(et);
498
499 if (rt)
500 x->replay_maxdiff = nla_get_u32(rt);
501 }
502
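/* Build and initialize a new xfrm_state from a usersa_info message and
 * its attributes; on failure the half-constructed state is marked dead
 * and released, and *errp carries the error back to the caller.
 */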
503 static struct xfrm_state *xfrm_state_construct(struct net *net,
504 struct xfrm_usersa_info *p,
505 struct nlattr **attrs,
506 int *errp)
507 {
508 struct xfrm_state *x = xfrm_state_alloc(net);
509 int err = -ENOMEM;
510
511 if (!x)
512 goto error_no_put;
513
514 copy_from_user_state(x, p);
515
516 if (attrs[XFRMA_SA_EXTRA_FLAGS])
517 x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
518
519 if ((err = attach_aead(&x->aead, &x->props.ealgo,
520 attrs[XFRMA_ALG_AEAD])))
521 goto error;
522 if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
523 attrs[XFRMA_ALG_AUTH_TRUNC])))
524 goto error;
525 if (!x->props.aalgo) {
526 if ((err = attach_auth(&x->aalg, &x->props.aalgo,
527 attrs[XFRMA_ALG_AUTH])))
528 goto error;
529 }
530 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
531 xfrm_ealg_get_byname,
532 attrs[XFRMA_ALG_CRYPT])))
533 goto error;
534 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
535 xfrm_calg_get_byname,
536 attrs[XFRMA_ALG_COMP])))
537 goto error;
538
539 if (attrs[XFRMA_ENCAP]) {
540 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
541 sizeof(*x->encap), GFP_KERNEL);
542 if (x->encap == NULL)
543 goto error;
544 }
545
546 if (attrs[XFRMA_TFCPAD])
547 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
548
549 if (attrs[XFRMA_COADDR]) {
550 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
551 sizeof(*x->coaddr), GFP_KERNEL);
552 if (x->coaddr == NULL)
553 goto error;
554 }
555
556 xfrm_mark_get(attrs, &x->mark);
557
558 err = __xfrm_init_state(x, false);
559 if (err)
560 goto error;
561
562 if (attrs[XFRMA_SEC_CTX] &&
563 security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
564 goto error;
565
566 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
567 attrs[XFRMA_REPLAY_ESN_VAL])))
568 goto error;
569
570 x->km.seq = p->seq;
571 x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
572 /* sysctl_xfrm_aevent_etime is in 100ms units */
573 x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;
574
575 if ((err = xfrm_init_replay(x)))
576 goto error;
577
578 /* override default values from above */
579 xfrm_update_ae_params(x, attrs, 0);
580
581 return x;
582
583 error:
584 x->km.state = XFRM_STATE_DEAD;
585 xfrm_state_put(x);
586 error_no_put:
587 *errp = err;
588 return NULL;
589 }
590
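/* XFRM_MSG_NEWSA / XFRM_MSG_UPDSA handler: construct the state, insert
 * or update it in the SAD, audit the result, and notify key managers.
 */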
591 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
592 struct nlattr **attrs)
593 {
594 struct net *net = sock_net(skb->sk);
595 struct xfrm_usersa_info *p = nlmsg_data(nlh);
596 struct xfrm_state *x;
597 int err;
598 struct km_event c;
599 kuid_t loginuid = audit_get_loginuid(current);
600 unsigned int sessionid = audit_get_sessionid(current);
601 u32 sid;
602
603 err = verify_newsa_info(p, attrs);
604 if (err)
605 return err;
606
607 x = xfrm_state_construct(net, p, attrs, &err);
608 if (!x)
609 return err;
610
611 xfrm_state_hold(x);
612 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
613 err = xfrm_state_add(x);
614 else
615 err = xfrm_state_update(x);
616
617 security_task_getsecid(current, &sid);
618 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
619
620 if (err < 0) {
621 x->km.state = XFRM_STATE_DEAD;
622 __xfrm_state_put(x);
623 goto out;
624 }
625
626 c.seq = nlh->nlmsg_seq;
627 c.portid = nlh->nlmsg_pid;
628 c.event = nlh->nlmsg_type;
629
630 km_state_notify(x, &c);
631 out:
632 xfrm_state_put(x);
633 return err;
634 }
635
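/* Look up a state for DELSA/GETSA requests: by daddr/SPI/proto for the
 * standard IPsec protocols, otherwise by address pair, in which case
 * XFRMA_SRCADDR is required.
 */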
636 static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
637 struct xfrm_usersa_id *p,
638 struct nlattr **attrs,
639 int *errp)
640 {
641 struct xfrm_state *x = NULL;
642 struct xfrm_mark m;
643 int err;
644 u32 mark = xfrm_mark_get(attrs, &m);
645
646 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
647 err = -ESRCH;
648 x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
649 } else {
650 xfrm_address_t *saddr = NULL;
651
652 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
653 if (!saddr) {
654 err = -EINVAL;
655 goto out;
656 }
657
658 err = -ESRCH;
659 x = xfrm_state_lookup_byaddr(net, mark,
660 &p->daddr, saddr,
661 p->proto, p->family);
662 }
663
664 out:
665 if (!x && errp)
666 *errp = err;
667 return x;
668 }
669
670 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
671 struct nlattr **attrs)
672 {
673 struct net *net = sock_net(skb->sk);
674 struct xfrm_state *x;
675 int err = -ESRCH;
676 struct km_event c;
677 struct xfrm_usersa_id *p = nlmsg_data(nlh);
678 kuid_t loginuid = audit_get_loginuid(current);
679 unsigned int sessionid = audit_get_sessionid(current);
680 u32 sid;
681
682 x = xfrm_user_state_lookup(net, p, attrs, &err);
683 if (x == NULL)
684 return err;
685
686 if ((err = security_xfrm_state_delete(x)) != 0)
687 goto out;
688
689 if (xfrm_state_kern(x)) {
690 err = -EPERM;
691 goto out;
692 }
693
694 err = xfrm_state_delete(x);
695
696 if (err < 0)
697 goto out;
698
699 c.seq = nlh->nlmsg_seq;
700 c.portid = nlh->nlmsg_pid;
701 c.event = nlh->nlmsg_type;
702 km_state_notify(x, &c);
703
704 out:
705 security_task_getsecid(current, &sid);
706 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
707 xfrm_state_put(x);
708 return err;
709 }
710
711 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
712 {
713 memset(p, 0, sizeof(*p));
714 memcpy(&p->id, &x->id, sizeof(p->id));
715 memcpy(&p->sel, &x->sel, sizeof(p->sel));
716 memcpy(&p->lft, &x->lft, sizeof(p->lft));
717 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
718 memcpy(&p->stats, &x->stats, sizeof(p->stats));
719 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
720 p->mode = x->props.mode;
721 p->replay_window = x->props.replay_window;
722 p->reqid = x->props.reqid;
723 p->family = x->props.family;
724 p->flags = x->props.flags;
725 p->seq = x->km.seq;
726 }
727
728 struct xfrm_dump_info {
729 struct sk_buff *in_skb;
730 struct sk_buff *out_skb;
731 u32 nlmsg_seq;
732 u16 nlmsg_flags;
733 };
734
735 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
736 {
737 struct xfrm_user_sec_ctx *uctx;
738 struct nlattr *attr;
739 int ctx_size = sizeof(*uctx) + s->ctx_len;
740
741 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
742 if (attr == NULL)
743 return -EMSGSIZE;
744
745 uctx = nla_data(attr);
746 uctx->exttype = XFRMA_SEC_CTX;
747 uctx->len = ctx_size;
748 uctx->ctx_doi = s->ctx_doi;
749 uctx->ctx_alg = s->ctx_alg;
750 uctx->ctx_len = s->ctx_len;
751 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
752
753 return 0;
754 }
755
756 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
757 {
758 struct xfrm_algo *algo;
759 struct nlattr *nla;
760
761 nla = nla_reserve(skb, XFRMA_ALG_AUTH,
762 sizeof(*algo) + (auth->alg_key_len + 7) / 8);
763 if (!nla)
764 return -EMSGSIZE;
765
766 algo = nla_data(nla);
767 strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
768 memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
769 algo->alg_key_len = auth->alg_key_len;
770
771 return 0;
772 }
773
774 /* Don't change this without updating xfrm_sa_len! */
775 static int copy_to_user_state_extra(struct xfrm_state *x,
776 struct xfrm_usersa_info *p,
777 struct sk_buff *skb)
778 {
779 int ret = 0;
780
781 copy_to_user_state(x, p);
782
783 if (x->props.extra_flags) {
784 ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
785 x->props.extra_flags);
786 if (ret)
787 goto out;
788 }
789
790 if (x->coaddr) {
791 ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
792 if (ret)
793 goto out;
794 }
795 if (x->lastused) {
796 ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused);
797 if (ret)
798 goto out;
799 }
800 if (x->aead) {
801 ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
802 if (ret)
803 goto out;
804 }
805 if (x->aalg) {
806 ret = copy_to_user_auth(x->aalg, skb);
807 if (!ret)
808 ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
809 xfrm_alg_auth_len(x->aalg), x->aalg);
810 if (ret)
811 goto out;
812 }
813 if (x->ealg) {
814 ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
815 if (ret)
816 goto out;
817 }
818 if (x->calg) {
819 ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
820 if (ret)
821 goto out;
822 }
823 if (x->encap) {
824 ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
825 if (ret)
826 goto out;
827 }
828 if (x->tfcpad) {
829 ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
830 if (ret)
831 goto out;
832 }
833 ret = xfrm_mark_put(skb, &x->mark);
834 if (ret)
835 goto out;
836 if (x->replay_esn) {
837 ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
838 xfrm_replay_state_esn_len(x->replay_esn),
839 x->replay_esn);
840 if (ret)
841 goto out;
842 }
843 if (x->security)
844 ret = copy_sec_ctx(x->security, skb);
845 out:
846 return ret;
847 }
848
849 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
850 {
851 struct xfrm_dump_info *sp = ptr;
852 struct sk_buff *in_skb = sp->in_skb;
853 struct sk_buff *skb = sp->out_skb;
854 struct xfrm_usersa_info *p;
855 struct nlmsghdr *nlh;
856 int err;
857
858 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
859 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
860 if (nlh == NULL)
861 return -EMSGSIZE;
862
863 p = nlmsg_data(nlh);
864
865 err = copy_to_user_state_extra(x, p, skb);
866 if (err) {
867 nlmsg_cancel(skb, nlh);
868 return err;
869 }
870 nlmsg_end(skb, nlh);
871 return 0;
872 }
873
874 static int xfrm_dump_sa_done(struct netlink_callback *cb)
875 {
876 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
877 struct sock *sk = cb->skb->sk;
878 struct net *net = sock_net(sk);
879
880 xfrm_state_walk_done(walk, net);
881 return 0;
882 }
883
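/* Netlink dump callback for XFRM_MSG_GETSA: walk the SAD and emit one
 * XFRM_MSG_NEWSA record per state, keeping the walk state in cb->args.
 */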
884 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
885 {
886 struct net *net = sock_net(skb->sk);
887 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
888 struct xfrm_dump_info info;
889
890 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
891 sizeof(cb->args) - sizeof(cb->args[0]));
892
893 info.in_skb = cb->skb;
894 info.out_skb = skb;
895 info.nlmsg_seq = cb->nlh->nlmsg_seq;
896 info.nlmsg_flags = NLM_F_MULTI;
897
898 if (!cb->args[0]) {
899 cb->args[0] = 1;
900 xfrm_state_walk_init(walk, 0);
901 }
902
903 (void) xfrm_state_walk(net, walk, dump_one_state, &info);
904
905 return skb->len;
906 }
907
908 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
909 struct xfrm_state *x, u32 seq)
910 {
911 struct xfrm_dump_info info;
912 struct sk_buff *skb;
913 int err;
914
915 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
916 if (!skb)
917 return ERR_PTR(-ENOMEM);
918
919 info.in_skb = in_skb;
920 info.out_skb = skb;
921 info.nlmsg_seq = seq;
922 info.nlmsg_flags = 0;
923
924 err = dump_one_state(x, 0, &info);
925 if (err) {
926 kfree_skb(skb);
927 return ERR_PTR(err);
928 }
929
930 return skb;
931 }
932
933 static inline size_t xfrm_spdinfo_msgsize(void)
934 {
935 return NLMSG_ALIGN(4)
936 + nla_total_size(sizeof(struct xfrmu_spdinfo))
937 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
938 }
939
940 static int build_spdinfo(struct sk_buff *skb, struct net *net,
941 u32 portid, u32 seq, u32 flags)
942 {
943 struct xfrmk_spdinfo si;
944 struct xfrmu_spdinfo spc;
945 struct xfrmu_spdhinfo sph;
946 struct nlmsghdr *nlh;
947 int err;
948 u32 *f;
949
950 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
951 if (nlh == NULL) /* shouldn't really happen ... */
952 return -EMSGSIZE;
953
954 f = nlmsg_data(nlh);
955 *f = flags;
956 xfrm_spd_getinfo(net, &si);
957 spc.incnt = si.incnt;
958 spc.outcnt = si.outcnt;
959 spc.fwdcnt = si.fwdcnt;
960 spc.inscnt = si.inscnt;
961 spc.outscnt = si.outscnt;
962 spc.fwdscnt = si.fwdscnt;
963 sph.spdhcnt = si.spdhcnt;
964 sph.spdhmcnt = si.spdhmcnt;
965
966 err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
967 if (!err)
968 err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
969 if (err) {
970 nlmsg_cancel(skb, nlh);
971 return err;
972 }
973
974 return nlmsg_end(skb, nlh);
975 }
976
977 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
978 struct nlattr **attrs)
979 {
980 struct net *net = sock_net(skb->sk);
981 struct sk_buff *r_skb;
982 u32 *flags = nlmsg_data(nlh);
983 u32 sportid = NETLINK_CB(skb).portid;
984 u32 seq = nlh->nlmsg_seq;
985
986 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
987 if (r_skb == NULL)
988 return -ENOMEM;
989
990 if (build_spdinfo(r_skb, net, sportid, seq, *flags) < 0)
991 BUG();
992
993 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
994 }
995
996 static inline size_t xfrm_sadinfo_msgsize(void)
997 {
998 return NLMSG_ALIGN(4)
999 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
1000 + nla_total_size(4); /* XFRMA_SAD_CNT */
1001 }
1002
1003 static int build_sadinfo(struct sk_buff *skb, struct net *net,
1004 u32 portid, u32 seq, u32 flags)
1005 {
1006 struct xfrmk_sadinfo si;
1007 struct xfrmu_sadhinfo sh;
1008 struct nlmsghdr *nlh;
1009 int err;
1010 u32 *f;
1011
1012 nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
1013 if (nlh == NULL) /* shouldn't really happen ... */
1014 return -EMSGSIZE;
1015
1016 f = nlmsg_data(nlh);
1017 *f = flags;
1018 xfrm_sad_getinfo(net, &si);
1019
1020 sh.sadhmcnt = si.sadhmcnt;
1021 sh.sadhcnt = si.sadhcnt;
1022
1023 err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
1024 if (!err)
1025 err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
1026 if (err) {
1027 nlmsg_cancel(skb, nlh);
1028 return err;
1029 }
1030
1031 return nlmsg_end(skb, nlh);
1032 }
1033
1034 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1035 struct nlattr **attrs)
1036 {
1037 struct net *net = sock_net(skb->sk);
1038 struct sk_buff *r_skb;
1039 u32 *flags = nlmsg_data(nlh);
1040 u32 sportid = NETLINK_CB(skb).portid;
1041 u32 seq = nlh->nlmsg_seq;
1042
1043 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
1044 if (r_skb == NULL)
1045 return -ENOMEM;
1046
1047 if (build_sadinfo(r_skb, net, sportid, seq, *flags) < 0)
1048 BUG();
1049
1050 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1051 }
1052
1053 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1054 struct nlattr **attrs)
1055 {
1056 struct net *net = sock_net(skb->sk);
1057 struct xfrm_usersa_id *p = nlmsg_data(nlh);
1058 struct xfrm_state *x;
1059 struct sk_buff *resp_skb;
1060 int err = -ESRCH;
1061
1062 x = xfrm_user_state_lookup(net, p, attrs, &err);
1063 if (x == NULL)
1064 goto out_noput;
1065
1066 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1067 if (IS_ERR(resp_skb)) {
1068 err = PTR_ERR(resp_skb);
1069 } else {
1070 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1071 }
1072 xfrm_state_put(x);
1073 out_noput:
1074 return err;
1075 }
1076
1077 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
1078 struct nlattr **attrs)
1079 {
1080 struct net *net = sock_net(skb->sk);
1081 struct xfrm_state *x;
1082 struct xfrm_userspi_info *p;
1083 struct sk_buff *resp_skb;
1084 xfrm_address_t *daddr;
1085 int family;
1086 int err;
1087 u32 mark;
1088 struct xfrm_mark m;
1089
1090 p = nlmsg_data(nlh);
1091 err = verify_spi_info(p->info.id.proto, p->min, p->max);
1092 if (err)
1093 goto out_noput;
1094
1095 family = p->info.family;
1096 daddr = &p->info.id.daddr;
1097
1098 x = NULL;
1099
1100 mark = xfrm_mark_get(attrs, &m);
1101 if (p->info.seq) {
1102 x = xfrm_find_acq_byseq(net, mark, p->info.seq);
1103 if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
1104 xfrm_state_put(x);
1105 x = NULL;
1106 }
1107 }
1108
1109 if (!x)
1110 x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
1111 p->info.id.proto, daddr,
1112 &p->info.saddr, 1,
1113 family);
1114 err = -ENOENT;
1115 if (x == NULL)
1116 goto out_noput;
1117
1118 err = xfrm_alloc_spi(x, p->min, p->max);
1119 if (err)
1120 goto out;
1121
1122 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1123 if (IS_ERR(resp_skb)) {
1124 err = PTR_ERR(resp_skb);
1125 goto out;
1126 }
1127
1128 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1129
1130 out:
1131 xfrm_state_put(x);
1132 out_noput:
1133 return err;
1134 }
1135
1136 static int verify_policy_dir(u8 dir)
1137 {
1138 switch (dir) {
1139 case XFRM_POLICY_IN:
1140 case XFRM_POLICY_OUT:
1141 case XFRM_POLICY_FWD:
1142 break;
1143
1144 default:
1145 return -EINVAL;
1146 }
1147
1148 return 0;
1149 }
1150
1151 static int verify_policy_type(u8 type)
1152 {
1153 switch (type) {
1154 case XFRM_POLICY_TYPE_MAIN:
1155 #ifdef CONFIG_XFRM_SUB_POLICY
1156 case XFRM_POLICY_TYPE_SUB:
1157 #endif
1158 break;
1159
1160 default:
1161 return -EINVAL;
1162 }
1163
1164 return 0;
1165 }
1166
1167 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
1168 {
1169 int ret;
1170
1171 switch (p->share) {
1172 case XFRM_SHARE_ANY:
1173 case XFRM_SHARE_SESSION:
1174 case XFRM_SHARE_USER:
1175 case XFRM_SHARE_UNIQUE:
1176 break;
1177
1178 default:
1179 return -EINVAL;
1180 }
1181
1182 switch (p->action) {
1183 case XFRM_POLICY_ALLOW:
1184 case XFRM_POLICY_BLOCK:
1185 break;
1186
1187 default:
1188 return -EINVAL;
1189 }
1190
1191 switch (p->sel.family) {
1192 case AF_INET:
1193 break;
1194
1195 case AF_INET6:
1196 #if IS_ENABLED(CONFIG_IPV6)
1197 break;
1198 #else
1199 return -EAFNOSUPPORT;
1200 #endif
1201
1202 default:
1203 return -EINVAL;
1204 }
1205
1206 ret = verify_policy_dir(p->dir);
1207 if (ret)
1208 return ret;
1209 if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
1210 return -EINVAL;
1211
1212 return 0;
1213 }
1214
1215 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
1216 {
1217 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1218 struct xfrm_user_sec_ctx *uctx;
1219
1220 if (!rt)
1221 return 0;
1222
1223 uctx = nla_data(rt);
1224 return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
1225 }
1226
1227 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1228 int nr)
1229 {
1230 int i;
1231
1232 xp->xfrm_nr = nr;
1233 for (i = 0; i < nr; i++, ut++) {
1234 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1235
1236 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1237 memcpy(&t->saddr, &ut->saddr,
1238 sizeof(xfrm_address_t));
1239 t->reqid = ut->reqid;
1240 t->mode = ut->mode;
1241 t->share = ut->share;
1242 t->optional = ut->optional;
1243 t->aalgos = ut->aalgos;
1244 t->ealgos = ut->ealgos;
1245 t->calgos = ut->calgos;
1246 /* If all masks are ~0, then we allow all algorithms. */
1247 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
1248 t->encap_family = ut->family;
1249 }
1250 }
1251
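/* Check a template array: bound its depth and make sure every template
 * has a usable address family, defaulting to the policy's own family.
 */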
1252 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1253 {
1254 int i;
1255
1256 if (nr > XFRM_MAX_DEPTH)
1257 return -EINVAL;
1258
1259 for (i = 0; i < nr; i++) {
1260 /* We never validated the ut->family value, so many
1261 * applications simply leave it at zero. The check was
1262 * never made and ut->family was ignored because all
1263 * templates could be assumed to have the same family as
1264 * the policy itself. Now that we will have ipv4-in-ipv6
1265 * and ipv6-in-ipv4 tunnels, this is no longer true.
1266 */
1267 if (!ut[i].family)
1268 ut[i].family = family;
1269
1270 switch (ut[i].family) {
1271 case AF_INET:
1272 break;
1273 #if IS_ENABLED(CONFIG_IPV6)
1274 case AF_INET6:
1275 break;
1276 #endif
1277 default:
1278 return -EINVAL;
1279 }
1280 }
1281
1282 return 0;
1283 }
1284
1285 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
1286 {
1287 struct nlattr *rt = attrs[XFRMA_TMPL];
1288
1289 if (!rt) {
1290 pol->xfrm_nr = 0;
1291 } else {
1292 struct xfrm_user_tmpl *utmpl = nla_data(rt);
1293 int nr = nla_len(rt) / sizeof(*utmpl);
1294 int err;
1295
1296 err = validate_tmpl(nr, utmpl, pol->family);
1297 if (err)
1298 return err;
1299
1300 copy_templates(pol, utmpl, nr);
1301 }
1302 return 0;
1303 }
1304
1305 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1306 {
1307 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1308 struct xfrm_userpolicy_type *upt;
1309 u8 type = XFRM_POLICY_TYPE_MAIN;
1310 int err;
1311
1312 if (rt) {
1313 upt = nla_data(rt);
1314 type = upt->type;
1315 }
1316
1317 err = verify_policy_type(type);
1318 if (err)
1319 return err;
1320
1321 *tp = type;
1322 return 0;
1323 }
1324
1325 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1326 {
1327 xp->priority = p->priority;
1328 xp->index = p->index;
1329 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1330 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1331 xp->action = p->action;
1332 xp->flags = p->flags;
1333 xp->family = p->sel.family;
1334 /* XXX xp->share = p->share; */
1335 }
1336
1337 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1338 {
1339 memset(p, 0, sizeof(*p));
1340 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1341 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1342 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1343 p->priority = xp->priority;
1344 p->index = xp->index;
1345 p->sel.family = xp->family;
1346 p->dir = dir;
1347 p->action = xp->action;
1348 p->flags = xp->flags;
1349 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1350 }
1351
1352 static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
1353 {
1354 struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
1355 int err;
1356
1357 if (!xp) {
1358 *errp = -ENOMEM;
1359 return NULL;
1360 }
1361
1362 copy_from_user_policy(xp, p);
1363
1364 err = copy_from_user_policy_type(&xp->type, attrs);
1365 if (err)
1366 goto error;
1367
1368 if (!(err = copy_from_user_tmpl(xp, attrs)))
1369 err = copy_from_user_sec_ctx(xp, attrs);
1370 if (err)
1371 goto error;
1372
1373 xfrm_mark_get(attrs, &xp->mark);
1374
1375 return xp;
1376 error:
1377 *errp = err;
1378 xp->walk.dead = 1;
1379 xfrm_policy_destroy(xp);
1380 return NULL;
1381 }
1382
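/* XFRM_MSG_NEWPOLICY / XFRM_MSG_UPDPOLICY handler: validate and build
 * the policy, insert it into the SPD, audit, and notify key managers.
 */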
1383 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1384 struct nlattr **attrs)
1385 {
1386 struct net *net = sock_net(skb->sk);
1387 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1388 struct xfrm_policy *xp;
1389 struct km_event c;
1390 int err;
1391 int excl;
1392 kuid_t loginuid = audit_get_loginuid(current);
1393 unsigned int sessionid = audit_get_sessionid(current);
1394 u32 sid;
1395
1396 err = verify_newpolicy_info(p);
1397 if (err)
1398 return err;
1399 err = verify_sec_ctx_len(attrs);
1400 if (err)
1401 return err;
1402
1403 xp = xfrm_policy_construct(net, p, attrs, &err);
1404 if (!xp)
1405 return err;
1406
1407  /* Shouldn't excl be based on nlh flags??
1408  * Aha! this is really anti-netlink, i.e. more pfkey-derived:
1409  * in netlink, excl is a flag and you wouldn't need
1410  * a separate XFRM_MSG_UPDPOLICY type - JHS */
1411 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1412 err = xfrm_policy_insert(p->dir, xp, excl);
1413 security_task_getsecid(current, &sid);
1414 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
1415
1416 if (err) {
1417 security_xfrm_policy_free(xp->security);
1418 kfree(xp);
1419 return err;
1420 }
1421
1422 c.event = nlh->nlmsg_type;
1423 c.seq = nlh->nlmsg_seq;
1424 c.portid = nlh->nlmsg_pid;
1425 km_policy_notify(xp, p->dir, &c);
1426
1427 xfrm_pol_put(xp);
1428
1429 return 0;
1430 }
1431
1432 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1433 {
1434 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1435 int i;
1436
1437 if (xp->xfrm_nr == 0)
1438 return 0;
1439
1440 for (i = 0; i < xp->xfrm_nr; i++) {
1441 struct xfrm_user_tmpl *up = &vec[i];
1442 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1443
1444 memset(up, 0, sizeof(*up));
1445 memcpy(&up->id, &kp->id, sizeof(up->id));
1446 up->family = kp->encap_family;
1447 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1448 up->reqid = kp->reqid;
1449 up->mode = kp->mode;
1450 up->share = kp->share;
1451 up->optional = kp->optional;
1452 up->aalgos = kp->aalgos;
1453 up->ealgos = kp->ealgos;
1454 up->calgos = kp->calgos;
1455 }
1456
1457 return nla_put(skb, XFRMA_TMPL,
1458 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1459 }
1460
1461 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1462 {
1463 if (x->security) {
1464 return copy_sec_ctx(x->security, skb);
1465 }
1466 return 0;
1467 }
1468
1469 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1470 {
1471 if (xp->security)
1472 return copy_sec_ctx(xp->security, skb);
1473 return 0;
1474 }
1475 static inline size_t userpolicy_type_attrsize(void)
1476 {
1477 #ifdef CONFIG_XFRM_SUB_POLICY
1478 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1479 #else
1480 return 0;
1481 #endif
1482 }
1483
1484 #ifdef CONFIG_XFRM_SUB_POLICY
1485 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1486 {
1487 struct xfrm_userpolicy_type upt = {
1488 .type = type,
1489 };
1490
1491 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1492 }
1493
1494 #else
1495 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1496 {
1497 return 0;
1498 }
1499 #endif
1500
1501 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1502 {
1503 struct xfrm_dump_info *sp = ptr;
1504 struct xfrm_userpolicy_info *p;
1505 struct sk_buff *in_skb = sp->in_skb;
1506 struct sk_buff *skb = sp->out_skb;
1507 struct nlmsghdr *nlh;
1508 int err;
1509
1510 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
1511 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1512 if (nlh == NULL)
1513 return -EMSGSIZE;
1514
1515 p = nlmsg_data(nlh);
1516 copy_to_user_policy(xp, p, dir);
1517 err = copy_to_user_tmpl(xp, skb);
1518 if (!err)
1519 err = copy_to_user_sec_ctx(xp, skb);
1520 if (!err)
1521 err = copy_to_user_policy_type(xp->type, skb);
1522 if (!err)
1523 err = xfrm_mark_put(skb, &xp->mark);
1524 if (err) {
1525 nlmsg_cancel(skb, nlh);
1526 return err;
1527 }
1528 nlmsg_end(skb, nlh);
1529 return 0;
1530 }
1531
1532 static int xfrm_dump_policy_done(struct netlink_callback *cb)
1533 {
1534 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1535 struct net *net = sock_net(cb->skb->sk);
1536
1537 xfrm_policy_walk_done(walk, net);
1538 return 0;
1539 }
1540
1541 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1542 {
1543 struct net *net = sock_net(skb->sk);
1544 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1545 struct xfrm_dump_info info;
1546
1547 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1548 sizeof(cb->args) - sizeof(cb->args[0]));
1549
1550 info.in_skb = cb->skb;
1551 info.out_skb = skb;
1552 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1553 info.nlmsg_flags = NLM_F_MULTI;
1554
1555 if (!cb->args[0]) {
1556 cb->args[0] = 1;
1557 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1558 }
1559
1560 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1561
1562 return skb->len;
1563 }
1564
1565 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1566 struct xfrm_policy *xp,
1567 int dir, u32 seq)
1568 {
1569 struct xfrm_dump_info info;
1570 struct sk_buff *skb;
1571 int err;
1572
1573 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1574 if (!skb)
1575 return ERR_PTR(-ENOMEM);
1576
1577 info.in_skb = in_skb;
1578 info.out_skb = skb;
1579 info.nlmsg_seq = seq;
1580 info.nlmsg_flags = 0;
1581
1582 err = dump_one_policy(xp, dir, 0, &info);
1583 if (err) {
1584 kfree_skb(skb);
1585 return ERR_PTR(err);
1586 }
1587
1588 return skb;
1589 }
1590
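/* XFRM_MSG_GETPOLICY / XFRM_MSG_DELPOLICY handler: locate the policy by
 * index or by selector/security context, then either report it back to
 * the caller or delete it and broadcast the deletion.
 */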
1591 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1592 struct nlattr **attrs)
1593 {
1594 struct net *net = sock_net(skb->sk);
1595 struct xfrm_policy *xp;
1596 struct xfrm_userpolicy_id *p;
1597 u8 type = XFRM_POLICY_TYPE_MAIN;
1598 int err;
1599 struct km_event c;
1600 int delete;
1601 struct xfrm_mark m;
1602 u32 mark = xfrm_mark_get(attrs, &m);
1603
1604 p = nlmsg_data(nlh);
1605 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1606
1607 err = copy_from_user_policy_type(&type, attrs);
1608 if (err)
1609 return err;
1610
1611 err = verify_policy_dir(p->dir);
1612 if (err)
1613 return err;
1614
1615 if (p->index)
1616 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
1617 else {
1618 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1619 struct xfrm_sec_ctx *ctx;
1620
1621 err = verify_sec_ctx_len(attrs);
1622 if (err)
1623 return err;
1624
1625 ctx = NULL;
1626 if (rt) {
1627 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1628
1629 err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
1630 if (err)
1631 return err;
1632 }
1633 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
1634 ctx, delete, &err);
1635 security_xfrm_policy_free(ctx);
1636 }
1637 if (xp == NULL)
1638 return -ENOENT;
1639
1640 if (!delete) {
1641 struct sk_buff *resp_skb;
1642
1643 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1644 if (IS_ERR(resp_skb)) {
1645 err = PTR_ERR(resp_skb);
1646 } else {
1647 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
1648 NETLINK_CB(skb).portid);
1649 }
1650 } else {
1651 kuid_t loginuid = audit_get_loginuid(current);
1652 unsigned int sessionid = audit_get_sessionid(current);
1653 u32 sid;
1654
1655 security_task_getsecid(current, &sid);
1656 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
1657 sid);
1658
1659 if (err != 0)
1660 goto out;
1661
1662 c.data.byid = p->index;
1663 c.event = nlh->nlmsg_type;
1664 c.seq = nlh->nlmsg_seq;
1665 c.portid = nlh->nlmsg_pid;
1666 km_policy_notify(xp, p->dir, &c);
1667 }
1668
1669 out:
1670 xfrm_pol_put(xp);
1671 if (delete && err == 0)
1672 xfrm_garbage_collect(net);
1673 return err;
1674 }
1675
1676 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1677 struct nlattr **attrs)
1678 {
1679 struct net *net = sock_net(skb->sk);
1680 struct km_event c;
1681 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1682 struct xfrm_audit audit_info;
1683 int err;
1684
1685 audit_info.loginuid = audit_get_loginuid(current);
1686 audit_info.sessionid = audit_get_sessionid(current);
1687 security_task_getsecid(current, &audit_info.secid);
1688 err = xfrm_state_flush(net, p->proto, &audit_info);
1689 if (err) {
1690 if (err == -ESRCH) /* empty table */
1691 return 0;
1692 return err;
1693 }
1694 c.data.proto = p->proto;
1695 c.event = nlh->nlmsg_type;
1696 c.seq = nlh->nlmsg_seq;
1697 c.portid = nlh->nlmsg_pid;
1698 c.net = net;
1699 km_state_notify(NULL, &c);
1700
1701 return 0;
1702 }
1703
1704 static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
1705 {
1706 size_t replay_size = x->replay_esn ?
1707 xfrm_replay_state_esn_len(x->replay_esn) :
1708 sizeof(struct xfrm_replay_state);
1709
1710 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1711 + nla_total_size(replay_size)
1712 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1713 + nla_total_size(sizeof(struct xfrm_mark))
1714 + nla_total_size(4) /* XFRM_AE_RTHR */
1715 + nla_total_size(4); /* XFRM_AE_ETHR */
1716 }
1717
1718 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
1719 {
1720 struct xfrm_aevent_id *id;
1721 struct nlmsghdr *nlh;
1722 int err;
1723
1724 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1725 if (nlh == NULL)
1726 return -EMSGSIZE;
1727
1728 id = nlmsg_data(nlh);
1729 memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
1730 id->sa_id.spi = x->id.spi;
1731 id->sa_id.family = x->props.family;
1732 id->sa_id.proto = x->id.proto;
1733 memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
1734 id->reqid = x->props.reqid;
1735 id->flags = c->data.aevent;
1736
1737 if (x->replay_esn) {
1738 err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
1739 xfrm_replay_state_esn_len(x->replay_esn),
1740 x->replay_esn);
1741 } else {
1742 err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
1743 &x->replay);
1744 }
1745 if (err)
1746 goto out_cancel;
1747 err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1748 if (err)
1749 goto out_cancel;
1750
1751 if (id->flags & XFRM_AE_RTHR) {
1752 err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1753 if (err)
1754 goto out_cancel;
1755 }
1756 if (id->flags & XFRM_AE_ETHR) {
1757 err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
1758 x->replay_maxage * 10 / HZ);
1759 if (err)
1760 goto out_cancel;
1761 }
1762 err = xfrm_mark_put(skb, &x->mark);
1763 if (err)
1764 goto out_cancel;
1765
1766 return nlmsg_end(skb, nlh);
1767
1768 out_cancel:
1769 nlmsg_cancel(skb, nlh);
1770 return err;
1771 }
1772
1773 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1774 struct nlattr **attrs)
1775 {
1776 struct net *net = sock_net(skb->sk);
1777 struct xfrm_state *x;
1778 struct sk_buff *r_skb;
1779 int err;
1780 struct km_event c;
1781 u32 mark;
1782 struct xfrm_mark m;
1783 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1784 struct xfrm_usersa_id *id = &p->sa_id;
1785
1786 mark = xfrm_mark_get(attrs, &m);
1787
1788 x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
1789 if (x == NULL)
1790 return -ESRCH;
1791
1792 r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
1793 if (r_skb == NULL) {
1794 xfrm_state_put(x);
1795 return -ENOMEM;
1796 }
1797
1798 /*
1799  * XXX: is this lock really needed - none of the others
1800  * take the lock (the concern is things getting updated
1801 * while we are still reading) - jhs
1802 */
1803 spin_lock_bh(&x->lock);
1804 c.data.aevent = p->flags;
1805 c.seq = nlh->nlmsg_seq;
1806 c.portid = nlh->nlmsg_pid;
1807
1808 if (build_aevent(r_skb, x, &c) < 0)
1809 BUG();
1810 err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
1811 spin_unlock_bh(&x->lock);
1812 xfrm_state_put(x);
1813 return err;
1814 }
1815
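/* XFRM_MSG_NEWAE handler: replace replay state, lifetime and threshold
 * values on an existing valid state; NLM_F_REPLACE must be set and at
 * least one of the replay/lifetime attributes must be present.
 */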
1816 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1817 struct nlattr **attrs)
1818 {
1819 struct net *net = sock_net(skb->sk);
1820 struct xfrm_state *x;
1821 struct km_event c;
1822 int err = -EINVAL;
1823 u32 mark = 0;
1824 struct xfrm_mark m;
1825 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1826 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1827 struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
1828 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
1829
1830 if (!lt && !rp && !re)
1831 return err;
1832
1833 /* pedantic mode - thou shalt sayeth replaceth */
1834 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1835 return err;
1836
1837 mark = xfrm_mark_get(attrs, &m);
1838
1839 x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1840 if (x == NULL)
1841 return -ESRCH;
1842
1843 if (x->km.state != XFRM_STATE_VALID)
1844 goto out;
1845
1846 err = xfrm_replay_verify_len(x->replay_esn, re);
1847 if (err)
1848 goto out;
1849
1850 spin_lock_bh(&x->lock);
1851 xfrm_update_ae_params(x, attrs, 1);
1852 spin_unlock_bh(&x->lock);
1853
1854 c.event = nlh->nlmsg_type;
1855 c.seq = nlh->nlmsg_seq;
1856 c.portid = nlh->nlmsg_pid;
1857 c.data.aevent = XFRM_AE_CU;
1858 km_state_notify(x, &c);
1859 err = 0;
1860 out:
1861 xfrm_state_put(x);
1862 return err;
1863 }
1864
1865 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1866 struct nlattr **attrs)
1867 {
1868 struct net *net = sock_net(skb->sk);
1869 struct km_event c;
1870 u8 type = XFRM_POLICY_TYPE_MAIN;
1871 int err;
1872 struct xfrm_audit audit_info;
1873
1874 err = copy_from_user_policy_type(&type, attrs);
1875 if (err)
1876 return err;
1877
1878 audit_info.loginuid = audit_get_loginuid(current);
1879 audit_info.sessionid = audit_get_sessionid(current);
1880 security_task_getsecid(current, &audit_info.secid);
1881 err = xfrm_policy_flush(net, type, &audit_info);
1882 if (err) {
1883 if (err == -ESRCH) /* empty table */
1884 return 0;
1885 return err;
1886 }
1887
1888 c.data.type = type;
1889 c.event = nlh->nlmsg_type;
1890 c.seq = nlh->nlmsg_seq;
1891 c.portid = nlh->nlmsg_pid;
1892 c.net = net;
1893 km_policy_notify(NULL, 0, &c);
1894 return 0;
1895 }
1896
1897 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1898 struct nlattr **attrs)
1899 {
1900 struct net *net = sock_net(skb->sk);
1901 struct xfrm_policy *xp;
1902 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
1903 struct xfrm_userpolicy_info *p = &up->pol;
1904 u8 type = XFRM_POLICY_TYPE_MAIN;
1905 int err = -ENOENT;
1906 struct xfrm_mark m;
1907 u32 mark = xfrm_mark_get(attrs, &m);
1908
1909 err = copy_from_user_policy_type(&type, attrs);
1910 if (err)
1911 return err;
1912
1913 err = verify_policy_dir(p->dir);
1914 if (err)
1915 return err;
1916
1917 if (p->index)
1918 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
1919 else {
1920 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1921 struct xfrm_sec_ctx *ctx;
1922
1923 err = verify_sec_ctx_len(attrs);
1924 if (err)
1925 return err;
1926
1927 ctx = NULL;
1928 if (rt) {
1929 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1930
1931 err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
1932 if (err)
1933 return err;
1934 }
1935 xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
1936 &p->sel, ctx, 0, &err);
1937 security_xfrm_policy_free(ctx);
1938 }
1939 if (xp == NULL)
1940 return -ENOENT;
1941
1942 if (unlikely(xp->walk.dead))
1943 goto out;
1944
1945 err = 0;
1946 if (up->hard) {
1947 kuid_t loginuid = audit_get_loginuid(current);
1948 unsigned int sessionid = audit_get_sessionid(current);
1949 u32 sid;
1950
1951 security_task_getsecid(current, &sid);
1952 xfrm_policy_delete(xp, p->dir);
1953 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
1954
1955 } else {
1956 // reset the timers here?
1957         WARN(1, "Don't know what to do with soft policy expire\n");
1958 }
1959 km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
1960
1961 out:
1962 xfrm_pol_put(xp);
1963 return err;
1964 }
1965
1966 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1967 struct nlattr **attrs)
1968 {
1969 struct net *net = sock_net(skb->sk);
1970 struct xfrm_state *x;
1971 int err;
1972 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1973 struct xfrm_usersa_info *p = &ue->state;
1974 struct xfrm_mark m;
1975 u32 mark = xfrm_mark_get(attrs, &m);
1976
1977 x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
1978
1979 err = -ENOENT;
1980 if (x == NULL)
1981 return err;
1982
1983 spin_lock_bh(&x->lock);
1984 err = -EINVAL;
1985 if (x->km.state != XFRM_STATE_VALID)
1986 goto out;
1987 km_state_expired(x, ue->hard, nlh->nlmsg_pid);
1988
1989 if (ue->hard) {
1990 kuid_t loginuid = audit_get_loginuid(current);
1991 unsigned int sessionid = audit_get_sessionid(current);
1992 u32 sid;
1993
1994 security_task_getsecid(current, &sid);
1995 __xfrm_state_delete(x);
1996 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
1997 }
1998 err = 0;
1999 out:
2000 spin_unlock_bh(&x->lock);
2001 xfrm_state_put(x);
2002 return err;
2003 }
2004
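/* XFRM_MSG_ACQUIRE handler: build a temporary state and policy from the
 * acquire message and call km_query() for each template.
 */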
2005 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
2006 struct nlattr **attrs)
2007 {
2008 struct net *net = sock_net(skb->sk);
2009 struct xfrm_policy *xp;
2010 struct xfrm_user_tmpl *ut;
2011 int i;
2012 struct nlattr *rt = attrs[XFRMA_TMPL];
2013 struct xfrm_mark mark;
2014
2015 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
2016 struct xfrm_state *x = xfrm_state_alloc(net);
2017 int err = -ENOMEM;
2018
2019 if (!x)
2020 goto nomem;
2021
2022 xfrm_mark_get(attrs, &mark);
2023
2024 err = verify_newpolicy_info(&ua->policy);
2025 if (err)
2026 goto bad_policy;
2027
2028 /* build an XP */
2029 xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
2030 if (!xp)
2031 goto free_state;
2032
2033 memcpy(&x->id, &ua->id, sizeof(ua->id));
2034 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
2035 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
2036 xp->mark.m = x->mark.m = mark.m;
2037 xp->mark.v = x->mark.v = mark.v;
2038 ut = nla_data(rt);
2039     /* extract the templates and for each call km_query() */
2040 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
2041 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
2042 memcpy(&x->id, &t->id, sizeof(x->id));
2043 x->props.mode = t->mode;
2044 x->props.reqid = t->reqid;
2045 x->props.family = ut->family;
2046 t->aalgos = ua->aalgos;
2047 t->ealgos = ua->ealgos;
2048 t->calgos = ua->calgos;
2049 err = km_query(x, t, xp);
2050
2051 }
2052
2053 kfree(x);
2054 kfree(xp);
2055
2056 return 0;
2057
2058 bad_policy:
2059 WARN(1, "BAD policy passed\n");
2060 free_state:
2061 kfree(x);
2062 nomem:
2063 return err;
2064 }
2065
2066 #ifdef CONFIG_XFRM_MIGRATE
2067 static int copy_from_user_migrate(struct xfrm_migrate *ma,
2068 struct xfrm_kmaddress *k,
2069 struct nlattr **attrs, int *num)
2070 {
2071 struct nlattr *rt = attrs[XFRMA_MIGRATE];
2072 struct xfrm_user_migrate *um;
2073 int i, num_migrate;
2074
2075 if (k != NULL) {
2076 struct xfrm_user_kmaddress *uk;
2077
2078 uk = nla_data(attrs[XFRMA_KMADDRESS]);
2079 memcpy(&k->local, &uk->local, sizeof(k->local));
2080 memcpy(&k->remote, &uk->remote, sizeof(k->remote));
2081 k->family = uk->family;
2082 k->reserved = uk->reserved;
2083 }
2084
2085 um = nla_data(rt);
2086 num_migrate = nla_len(rt) / sizeof(*um);
2087
2088 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
2089 return -EINVAL;
2090
2091 for (i = 0; i < num_migrate; i++, um++, ma++) {
2092 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
2093 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
2094 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
2095 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
2096
2097 ma->proto = um->proto;
2098 ma->mode = um->mode;
2099 ma->reqid = um->reqid;
2100
2101 ma->old_family = um->old_family;
2102 ma->new_family = um->new_family;
2103 }
2104
2105 *num = i;
2106 return 0;
2107 }
2108
2109 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2110 struct nlattr **attrs)
2111 {
2112 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
2113 struct xfrm_migrate m[XFRM_MAX_DEPTH];
2114 struct xfrm_kmaddress km, *kmp;
2115 u8 type;
2116 int err;
2117 int n = 0;
2118 struct net *net = sock_net(skb->sk);
2119
2120 if (attrs[XFRMA_MIGRATE] == NULL)
2121 return -EINVAL;
2122
2123 kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
2124
2125 err = copy_from_user_policy_type(&type, attrs);
2126 if (err)
2127 return err;
2128
2129 err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
2130 if (err)
2131 return err;
2132
2133 if (!n)
2134 return 0;
2135
2136 xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net);
2137
2138 return 0;
2139 }
2140 #else
2141 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2142 struct nlattr **attrs)
2143 {
2144 return -ENOPROTOOPT;
2145 }
2146 #endif
2147
2148 #ifdef CONFIG_XFRM_MIGRATE
2149 static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
2150 {
2151 struct xfrm_user_migrate um;
2152
2153 memset(&um, 0, sizeof(um));
2154 um.proto = m->proto;
2155 um.mode = m->mode;
2156 um.reqid = m->reqid;
2157 um.old_family = m->old_family;
2158 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
2159 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
2160 um.new_family = m->new_family;
2161 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
2162 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
2163
2164 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
2165 }
2166
2167 static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
2168 {
2169 struct xfrm_user_kmaddress uk;
2170
2171 memset(&uk, 0, sizeof(uk));
2172 uk.family = k->family;
2173 uk.reserved = k->reserved;
2174 memcpy(&uk.local, &k->local, sizeof(uk.local));
2175 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
2176
2177 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
2178 }
2179
2180 static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
2181 {
2182 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
2183 + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
2184 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
2185 + userpolicy_type_attrsize();
2186 }
2187
2188 static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
2189 int num_migrate, const struct xfrm_kmaddress *k,
2190 const struct xfrm_selector *sel, u8 dir, u8 type)
2191 {
2192 const struct xfrm_migrate *mp;
2193 struct xfrm_userpolicy_id *pol_id;
2194 struct nlmsghdr *nlh;
2195 int i, err;
2196
2197 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
2198 if (nlh == NULL)
2199 return -EMSGSIZE;
2200
2201 pol_id = nlmsg_data(nlh);
2202 /* copy data from selector, dir, and type to the pol_id */
2203 memset(pol_id, 0, sizeof(*pol_id));
2204 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
2205 pol_id->dir = dir;
2206
2207 if (k != NULL) {
2208 err = copy_to_user_kmaddress(k, skb);
2209 if (err)
2210 goto out_cancel;
2211 }
2212 err = copy_to_user_policy_type(type, skb);
2213 if (err)
2214 goto out_cancel;
2215 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
2216 err = copy_to_user_migrate(mp, skb);
2217 if (err)
2218 goto out_cancel;
2219 }
2220
2221 return nlmsg_end(skb, nlh);
2222
2223 out_cancel:
2224 nlmsg_cancel(skb, nlh);
2225 return err;
2226 }
2227
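/* Broadcast a migrate event to the XFRMNLGRP_MIGRATE multicast group. */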
2228 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2229 const struct xfrm_migrate *m, int num_migrate,
2230 const struct xfrm_kmaddress *k)
2231 {
2232 struct net *net = &init_net;
2233 struct sk_buff *skb;
2234
2235 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
2236 if (skb == NULL)
2237 return -ENOMEM;
2238
2239 /* build migrate */
2240 if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
2241 BUG();
2242
2243 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
2244 }
2245 #else
2246 static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2247 const struct xfrm_migrate *m, int num_migrate,
2248 const struct xfrm_kmaddress *k)
2249 {
2250 return -ENOPROTOOPT;
2251 }
2252 #endif
2253
2254 #define XMSGSIZE(type) sizeof(struct type)
2255
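/* Minimum (fixed header) payload length of each xfrm message type,
 * indexed by type - XFRM_MSG_BASE.
 */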
2256 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
2257 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2258 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2259 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
2260 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2261 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2262 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2263 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
2264 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
2265 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
2266 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
2267 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
2268 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
2269 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
2270 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
2271 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2272 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
2273 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
2274 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
2275 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
2276 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
2277 };
2278
2279 #undef XMSGSIZE
2280
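/* Validation policy for the XFRMA_* netlink attributes. */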
2281 static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
2282 [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
2283 [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
2284 [XFRMA_LASTUSED] = { .type = NLA_U64},
2285 [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
2286 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
2287 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
2288 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
2289 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
2290 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
2291 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
2292 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
2293 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
2294 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
2295 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
2296 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
2297 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
2298 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
2299 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
2300 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
2301 [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
2302 [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
2303 [XFRMA_TFCPAD] = { .type = NLA_U32 },
2304 [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
2305 [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
2306 };
2307
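/* Per message type dispatch table: .doit handles a single request,
 * .dump/.done drive multi-part GET dumps.
 */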
2308 static const struct xfrm_link {
2309 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
2310 int (*dump)(struct sk_buff *, struct netlink_callback *);
2311 int (*done)(struct netlink_callback *);
2312 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
2313 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2314 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
2315 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
2316 .dump = xfrm_dump_sa,
2317 .done = xfrm_dump_sa_done },
2318 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2319 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
2320 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
2321 .dump = xfrm_dump_policy,
2322 .done = xfrm_dump_policy_done },
2323 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
2324 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
2325 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
2326 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2327 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
2328 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
2329 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
2330 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
2331 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
2332 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
2333 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
2334 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
2335 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
2336 };
2337
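/* Process one netlink message: bounds-check the type, require CAP_NET_ADMIN
 * in the owning user namespace, start a dump for GETSA/GETPOLICY when
 * NLM_F_DUMP is set, otherwise parse the attributes and call the .doit
 * handler.
 */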
2338 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2339 {
2340 struct net *net = sock_net(skb->sk);
2341 struct nlattr *attrs[XFRMA_MAX+1];
2342 const struct xfrm_link *link;
2343 int type, err;
2344
2345 type = nlh->nlmsg_type;
2346 if (type > XFRM_MSG_MAX)
2347 return -EINVAL;
2348
2349 type -= XFRM_MSG_BASE;
2350 link = &xfrm_dispatch[type];
2351
2352 /* All operations require privileges, even GET */
2353 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2354 return -EPERM;
2355
2356 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
2357 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
2358 (nlh->nlmsg_flags & NLM_F_DUMP)) {
2359 if (link->dump == NULL)
2360 return -EINVAL;
2361
2362 {
2363 struct netlink_dump_control c = {
2364 .dump = link->dump,
2365 .done = link->done,
2366 };
2367 return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
2368 }
2369 }
2370
2371 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
2372 xfrma_policy);
2373 if (err < 0)
2374 return err;
2375
2376 if (link->doit == NULL)
2377 return -EINVAL;
2378
2379 return link->doit(skb, nlh, attrs);
2380 }
2381
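/* Netlink input callback; serializes all configuration under xfrm_cfg_mutex. */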
2382 static void xfrm_netlink_rcv(struct sk_buff *skb)
2383 {
2384 struct net *net = sock_net(skb->sk);
2385
2386 mutex_lock(&net->xfrm.xfrm_cfg_mutex);
2387 netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
2388 mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
2389 }
2390
2391 static inline size_t xfrm_expire_msgsize(void)
2392 {
2393 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
2394 + nla_total_size(sizeof(struct xfrm_mark));
2395 }
2396
2397 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
2398 {
2399 struct xfrm_user_expire *ue;
2400 struct nlmsghdr *nlh;
2401 int err;
2402
2403 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
2404 if (nlh == NULL)
2405 return -EMSGSIZE;
2406
2407 ue = nlmsg_data(nlh);
2408 copy_to_user_state(x, &ue->state);
2409 ue->hard = (c->data.hard != 0) ? 1 : 0;
2410
2411 err = xfrm_mark_put(skb, &x->mark);
2412 if (err)
2413 return err;
2414
2415 return nlmsg_end(skb, nlh);
2416 }
2417
2418 static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
2419 {
2420 struct net *net = xs_net(x);
2421 struct sk_buff *skb;
2422
2423 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
2424 if (skb == NULL)
2425 return -ENOMEM;
2426
2427 if (build_expire(skb, x, c) < 0) {
2428 kfree_skb(skb);
2429 return -EMSGSIZE;
2430 }
2431
2432 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2433 }
2434
2435 static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
2436 {
2437 struct net *net = xs_net(x);
2438 struct sk_buff *skb;
2439
2440 skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
2441 if (skb == NULL)
2442 return -ENOMEM;
2443
2444 if (build_aevent(skb, x, c) < 0)
2445 BUG();
2446
2447 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
2448 }
2449
2450 static int xfrm_notify_sa_flush(const struct km_event *c)
2451 {
2452 struct net *net = c->net;
2453 struct xfrm_usersa_flush *p;
2454 struct nlmsghdr *nlh;
2455 struct sk_buff *skb;
2456 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
2457
2458 skb = nlmsg_new(len, GFP_ATOMIC);
2459 if (skb == NULL)
2460 return -ENOMEM;
2461
2462 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
2463 if (nlh == NULL) {
2464 kfree_skb(skb);
2465 return -EMSGSIZE;
2466 }
2467
2468 p = nlmsg_data(nlh);
2469 p->proto = c->data.proto;
2470
2471 nlmsg_end(skb, nlh);
2472
2473 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2474 }
2475
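/* Attribute space needed to dump state x (netlink headers not included). */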
2476 static inline size_t xfrm_sa_len(struct xfrm_state *x)
2477 {
2478 size_t l = 0;
2479 if (x->aead)
2480 l += nla_total_size(aead_len(x->aead));
2481 if (x->aalg) {
2482 l += nla_total_size(sizeof(struct xfrm_algo) +
2483 (x->aalg->alg_key_len + 7) / 8);
2484 l += nla_total_size(xfrm_alg_auth_len(x->aalg));
2485 }
2486 if (x->ealg)
2487 l += nla_total_size(xfrm_alg_len(x->ealg));
2488 if (x->calg)
2489 l += nla_total_size(sizeof(*x->calg));
2490 if (x->encap)
2491 l += nla_total_size(sizeof(*x->encap));
2492 if (x->tfcpad)
2493 l += nla_total_size(sizeof(x->tfcpad));
2494 if (x->replay_esn)
2495 l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
2496 if (x->security)
2497 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
2498 x->security->ctx_len);
2499 if (x->coaddr)
2500 l += nla_total_size(sizeof(*x->coaddr));
2501 if (x->props.extra_flags)
2502 l += nla_total_size(sizeof(x->props.extra_flags));
2503
2504 /* Must count x->lastused as it may become non-zero behind our back. */
2505 l += nla_total_size(sizeof(u64));
2506
2507 return l;
2508 }
2509
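/* Broadcast SA add/update/delete events to XFRMNLGRP_SA.  For deletes the
 * message carries an xfrm_usersa_id header and the full state as an
 * XFRMA_SA attribute.
 */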
2510 static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
2511 {
2512 struct net *net = xs_net(x);
2513 struct xfrm_usersa_info *p;
2514 struct xfrm_usersa_id *id;
2515 struct nlmsghdr *nlh;
2516 struct sk_buff *skb;
2517 int len = xfrm_sa_len(x);
2518 int headlen, err;
2519
2520 headlen = sizeof(*p);
2521 if (c->event == XFRM_MSG_DELSA) {
2522 len += nla_total_size(headlen);
2523 headlen = sizeof(*id);
2524 len += nla_total_size(sizeof(struct xfrm_mark));
2525 }
2526 len += NLMSG_ALIGN(headlen);
2527
2528 skb = nlmsg_new(len, GFP_ATOMIC);
2529 if (skb == NULL)
2530 return -ENOMEM;
2531
2532 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
2533 err = -EMSGSIZE;
2534 if (nlh == NULL)
2535 goto out_free_skb;
2536
2537 p = nlmsg_data(nlh);
2538 if (c->event == XFRM_MSG_DELSA) {
2539 struct nlattr *attr;
2540
2541 id = nlmsg_data(nlh);
2542 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2543 id->spi = x->id.spi;
2544 id->family = x->props.family;
2545 id->proto = x->id.proto;
2546
2547 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2548 err = -EMSGSIZE;
2549 if (attr == NULL)
2550 goto out_free_skb;
2551
2552 p = nla_data(attr);
2553 }
2554 err = copy_to_user_state_extra(x, p, skb);
2555 if (err)
2556 goto out_free_skb;
2557
2558 nlmsg_end(skb, nlh);
2559
2560 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2561
2562 out_free_skb:
2563 kfree_skb(skb);
2564 return err;
2565 }
2566
2567 static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
2568 {
2569
2570 switch (c->event) {
2571 case XFRM_MSG_EXPIRE:
2572 return xfrm_exp_state_notify(x, c);
2573 case XFRM_MSG_NEWAE:
2574 return xfrm_aevent_state_notify(x, c);
2575 case XFRM_MSG_DELSA:
2576 case XFRM_MSG_UPDSA:
2577 case XFRM_MSG_NEWSA:
2578 return xfrm_notify_sa(x, c);
2579 case XFRM_MSG_FLUSHSA:
2580 return xfrm_notify_sa_flush(c);
2581 default:
2582 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
2583 c->event);
2584 break;
2585 }
2586
2587 return 0;
2588
2589 }
2590
2591 static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2592 struct xfrm_policy *xp)
2593 {
2594 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2595 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2596 + nla_total_size(sizeof(struct xfrm_mark))
2597 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2598 + userpolicy_type_attrsize();
2599 }
2600
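/* Build an XFRM_MSG_ACQUIRE message carrying the state id, selector and
 * algorithm masks of x plus the triggering policy and its templates, so a
 * userspace key manager can negotiate the SA.
 */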
2601 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2602 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
2603 {
2604 __u32 seq = xfrm_get_acqseq();
2605 struct xfrm_user_acquire *ua;
2606 struct nlmsghdr *nlh;
2607 int err;
2608
2609 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2610 if (nlh == NULL)
2611 return -EMSGSIZE;
2612
2613 ua = nlmsg_data(nlh);
2614 memcpy(&ua->id, &x->id, sizeof(ua->id));
2615 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2616 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2617 copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
2618 ua->aalgos = xt->aalgos;
2619 ua->ealgos = xt->ealgos;
2620 ua->calgos = xt->calgos;
2621 ua->seq = x->km.seq = seq;
2622
2623 err = copy_to_user_tmpl(xp, skb);
2624 if (!err)
2625 err = copy_to_user_state_sec_ctx(x, skb);
2626 if (!err)
2627 err = copy_to_user_policy_type(xp->type, skb);
2628 if (!err)
2629 err = xfrm_mark_put(skb, &xp->mark);
2630 if (err) {
2631 nlmsg_cancel(skb, nlh);
2632 return err;
2633 }
2634
2635 return nlmsg_end(skb, nlh);
2636 }
2637
2638 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2639 struct xfrm_policy *xp)
2640 {
2641 struct net *net = xs_net(x);
2642 struct sk_buff *skb;
2643
2644 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2645 if (skb == NULL)
2646 return -ENOMEM;
2647
2648 if (build_acquire(skb, x, xt, xp) < 0)
2649 BUG();
2650
2651 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2652 }
2653
2654 /* User gives us xfrm_user_policy_info followed by an array of 0
2655 * or more templates.
2656 */
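/* Sketch (assumption, not part of this file) of how a caller might hand
 * that layout to the kernel for an IPv4 socket:
 *
 *	struct {
 *		struct xfrm_userpolicy_info info;
 *		struct xfrm_user_tmpl tmpl[1];
 *	} pol = { ... };
 *
 *	setsockopt(fd, IPPROTO_IP, IP_XFRM_POLICY, &pol, sizeof(pol));
 */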
2657 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2658 u8 *data, int len, int *dir)
2659 {
2660 struct net *net = sock_net(sk);
2661 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2662 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2663 struct xfrm_policy *xp;
2664 int nr;
2665
2666 switch (sk->sk_family) {
2667 case AF_INET:
2668 if (opt != IP_XFRM_POLICY) {
2669 *dir = -EOPNOTSUPP;
2670 return NULL;
2671 }
2672 break;
2673 #if IS_ENABLED(CONFIG_IPV6)
2674 case AF_INET6:
2675 if (opt != IPV6_XFRM_POLICY) {
2676 *dir = -EOPNOTSUPP;
2677 return NULL;
2678 }
2679 break;
2680 #endif
2681 default:
2682 *dir = -EINVAL;
2683 return NULL;
2684 }
2685
2686 *dir = -EINVAL;
2687
2688 if (len < sizeof(*p) ||
2689 verify_newpolicy_info(p))
2690 return NULL;
2691
2692 nr = ((len - sizeof(*p)) / sizeof(*ut));
2693 if (validate_tmpl(nr, ut, p->sel.family))
2694 return NULL;
2695
2696 if (p->dir > XFRM_POLICY_OUT)
2697 return NULL;
2698
2699 xp = xfrm_policy_alloc(net, GFP_ATOMIC);
2700 if (xp == NULL) {
2701 *dir = -ENOBUFS;
2702 return NULL;
2703 }
2704
2705 copy_from_user_policy(xp, p);
2706 xp->type = XFRM_POLICY_TYPE_MAIN;
2707 copy_templates(xp, ut, nr);
2708
2709 *dir = p->dir;
2710
2711 return xp;
2712 }
2713
2714 static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2715 {
2716 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2717 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2718 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2719 + nla_total_size(sizeof(struct xfrm_mark))
2720 + userpolicy_type_attrsize();
2721 }
2722
2723 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2724 int dir, const struct km_event *c)
2725 {
2726 struct xfrm_user_polexpire *upe;
2727 int hard = c->data.hard;
2728 struct nlmsghdr *nlh;
2729 int err;
2730
2731 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2732 if (nlh == NULL)
2733 return -EMSGSIZE;
2734
2735 upe = nlmsg_data(nlh);
2736 copy_to_user_policy(xp, &upe->pol, dir);
2737 err = copy_to_user_tmpl(xp, skb);
2738 if (!err)
2739 err = copy_to_user_sec_ctx(xp, skb);
2740 if (!err)
2741 err = copy_to_user_policy_type(xp->type, skb);
2742 if (!err)
2743 err = xfrm_mark_put(skb, &xp->mark);
2744 if (err) {
2745 nlmsg_cancel(skb, nlh);
2746 return err;
2747 }
2748 upe->hard = !!hard;
2749
2750 return nlmsg_end(skb, nlh);
2751 }
2752
2753 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2754 {
2755 struct net *net = xp_net(xp);
2756 struct sk_buff *skb;
2757
2758 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2759 if (skb == NULL)
2760 return -ENOMEM;
2761
2762 if (build_polexpire(skb, xp, dir, c) < 0)
2763 BUG();
2764
2765 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2766 }
2767
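/* Broadcast policy add/update/delete events to XFRMNLGRP_POLICY.  For
 * deletes the message carries an xfrm_userpolicy_id header and the full
 * policy as an XFRMA_POLICY attribute.
 */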
2768 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
2769 {
2770 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2771 struct net *net = xp_net(xp);
2772 struct xfrm_userpolicy_info *p;
2773 struct xfrm_userpolicy_id *id;
2774 struct nlmsghdr *nlh;
2775 struct sk_buff *skb;
2776 int headlen, err;
2777
2778 headlen = sizeof(*p);
2779 if (c->event == XFRM_MSG_DELPOLICY) {
2780 len += nla_total_size(headlen);
2781 headlen = sizeof(*id);
2782 }
2783 len += userpolicy_type_attrsize();
2784 len += nla_total_size(sizeof(struct xfrm_mark));
2785 len += NLMSG_ALIGN(headlen);
2786
2787 skb = nlmsg_new(len, GFP_ATOMIC);
2788 if (skb == NULL)
2789 return -ENOMEM;
2790
2791 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
2792 err = -EMSGSIZE;
2793 if (nlh == NULL)
2794 goto out_free_skb;
2795
2796 p = nlmsg_data(nlh);
2797 if (c->event == XFRM_MSG_DELPOLICY) {
2798 struct nlattr *attr;
2799
2800 id = nlmsg_data(nlh);
2801 memset(id, 0, sizeof(*id));
2802 id->dir = dir;
2803 if (c->data.byid)
2804 id->index = xp->index;
2805 else
2806 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2807
2808 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2809 err = -EMSGSIZE;
2810 if (attr == NULL)
2811 goto out_free_skb;
2812
2813 p = nla_data(attr);
2814 }
2815
2816 copy_to_user_policy(xp, p, dir);
2817 err = copy_to_user_tmpl(xp, skb);
2818 if (!err)
2819 err = copy_to_user_policy_type(xp->type, skb);
2820 if (!err)
2821 err = xfrm_mark_put(skb, &xp->mark);
2822 if (err)
2823 goto out_free_skb;
2824
2825 nlmsg_end(skb, nlh);
2826
2827 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2828
2829 out_free_skb:
2830 kfree_skb(skb);
2831 return err;
2832 }
2833
2834 static int xfrm_notify_policy_flush(const struct km_event *c)
2835 {
2836 struct net *net = c->net;
2837 struct nlmsghdr *nlh;
2838 struct sk_buff *skb;
2839 int err;
2840
2841 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2842 if (skb == NULL)
2843 return -ENOMEM;
2844
2845 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2846 err = -EMSGSIZE;
2847 if (nlh == NULL)
2848 goto out_free_skb;
2849 err = copy_to_user_policy_type(c->data.type, skb);
2850 if (err)
2851 goto out_free_skb;
2852
2853 nlmsg_end(skb, nlh);
2854
2855 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2856
2857 out_free_skb:
2858 kfree_skb(skb);
2859 return err;
2860 }
2861
2862 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2863 {
2864
2865 switch (c->event) {
2866 case XFRM_MSG_NEWPOLICY:
2867 case XFRM_MSG_UPDPOLICY:
2868 case XFRM_MSG_DELPOLICY:
2869 return xfrm_notify_policy(xp, dir, c);
2870 case XFRM_MSG_FLUSHPOLICY:
2871 return xfrm_notify_policy_flush(c);
2872 case XFRM_MSG_POLEXPIRE:
2873 return xfrm_exp_policy_notify(xp, dir, c);
2874 default:
2875 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
2876 c->event);
2877 }
2878
2879 return 0;
2880
2881 }
2882
2883 static inline size_t xfrm_report_msgsize(void)
2884 {
2885 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
2886 }
2887
2888 static int build_report(struct sk_buff *skb, u8 proto,
2889 struct xfrm_selector *sel, xfrm_address_t *addr)
2890 {
2891 struct xfrm_user_report *ur;
2892 struct nlmsghdr *nlh;
2893
2894 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
2895 if (nlh == NULL)
2896 return -EMSGSIZE;
2897
2898 ur = nlmsg_data(nlh);
2899 ur->proto = proto;
2900 memcpy(&ur->sel, sel, sizeof(ur->sel));
2901
2902 if (addr) {
2903 int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
2904 if (err) {
2905 nlmsg_cancel(skb, nlh);
2906 return err;
2907 }
2908 }
2909 return nlmsg_end(skb, nlh);
2910 }
2911
2912 static int xfrm_send_report(struct net *net, u8 proto,
2913 struct xfrm_selector *sel, xfrm_address_t *addr)
2914 {
2915 struct sk_buff *skb;
2916
2917 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
2918 if (skb == NULL)
2919 return -ENOMEM;
2920
2921 if (build_report(skb, proto, sel, addr) < 0)
2922 BUG();
2923
2924 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2925 }
2926
2927 static inline size_t xfrm_mapping_msgsize(void)
2928 {
2929 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
2930 }
2931
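/* Build an XFRM_MSG_MAPPING notification describing a NAT-T source
 * address/port change detected for the UDP-encapsulated ESP state x.
 */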
2932 static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
2933 xfrm_address_t *new_saddr, __be16 new_sport)
2934 {
2935 struct xfrm_user_mapping *um;
2936 struct nlmsghdr *nlh;
2937
2938 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
2939 if (nlh == NULL)
2940 return -EMSGSIZE;
2941
2942 um = nlmsg_data(nlh);
2943
2944 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
2945 um->id.spi = x->id.spi;
2946 um->id.family = x->props.family;
2947 um->id.proto = x->id.proto;
2948 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
2949 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
2950 um->new_sport = new_sport;
2951 um->old_sport = x->encap->encap_sport;
2952 um->reqid = x->props.reqid;
2953
2954 return nlmsg_end(skb, nlh);
2955 }
2956
2957 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
2958 __be16 sport)
2959 {
2960 struct net *net = xs_net(x);
2961 struct sk_buff *skb;
2962
2963 if (x->id.proto != IPPROTO_ESP)
2964 return -EINVAL;
2965
2966 if (!x->encap)
2967 return -EINVAL;
2968
2969 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
2970 if (skb == NULL)
2971 return -ENOMEM;
2972
2973 if (build_mapping(skb, x, ipaddr, sport) < 0)
2974 BUG();
2975
2976 return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
2977 }
2978
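/* Key manager callbacks that turn xfrm core events into netlink messages. */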
2979 static struct xfrm_mgr netlink_mgr = {
2980 .id = "netlink",
2981 .notify = xfrm_send_state_notify,
2982 .acquire = xfrm_send_acquire,
2983 .compile_policy = xfrm_compile_policy,
2984 .notify_policy = xfrm_send_policy_notify,
2985 .report = xfrm_send_report,
2986 .migrate = xfrm_send_migrate,
2987 .new_mapping = xfrm_send_mapping,
2988 };
2989
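/* Create the per-namespace NETLINK_XFRM kernel socket. */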
2990 static int __net_init xfrm_user_net_init(struct net *net)
2991 {
2992 struct sock *nlsk;
2993 struct netlink_kernel_cfg cfg = {
2994 .groups = XFRMNLGRP_MAX,
2995 .input = xfrm_netlink_rcv,
2996 };
2997
2998 nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
2999 if (nlsk == NULL)
3000 return -ENOMEM;
3001 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
3002 rcu_assign_pointer(net->xfrm.nlsk, nlsk);
3003 return 0;
3004 }
3005
3006 static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
3007 {
3008 struct net *net;
3009 list_for_each_entry(net, net_exit_list, exit_list)
3010 RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
3011 synchronize_net();
3012 list_for_each_entry(net, net_exit_list, exit_list)
3013 netlink_kernel_release(net->xfrm.nlsk_stash);
3014 }
3015
3016 static struct pernet_operations xfrm_user_net_ops = {
3017 .init = xfrm_user_net_init,
3018 .exit_batch = xfrm_user_net_exit,
3019 };
3020
3021 static int __init xfrm_user_init(void)
3022 {
3023 int rv;
3024
3025 printk(KERN_INFO "Initializing XFRM netlink socket\n");
3026
3027 rv = register_pernet_subsys(&xfrm_user_net_ops);
3028 if (rv < 0)
3029 return rv;
3030 rv = xfrm_register_km(&netlink_mgr);
3031 if (rv < 0)
3032 unregister_pernet_subsys(&xfrm_user_net_ops);
3033 return rv;
3034 }
3035
3036 static void __exit xfrm_user_exit(void)
3037 {
3038 xfrm_unregister_km(&netlink_mgr);
3039 unregister_pernet_subsys(&xfrm_user_net_ops);
3040 }
3041
3042 module_init(xfrm_user_init);
3043 module_exit(xfrm_user_exit);
3044 MODULE_LICENSE("GPL");
3045 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
3046