net/netfilter/nf_conntrack_netlink.c
1 /* Connection tracking via netlink socket. Allows for user space
2 * protocol helpers and general trouble making from userspace.
3 *
4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6  * (C) 2003 by Patrick McHardy <kaber@trash.net>
7 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
8 *
9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
11 *
12 * Further development of this code funded by Astaro AG (http://www.astaro.com)
13 *
14 * This software may be used and distributed according to the terms
15 * of the GNU General Public License, incorporated herein by reference.
16 */
17
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/rculist.h>
22 #include <linux/rculist_nulls.h>
23 #include <linux/types.h>
24 #include <linux/timer.h>
25 #include <linux/security.h>
26 #include <linux/skbuff.h>
27 #include <linux/errno.h>
28 #include <linux/netlink.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/slab.h>
32
33 #include <linux/netfilter.h>
34 #include <net/netlink.h>
35 #include <net/sock.h>
36 #include <net/netfilter/nf_conntrack.h>
37 #include <net/netfilter/nf_conntrack_core.h>
38 #include <net/netfilter/nf_conntrack_expect.h>
39 #include <net/netfilter/nf_conntrack_helper.h>
40 #include <net/netfilter/nf_conntrack_l3proto.h>
41 #include <net/netfilter/nf_conntrack_l4proto.h>
42 #include <net/netfilter/nf_conntrack_tuple.h>
43 #include <net/netfilter/nf_conntrack_acct.h>
44 #include <net/netfilter/nf_conntrack_zones.h>
45 #include <net/netfilter/nf_conntrack_timestamp.h>
46 #ifdef CONFIG_NF_NAT_NEEDED
47 #include <net/netfilter/nf_nat_core.h>
48 #include <net/netfilter/nf_nat_protocol.h>
49 #include <net/netfilter/nf_nat_helper.h>
50 #endif
51
52 #include <linux/netfilter/nfnetlink.h>
53 #include <linux/netfilter/nfnetlink_conntrack.h>
54
55 MODULE_LICENSE("GPL");
56
57 static char __initdata version[] = "0.93";
58
59 static inline int
60 ctnetlink_dump_tuples_proto(struct sk_buff *skb,
61 const struct nf_conntrack_tuple *tuple,
62 struct nf_conntrack_l4proto *l4proto)
63 {
64 int ret = 0;
65 struct nlattr *nest_parms;
66
67 nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
68 if (!nest_parms)
69 goto nla_put_failure;
70 if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
71 goto nla_put_failure;
72
73 if (likely(l4proto->tuple_to_nlattr))
74 ret = l4proto->tuple_to_nlattr(skb, tuple);
75
76 nla_nest_end(skb, nest_parms);
77
78 return ret;
79
80 nla_put_failure:
81 return -1;
82 }
83
84 static inline int
85 ctnetlink_dump_tuples_ip(struct sk_buff *skb,
86 const struct nf_conntrack_tuple *tuple,
87 struct nf_conntrack_l3proto *l3proto)
88 {
89 int ret = 0;
90 struct nlattr *nest_parms;
91
92 nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
93 if (!nest_parms)
94 goto nla_put_failure;
95
96 if (likely(l3proto->tuple_to_nlattr))
97 ret = l3proto->tuple_to_nlattr(skb, tuple);
98
99 nla_nest_end(skb, nest_parms);
100
101 return ret;
102
103 nla_put_failure:
104 return -1;
105 }
106
107 static int
108 ctnetlink_dump_tuples(struct sk_buff *skb,
109 const struct nf_conntrack_tuple *tuple)
110 {
111 int ret;
112 struct nf_conntrack_l3proto *l3proto;
113 struct nf_conntrack_l4proto *l4proto;
114
115 rcu_read_lock();
116 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
117 ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
118
119 if (ret >= 0) {
120 l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
121 tuple->dst.protonum);
122 ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
123 }
124 rcu_read_unlock();
125 return ret;
126 }
127
128 static inline int
129 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
130 {
131 if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
132 goto nla_put_failure;
133 return 0;
134
135 nla_put_failure:
136 return -1;
137 }
138
139 static inline int
140 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
141 {
142 long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
143
144 if (timeout < 0)
145 timeout = 0;
146
147 if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
148 goto nla_put_failure;
149 return 0;
150
151 nla_put_failure:
152 return -1;
153 }
154
155 static inline int
156 ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
157 {
158 struct nf_conntrack_l4proto *l4proto;
159 struct nlattr *nest_proto;
160 int ret;
161
162 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
163 if (!l4proto->to_nlattr)
164 return 0;
165
166 nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
167 if (!nest_proto)
168 goto nla_put_failure;
169
170 ret = l4proto->to_nlattr(skb, nest_proto, ct);
171
172 nla_nest_end(skb, nest_proto);
173
174 return ret;
175
176 nla_put_failure:
177 return -1;
178 }
179
180 static inline int
181 ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
182 {
183 struct nlattr *nest_helper;
184 const struct nf_conn_help *help = nfct_help(ct);
185 struct nf_conntrack_helper *helper;
186
187 if (!help)
188 return 0;
189
190 helper = rcu_dereference(help->helper);
191 if (!helper)
192 goto out;
193
194 nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
195 if (!nest_helper)
196 goto nla_put_failure;
197 if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
198 goto nla_put_failure;
199
200 if (helper->to_nlattr)
201 helper->to_nlattr(skb, ct);
202
203 nla_nest_end(skb, nest_helper);
204 out:
205 return 0;
206
207 nla_put_failure:
208 return -1;
209 }
210
211 static int
212 dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
213 enum ip_conntrack_dir dir)
214 {
215 	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
216 struct nlattr *nest_count;
217
218 nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
219 if (!nest_count)
220 goto nla_put_failure;
221
222 if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
223 nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
224 goto nla_put_failure;
225
226 nla_nest_end(skb, nest_count);
227
228 return 0;
229
230 nla_put_failure:
231 return -1;
232 }
233
234 static int
235 ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
236 enum ip_conntrack_dir dir, int type)
237 {
238 struct nf_conn_counter *acct;
239 u64 pkts, bytes;
240
241 acct = nf_conn_acct_find(ct);
242 if (!acct)
243 return 0;
244
245 if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
246 pkts = atomic64_xchg(&acct[dir].packets, 0);
247 bytes = atomic64_xchg(&acct[dir].bytes, 0);
248 } else {
249 pkts = atomic64_read(&acct[dir].packets);
250 bytes = atomic64_read(&acct[dir].bytes);
251 }
252 return dump_counters(skb, pkts, bytes, dir);
253 }
254
255 static int
256 ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
257 {
258 struct nlattr *nest_count;
259 const struct nf_conn_tstamp *tstamp;
260
261 tstamp = nf_conn_tstamp_find(ct);
262 if (!tstamp)
263 return 0;
264
265 nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
266 if (!nest_count)
267 goto nla_put_failure;
268
269 if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
270 (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
271 cpu_to_be64(tstamp->stop))))
272 goto nla_put_failure;
273 nla_nest_end(skb, nest_count);
274
275 return 0;
276
277 nla_put_failure:
278 return -1;
279 }
280
281 #ifdef CONFIG_NF_CONNTRACK_MARK
282 static inline int
283 ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
284 {
285 if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
286 goto nla_put_failure;
287 return 0;
288
289 nla_put_failure:
290 return -1;
291 }
292 #else
293 #define ctnetlink_dump_mark(a, b) (0)
294 #endif
295
296 #ifdef CONFIG_NF_CONNTRACK_SECMARK
297 static inline int
298 ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
299 {
300 struct nlattr *nest_secctx;
301 int len, ret;
302 char *secctx;
303
304 ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
305 if (ret)
306 return 0;
307
308 ret = -1;
309 nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
310 if (!nest_secctx)
311 goto nla_put_failure;
312
313 if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
314 goto nla_put_failure;
315 nla_nest_end(skb, nest_secctx);
316
317 ret = 0;
318 nla_put_failure:
319 security_release_secctx(secctx, len);
320 return ret;
321 }
322 #else
323 #define ctnetlink_dump_secctx(a, b) (0)
324 #endif
325
326 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
327
328 static inline int
329 ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
330 {
331 struct nlattr *nest_parms;
332
333 if (!(ct->status & IPS_EXPECTED))
334 return 0;
335
336 nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
337 if (!nest_parms)
338 goto nla_put_failure;
339 if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
340 goto nla_put_failure;
341 nla_nest_end(skb, nest_parms);
342
343 return 0;
344
345 nla_put_failure:
346 return -1;
347 }
348
349 #ifdef CONFIG_NF_NAT_NEEDED
350 static int
351 dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
352 {
353 struct nlattr *nest_parms;
354
355 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
356 if (!nest_parms)
357 goto nla_put_failure;
358
359 if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
360 htonl(natseq->correction_pos)) ||
361 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
362 htonl(natseq->offset_before)) ||
363 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
364 htonl(natseq->offset_after)))
365 goto nla_put_failure;
366
367 nla_nest_end(skb, nest_parms);
368
369 return 0;
370
371 nla_put_failure:
372 return -1;
373 }
374
375 static inline int
376 ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
377 {
378 struct nf_nat_seq *natseq;
379 struct nf_conn_nat *nat = nfct_nat(ct);
380
381 if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
382 return 0;
383
384 natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
385 if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
386 return -1;
387
388 natseq = &nat->seq[IP_CT_DIR_REPLY];
389 if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
390 return -1;
391
392 return 0;
393 }
394 #else
395 #define ctnetlink_dump_nat_seq_adj(a, b) (0)
396 #endif
397
398 static inline int
399 ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
400 {
401 if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
402 goto nla_put_failure;
403 return 0;
404
405 nla_put_failure:
406 return -1;
407 }
408
409 static inline int
410 ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
411 {
412 if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
413 goto nla_put_failure;
414 return 0;
415
416 nla_put_failure:
417 return -1;
418 }
419
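/* Fill a complete conntrack message for @ct: both tuples, zone, status,
 * timeout, counters, timestamps, protocol info, helper, mark, secctx, id,
 * use count, master tuple and NAT sequence adjustments.  Used for GET
 * replies and table dumps. */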
420 static int
421 ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
422 struct nf_conn *ct)
423 {
424 struct nlmsghdr *nlh;
425 struct nfgenmsg *nfmsg;
426 struct nlattr *nest_parms;
427 unsigned int flags = pid ? NLM_F_MULTI : 0, event;
428
429 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
430 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
431 if (nlh == NULL)
432 goto nlmsg_failure;
433
434 nfmsg = nlmsg_data(nlh);
435 nfmsg->nfgen_family = nf_ct_l3num(ct);
436 nfmsg->version = NFNETLINK_V0;
437 nfmsg->res_id = 0;
438
439 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
440 if (!nest_parms)
441 goto nla_put_failure;
442 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
443 goto nla_put_failure;
444 nla_nest_end(skb, nest_parms);
445
446 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
447 if (!nest_parms)
448 goto nla_put_failure;
449 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
450 goto nla_put_failure;
451 nla_nest_end(skb, nest_parms);
452
453 if (nf_ct_zone(ct) &&
454 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
455 goto nla_put_failure;
456
457 if (ctnetlink_dump_status(skb, ct) < 0 ||
458 ctnetlink_dump_timeout(skb, ct) < 0 ||
459 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 ||
460 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 ||
461 ctnetlink_dump_timestamp(skb, ct) < 0 ||
462 ctnetlink_dump_protoinfo(skb, ct) < 0 ||
463 ctnetlink_dump_helpinfo(skb, ct) < 0 ||
464 ctnetlink_dump_mark(skb, ct) < 0 ||
465 ctnetlink_dump_secctx(skb, ct) < 0 ||
466 ctnetlink_dump_id(skb, ct) < 0 ||
467 ctnetlink_dump_use(skb, ct) < 0 ||
468 ctnetlink_dump_master(skb, ct) < 0 ||
469 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
470 goto nla_put_failure;
471
472 nlmsg_end(skb, nlh);
473 return skb->len;
474
475 nlmsg_failure:
476 nla_put_failure:
477 nlmsg_cancel(skb, nlh);
478 return -1;
479 }
480
481 static inline size_t
482 ctnetlink_proto_size(const struct nf_conn *ct)
483 {
484 struct nf_conntrack_l3proto *l3proto;
485 struct nf_conntrack_l4proto *l4proto;
486 size_t len = 0;
487
488 rcu_read_lock();
489 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
490 len += l3proto->nla_size;
491
492 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
493 len += l4proto->nla_size;
494 rcu_read_unlock();
495
496 return len;
497 }
498
499 static inline size_t
500 ctnetlink_counters_size(const struct nf_conn *ct)
501 {
502 if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
503 return 0;
504 return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
505 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
506 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
507 ;
508 }
509
510 static inline int
511 ctnetlink_secctx_size(const struct nf_conn *ct)
512 {
513 #ifdef CONFIG_NF_CONNTRACK_SECMARK
514 int len, ret;
515
516 ret = security_secid_to_secctx(ct->secmark, NULL, &len);
517 if (ret)
518 return 0;
519
520 return nla_total_size(0) /* CTA_SECCTX */
521 + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
522 #else
523 return 0;
524 #endif
525 }
526
527 static inline size_t
528 ctnetlink_timestamp_size(const struct nf_conn *ct)
529 {
530 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
531 if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
532 return 0;
533 return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
534 #else
535 return 0;
536 #endif
537 }
538
539 static inline size_t
540 ctnetlink_nlmsg_size(const struct nf_conn *ct)
541 {
542 return NLMSG_ALIGN(sizeof(struct nfgenmsg))
543 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
544 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
545 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
546 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
547 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
548 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
549 + ctnetlink_counters_size(ct)
550 + ctnetlink_timestamp_size(ct)
551 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
552 + nla_total_size(0) /* CTA_PROTOINFO */
553 + nla_total_size(0) /* CTA_HELP */
554 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
555 + ctnetlink_secctx_size(ct)
556 #ifdef CONFIG_NF_NAT_NEEDED
557 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
558 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
559 #endif
560 #ifdef CONFIG_NF_CONNTRACK_MARK
561 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
562 #endif
563 + ctnetlink_proto_size(ct)
564 ;
565 }
566
567 #ifdef CONFIG_NF_CONNTRACK_EVENTS
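/* Conntrack event callback: map the IPCT_* event bits to a message type and
 * netlink multicast group, build the message and send it to listeners. */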
568 static int
569 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
570 {
571 struct net *net;
572 struct nlmsghdr *nlh;
573 struct nfgenmsg *nfmsg;
574 struct nlattr *nest_parms;
575 struct nf_conn *ct = item->ct;
576 struct sk_buff *skb;
577 unsigned int type;
578 unsigned int flags = 0, group;
579 int err;
580
581 /* ignore our fake conntrack entry */
582 if (nf_ct_is_untracked(ct))
583 return 0;
584
585 if (events & (1 << IPCT_DESTROY)) {
586 type = IPCTNL_MSG_CT_DELETE;
587 group = NFNLGRP_CONNTRACK_DESTROY;
588 } else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
589 type = IPCTNL_MSG_CT_NEW;
590 flags = NLM_F_CREATE|NLM_F_EXCL;
591 group = NFNLGRP_CONNTRACK_NEW;
592 } else if (events) {
593 type = IPCTNL_MSG_CT_NEW;
594 group = NFNLGRP_CONNTRACK_UPDATE;
595 } else
596 return 0;
597
598 net = nf_ct_net(ct);
599 if (!item->report && !nfnetlink_has_listeners(net, group))
600 return 0;
601
602 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
603 if (skb == NULL)
604 goto errout;
605
606 type |= NFNL_SUBSYS_CTNETLINK << 8;
607 nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
608 if (nlh == NULL)
609 goto nlmsg_failure;
610
611 nfmsg = nlmsg_data(nlh);
612 nfmsg->nfgen_family = nf_ct_l3num(ct);
613 nfmsg->version = NFNETLINK_V0;
614 nfmsg->res_id = 0;
615
616 rcu_read_lock();
617 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
618 if (!nest_parms)
619 goto nla_put_failure;
620 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
621 goto nla_put_failure;
622 nla_nest_end(skb, nest_parms);
623
624 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
625 if (!nest_parms)
626 goto nla_put_failure;
627 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
628 goto nla_put_failure;
629 nla_nest_end(skb, nest_parms);
630
631 if (nf_ct_zone(ct) &&
632 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
633 goto nla_put_failure;
634
635 if (ctnetlink_dump_id(skb, ct) < 0)
636 goto nla_put_failure;
637
638 if (ctnetlink_dump_status(skb, ct) < 0)
639 goto nla_put_failure;
640
641 if (events & (1 << IPCT_DESTROY)) {
642 if (ctnetlink_dump_counters(skb, ct,
643 IP_CT_DIR_ORIGINAL, type) < 0 ||
644 ctnetlink_dump_counters(skb, ct,
645 IP_CT_DIR_REPLY, type) < 0 ||
646 ctnetlink_dump_timestamp(skb, ct) < 0)
647 goto nla_put_failure;
648 } else {
649 if (ctnetlink_dump_timeout(skb, ct) < 0)
650 goto nla_put_failure;
651
652 if (events & (1 << IPCT_PROTOINFO)
653 && ctnetlink_dump_protoinfo(skb, ct) < 0)
654 goto nla_put_failure;
655
656 if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
657 && ctnetlink_dump_helpinfo(skb, ct) < 0)
658 goto nla_put_failure;
659
660 #ifdef CONFIG_NF_CONNTRACK_SECMARK
661 if ((events & (1 << IPCT_SECMARK) || ct->secmark)
662 && ctnetlink_dump_secctx(skb, ct) < 0)
663 goto nla_put_failure;
664 #endif
665
666 if (events & (1 << IPCT_RELATED) &&
667 ctnetlink_dump_master(skb, ct) < 0)
668 goto nla_put_failure;
669
670 if (events & (1 << IPCT_NATSEQADJ) &&
671 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
672 goto nla_put_failure;
673 }
674
675 #ifdef CONFIG_NF_CONNTRACK_MARK
676 if ((events & (1 << IPCT_MARK) || ct->mark)
677 && ctnetlink_dump_mark(skb, ct) < 0)
678 goto nla_put_failure;
679 #endif
680 rcu_read_unlock();
681
682 nlmsg_end(skb, nlh);
683 err = nfnetlink_send(skb, net, item->pid, group, item->report,
684 GFP_ATOMIC);
685 if (err == -ENOBUFS || err == -EAGAIN)
686 return -ENOBUFS;
687
688 return 0;
689
690 nla_put_failure:
691 rcu_read_unlock();
692 nlmsg_cancel(skb, nlh);
693 nlmsg_failure:
694 kfree_skb(skb);
695 errout:
696 if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
697 return -ENOBUFS;
698
699 return 0;
700 }
701 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
702
703 static int ctnetlink_done(struct netlink_callback *cb)
704 {
705 if (cb->args[1])
706 nf_ct_put((struct nf_conn *)cb->args[1]);
707 if (cb->data)
708 kfree(cb->data);
709 return 0;
710 }
711
712 struct ctnetlink_dump_filter {
713 struct {
714 u_int32_t val;
715 u_int32_t mask;
716 } mark;
717 };
718
719 static int
720 ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
721 {
722 struct net *net = sock_net(skb->sk);
723 struct nf_conn *ct, *last;
724 struct nf_conntrack_tuple_hash *h;
725 struct hlist_nulls_node *n;
726 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
727 u_int8_t l3proto = nfmsg->nfgen_family;
728 int res;
729 #ifdef CONFIG_NF_CONNTRACK_MARK
730 const struct ctnetlink_dump_filter *filter = cb->data;
731 #endif
732
733 spin_lock_bh(&nf_conntrack_lock);
734 last = (struct nf_conn *)cb->args[1];
735 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
736 restart:
737 hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
738 hnnode) {
739 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
740 continue;
741 ct = nf_ct_tuplehash_to_ctrack(h);
742 			/* Dump entries of a given L3 protocol number.
743 			 * If it is not specified, i.e. l3proto == 0,
744 			 * then dump everything. */
745 if (l3proto && nf_ct_l3num(ct) != l3proto)
746 continue;
747 if (cb->args[1]) {
748 if (ct != last)
749 continue;
750 cb->args[1] = 0;
751 }
752 #ifdef CONFIG_NF_CONNTRACK_MARK
753 if (filter && !((ct->mark & filter->mark.mask) ==
754 filter->mark.val)) {
755 continue;
756 }
757 #endif
758 rcu_read_lock();
759 res =
760 ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
761 cb->nlh->nlmsg_seq,
762 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
763 ct);
764 rcu_read_unlock();
765 if (res < 0) {
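				/* skb is full: hold a reference to this entry so
				 * the next dump round can resume right here; the
				 * reference is dropped by the next round or by
				 * ctnetlink_done(). */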
766 nf_conntrack_get(&ct->ct_general);
767 cb->args[1] = (unsigned long)ct;
768 goto out;
769 }
770 }
771 if (cb->args[1]) {
772 cb->args[1] = 0;
773 goto restart;
774 }
775 }
776 out:
777 spin_unlock_bh(&nf_conntrack_lock);
778 if (last)
779 nf_ct_put(last);
780
781 return skb->len;
782 }
783
784 static inline int
785 ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
786 {
787 struct nlattr *tb[CTA_IP_MAX+1];
788 struct nf_conntrack_l3proto *l3proto;
789 int ret = 0;
790
791 nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);
792
793 rcu_read_lock();
794 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
795
796 if (likely(l3proto->nlattr_to_tuple)) {
797 ret = nla_validate_nested(attr, CTA_IP_MAX,
798 l3proto->nla_policy);
799 if (ret == 0)
800 ret = l3proto->nlattr_to_tuple(tb, tuple);
801 }
802
803 rcu_read_unlock();
804
805 return ret;
806 }
807
808 static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
809 [CTA_PROTO_NUM] = { .type = NLA_U8 },
810 };
811
812 static inline int
813 ctnetlink_parse_tuple_proto(struct nlattr *attr,
814 struct nf_conntrack_tuple *tuple)
815 {
816 struct nlattr *tb[CTA_PROTO_MAX+1];
817 struct nf_conntrack_l4proto *l4proto;
818 int ret = 0;
819
820 ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
821 if (ret < 0)
822 return ret;
823
824 if (!tb[CTA_PROTO_NUM])
825 return -EINVAL;
826 tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
827
828 rcu_read_lock();
829 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
830
831 if (likely(l4proto->nlattr_to_tuple)) {
832 ret = nla_validate_nested(attr, CTA_PROTO_MAX,
833 l4proto->nla_policy);
834 if (ret == 0)
835 ret = l4proto->nlattr_to_tuple(tb, tuple);
836 }
837
838 rcu_read_unlock();
839
840 return ret;
841 }
842
843 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
844 [CTA_TUPLE_IP] = { .type = NLA_NESTED },
845 [CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
846 };
847
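/* Parse a CTA_TUPLE_ORIG/REPLY/MASTER attribute into a conntrack tuple;
 * the tuple direction is derived from the attribute type. */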
848 static int
849 ctnetlink_parse_tuple(const struct nlattr * const cda[],
850 struct nf_conntrack_tuple *tuple,
851 enum ctattr_type type, u_int8_t l3num)
852 {
853 struct nlattr *tb[CTA_TUPLE_MAX+1];
854 int err;
855
856 memset(tuple, 0, sizeof(*tuple));
857
858 nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
859
860 if (!tb[CTA_TUPLE_IP])
861 return -EINVAL;
862
863 tuple->src.l3num = l3num;
864
865 err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
866 if (err < 0)
867 return err;
868
869 if (!tb[CTA_TUPLE_PROTO])
870 return -EINVAL;
871
872 err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
873 if (err < 0)
874 return err;
875
876 /* orig and expect tuples get DIR_ORIGINAL */
877 if (type == CTA_TUPLE_REPLY)
878 tuple->dst.dir = IP_CT_DIR_REPLY;
879 else
880 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
881
882 return 0;
883 }
884
885 static int
886 ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
887 {
888 if (attr)
889 #ifdef CONFIG_NF_CONNTRACK_ZONES
890 *zone = ntohs(nla_get_be16(attr));
891 #else
892 return -EOPNOTSUPP;
893 #endif
894 else
895 *zone = 0;
896
897 return 0;
898 }
899
900 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
901 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING },
902 };
903
904 static inline int
905 ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
906 struct nlattr **helpinfo)
907 {
908 struct nlattr *tb[CTA_HELP_MAX+1];
909
910 nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
911
912 if (!tb[CTA_HELP_NAME])
913 return -EINVAL;
914
915 *helper_name = nla_data(tb[CTA_HELP_NAME]);
916
917 if (tb[CTA_HELP_INFO])
918 *helpinfo = tb[CTA_HELP_INFO];
919
920 return 0;
921 }
922
923 static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
924 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
925 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
926 [CTA_STATUS] = { .type = NLA_U32 },
927 [CTA_PROTOINFO] = { .type = NLA_NESTED },
928 [CTA_HELP] = { .type = NLA_NESTED },
929 [CTA_NAT_SRC] = { .type = NLA_NESTED },
930 [CTA_TIMEOUT] = { .type = NLA_U32 },
931 [CTA_MARK] = { .type = NLA_U32 },
932 [CTA_ID] = { .type = NLA_U32 },
933 [CTA_NAT_DST] = { .type = NLA_NESTED },
934 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
935 [CTA_ZONE] = { .type = NLA_U16 },
936 [CTA_MARK_MASK] = { .type = NLA_U32 },
937 };
938
939 static int
940 ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
941 const struct nlmsghdr *nlh,
942 const struct nlattr * const cda[])
943 {
944 struct net *net = sock_net(ctnl);
945 struct nf_conntrack_tuple_hash *h;
946 struct nf_conntrack_tuple tuple;
947 struct nf_conn *ct;
948 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
949 u_int8_t u3 = nfmsg->nfgen_family;
950 u16 zone;
951 int err;
952
953 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
954 if (err < 0)
955 return err;
956
957 if (cda[CTA_TUPLE_ORIG])
958 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
959 else if (cda[CTA_TUPLE_REPLY])
960 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
961 else {
962 /* Flush the whole table */
963 nf_conntrack_flush_report(net,
964 NETLINK_CB(skb).pid,
965 nlmsg_report(nlh));
966 return 0;
967 }
968
969 if (err < 0)
970 return err;
971
972 h = nf_conntrack_find_get(net, zone, &tuple);
973 if (!h)
974 return -ENOENT;
975
976 ct = nf_ct_tuplehash_to_ctrack(h);
977
978 if (cda[CTA_ID]) {
979 u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
980 if (id != (u32)(unsigned long)ct) {
981 nf_ct_put(ct);
982 return -ENOENT;
983 }
984 }
985
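	/* Stopping the timer means we own the teardown: report the DESTROY
	 * event ourselves; if reporting fails, park the entry on the dying
	 * list so the event can be retried later. */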
986 if (del_timer(&ct->timeout)) {
987 if (nf_conntrack_event_report(IPCT_DESTROY, ct,
988 NETLINK_CB(skb).pid,
989 nlmsg_report(nlh)) < 0) {
990 nf_ct_delete_from_lists(ct);
991 /* we failed to report the event, try later */
992 nf_ct_insert_dying_list(ct);
993 nf_ct_put(ct);
994 return 0;
995 }
996 /* death_by_timeout would report the event again */
997 set_bit(IPS_DYING_BIT, &ct->status);
998 nf_ct_delete_from_lists(ct);
999 nf_ct_put(ct);
1000 }
1001 nf_ct_put(ct);
1002
1003 return 0;
1004 }
1005
1006 static int
1007 ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
1008 const struct nlmsghdr *nlh,
1009 const struct nlattr * const cda[])
1010 {
1011 struct net *net = sock_net(ctnl);
1012 struct nf_conntrack_tuple_hash *h;
1013 struct nf_conntrack_tuple tuple;
1014 struct nf_conn *ct;
1015 struct sk_buff *skb2 = NULL;
1016 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1017 u_int8_t u3 = nfmsg->nfgen_family;
1018 u16 zone;
1019 int err;
1020
1021 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1022 struct netlink_dump_control c = {
1023 .dump = ctnetlink_dump_table,
1024 .done = ctnetlink_done,
1025 };
1026 #ifdef CONFIG_NF_CONNTRACK_MARK
1027 if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
1028 struct ctnetlink_dump_filter *filter;
1029
1030 filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
1031 GFP_ATOMIC);
1032 if (filter == NULL)
1033 return -ENOMEM;
1034
1035 filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
1036 filter->mark.mask =
1037 ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
1038 c.data = filter;
1039 }
1040 #endif
1041 return netlink_dump_start(ctnl, skb, nlh, &c);
1042 }
1043
1044 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1045 if (err < 0)
1046 return err;
1047
1048 if (cda[CTA_TUPLE_ORIG])
1049 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
1050 else if (cda[CTA_TUPLE_REPLY])
1051 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
1052 else
1053 return -EINVAL;
1054
1055 if (err < 0)
1056 return err;
1057
1058 h = nf_conntrack_find_get(net, zone, &tuple);
1059 if (!h)
1060 return -ENOENT;
1061
1062 ct = nf_ct_tuplehash_to_ctrack(h);
1063
1064 err = -ENOMEM;
1065 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1066 if (skb2 == NULL) {
1067 nf_ct_put(ct);
1068 return -ENOMEM;
1069 }
1070
1071 rcu_read_lock();
1072 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
1073 NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
1074 rcu_read_unlock();
1075 nf_ct_put(ct);
1076 if (err <= 0)
1077 goto free;
1078
1079 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
1080 if (err < 0)
1081 goto out;
1082
1083 return 0;
1084
1085 free:
1086 kfree_skb(skb2);
1087 out:
1088 /* this avoids a loop in nfnetlink. */
1089 return err == -EAGAIN ? -ENOBUFS : err;
1090 }
1091
1092 #ifdef CONFIG_NF_NAT_NEEDED
1093 static int
1094 ctnetlink_parse_nat_setup(struct nf_conn *ct,
1095 enum nf_nat_manip_type manip,
1096 const struct nlattr *attr)
1097 {
1098 typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
1099
1100 parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
1101 if (!parse_nat_setup) {
1102 #ifdef CONFIG_MODULES
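		/* Drop RCU and the nfnl lock, try to load the NAT module and,
		 * if the hook is now registered, return -EAGAIN so the whole
		 * request is replayed with the hook in place. */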
1103 rcu_read_unlock();
1104 nfnl_unlock();
1105 if (request_module("nf-nat-ipv4") < 0) {
1106 nfnl_lock();
1107 rcu_read_lock();
1108 return -EOPNOTSUPP;
1109 }
1110 nfnl_lock();
1111 rcu_read_lock();
1112 if (nfnetlink_parse_nat_setup_hook)
1113 return -EAGAIN;
1114 #endif
1115 return -EOPNOTSUPP;
1116 }
1117
1118 return parse_nat_setup(ct, manip, attr);
1119 }
1120 #endif
1121
1122 static int
1123 ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1124 {
1125 unsigned long d;
1126 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
1127 d = ct->status ^ status;
1128
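	/* d holds the status bits that would change; reject attempts to
	 * alter bits that are fixed or that may only ever be set. */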
1129 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
1130 /* unchangeable */
1131 return -EBUSY;
1132
1133 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
1134 /* SEEN_REPLY bit can only be set */
1135 return -EBUSY;
1136
1137 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
1138 /* ASSURED bit can only be set */
1139 return -EBUSY;
1140
1141 /* Be careful here, modifying NAT bits can screw up things,
1142 * so don't let users modify them directly if they don't pass
1143 * nf_nat_range. */
1144 ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
1145 return 0;
1146 }
1147
1148 static int
1149 ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1150 {
1151 #ifdef CONFIG_NF_NAT_NEEDED
1152 int ret;
1153
1154 if (cda[CTA_NAT_DST]) {
1155 ret = ctnetlink_parse_nat_setup(ct,
1156 NF_NAT_MANIP_DST,
1157 cda[CTA_NAT_DST]);
1158 if (ret < 0)
1159 return ret;
1160 }
1161 if (cda[CTA_NAT_SRC]) {
1162 ret = ctnetlink_parse_nat_setup(ct,
1163 NF_NAT_MANIP_SRC,
1164 cda[CTA_NAT_SRC]);
1165 if (ret < 0)
1166 return ret;
1167 }
1168 return 0;
1169 #else
1170 return -EOPNOTSUPP;
1171 #endif
1172 }
1173
1174 static inline int
1175 ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1176 {
1177 struct nf_conntrack_helper *helper;
1178 struct nf_conn_help *help = nfct_help(ct);
1179 char *helpname = NULL;
1180 struct nlattr *helpinfo = NULL;
1181 int err;
1182
1183 /* don't change helper of sibling connections */
1184 if (ct->master)
1185 return -EBUSY;
1186
1187 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1188 if (err < 0)
1189 return err;
1190
1191 if (!strcmp(helpname, "")) {
1192 if (help && help->helper) {
1193 /* we had a helper before ... */
1194 nf_ct_remove_expectations(ct);
1195 RCU_INIT_POINTER(help->helper, NULL);
1196 }
1197
1198 return 0;
1199 }
1200
1201 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1202 nf_ct_protonum(ct));
1203 if (helper == NULL) {
1204 #ifdef CONFIG_MODULES
1205 spin_unlock_bh(&nf_conntrack_lock);
1206
1207 if (request_module("nfct-helper-%s", helpname) < 0) {
1208 spin_lock_bh(&nf_conntrack_lock);
1209 return -EOPNOTSUPP;
1210 }
1211
1212 spin_lock_bh(&nf_conntrack_lock);
1213 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1214 nf_ct_protonum(ct));
1215 if (helper)
1216 return -EAGAIN;
1217 #endif
1218 return -EOPNOTSUPP;
1219 }
1220
1221 if (help) {
1222 if (help->helper == helper) {
1223 /* update private helper data if allowed. */
1224 if (helper->from_nlattr && helpinfo)
1225 helper->from_nlattr(helpinfo, ct);
1226 return 0;
1227 } else
1228 return -EBUSY;
1229 }
1230
1231 /* we cannot set a helper for an existing conntrack */
1232 return -EOPNOTSUPP;
1233 }
1234
1235 static inline int
1236 ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
1237 {
1238 u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1239
1240 if (!del_timer(&ct->timeout))
1241 return -ETIME;
1242
1243 ct->timeout.expires = jiffies + timeout * HZ;
1244 add_timer(&ct->timeout);
1245
1246 return 0;
1247 }
1248
1249 static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1250 [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
1251 [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
1252 [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
1253 };
1254
1255 static inline int
1256 ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
1257 {
1258 const struct nlattr *attr = cda[CTA_PROTOINFO];
1259 struct nlattr *tb[CTA_PROTOINFO_MAX+1];
1260 struct nf_conntrack_l4proto *l4proto;
1261 int err = 0;
1262
1263 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
1264
1265 rcu_read_lock();
1266 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
1267 if (l4proto->from_nlattr)
1268 err = l4proto->from_nlattr(tb, ct);
1269 rcu_read_unlock();
1270
1271 return err;
1272 }
1273
1274 #ifdef CONFIG_NF_NAT_NEEDED
1275 static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
1276 [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 },
1277 [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 },
1278 [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 },
1279 };
1280
1281 static inline int
1282 change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
1283 {
1284 struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
1285
1286 nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);
1287
1288 if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
1289 return -EINVAL;
1290
1291 natseq->correction_pos =
1292 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS]));
1293
1294 if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE])
1295 return -EINVAL;
1296
1297 natseq->offset_before =
1298 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE]));
1299
1300 if (!cda[CTA_NAT_SEQ_OFFSET_AFTER])
1301 return -EINVAL;
1302
1303 natseq->offset_after =
1304 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER]));
1305
1306 return 0;
1307 }
1308
1309 static int
1310 ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
1311 const struct nlattr * const cda[])
1312 {
1313 int ret = 0;
1314 struct nf_conn_nat *nat = nfct_nat(ct);
1315
1316 if (!nat)
1317 return 0;
1318
1319 if (cda[CTA_NAT_SEQ_ADJ_ORIG]) {
1320 ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL],
1321 cda[CTA_NAT_SEQ_ADJ_ORIG]);
1322 if (ret < 0)
1323 return ret;
1324
1325 ct->status |= IPS_SEQ_ADJUST;
1326 }
1327
1328 if (cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1329 ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY],
1330 cda[CTA_NAT_SEQ_ADJ_REPLY]);
1331 if (ret < 0)
1332 return ret;
1333
1334 ct->status |= IPS_SEQ_ADJUST;
1335 }
1336
1337 return 0;
1338 }
1339 #endif
1340
1341 static int
1342 ctnetlink_change_conntrack(struct nf_conn *ct,
1343 const struct nlattr * const cda[])
1344 {
1345 int err;
1346
1347 	/* only allow NAT changes and master assignment for new conntracks */
1348 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
1349 return -EOPNOTSUPP;
1350
1351 if (cda[CTA_HELP]) {
1352 err = ctnetlink_change_helper(ct, cda);
1353 if (err < 0)
1354 return err;
1355 }
1356
1357 if (cda[CTA_TIMEOUT]) {
1358 err = ctnetlink_change_timeout(ct, cda);
1359 if (err < 0)
1360 return err;
1361 }
1362
1363 if (cda[CTA_STATUS]) {
1364 err = ctnetlink_change_status(ct, cda);
1365 if (err < 0)
1366 return err;
1367 }
1368
1369 if (cda[CTA_PROTOINFO]) {
1370 err = ctnetlink_change_protoinfo(ct, cda);
1371 if (err < 0)
1372 return err;
1373 }
1374
1375 #if defined(CONFIG_NF_CONNTRACK_MARK)
1376 if (cda[CTA_MARK])
1377 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1378 #endif
1379
1380 #ifdef CONFIG_NF_NAT_NEEDED
1381 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1382 err = ctnetlink_change_nat_seq_adj(ct, cda);
1383 if (err < 0)
1384 return err;
1385 }
1386 #endif
1387
1388 return 0;
1389 }
1390
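/* Create a new conntrack entry from netlink attributes: allocate it, set up
 * timeout, helper, NAT, status and protocol state, and insert it into the
 * hash table as an already-confirmed entry. */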
1391 static struct nf_conn *
1392 ctnetlink_create_conntrack(struct net *net, u16 zone,
1393 const struct nlattr * const cda[],
1394 struct nf_conntrack_tuple *otuple,
1395 struct nf_conntrack_tuple *rtuple,
1396 u8 u3)
1397 {
1398 struct nf_conn *ct;
1399 int err = -EINVAL;
1400 struct nf_conntrack_helper *helper;
1401 struct nf_conn_tstamp *tstamp;
1402
1403 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1404 if (IS_ERR(ct))
1405 return ERR_PTR(-ENOMEM);
1406
1407 if (!cda[CTA_TIMEOUT])
1408 goto err1;
1409 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1410
1411 ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
1412
1413 rcu_read_lock();
1414 if (cda[CTA_HELP]) {
1415 char *helpname = NULL;
1416 struct nlattr *helpinfo = NULL;
1417
1418 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1419 if (err < 0)
1420 goto err2;
1421
1422 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1423 nf_ct_protonum(ct));
1424 if (helper == NULL) {
1425 rcu_read_unlock();
1426 #ifdef CONFIG_MODULES
1427 if (request_module("nfct-helper-%s", helpname) < 0) {
1428 err = -EOPNOTSUPP;
1429 goto err1;
1430 }
1431
1432 rcu_read_lock();
1433 helper = __nf_conntrack_helper_find(helpname,
1434 nf_ct_l3num(ct),
1435 nf_ct_protonum(ct));
1436 if (helper) {
1437 err = -EAGAIN;
1438 goto err2;
1439 }
1440 rcu_read_unlock();
1441 #endif
1442 err = -EOPNOTSUPP;
1443 goto err1;
1444 } else {
1445 struct nf_conn_help *help;
1446
1447 help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
1448 if (help == NULL) {
1449 err = -ENOMEM;
1450 goto err2;
1451 }
1452 /* set private helper data if allowed. */
1453 if (helper->from_nlattr && helpinfo)
1454 helper->from_nlattr(helpinfo, ct);
1455
1456 /* not in hash table yet so not strictly necessary */
1457 RCU_INIT_POINTER(help->helper, helper);
1458 }
1459 } else {
1460 		/* try an implicit helper assignment */
1461 err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1462 if (err < 0)
1463 goto err2;
1464 }
1465
1466 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1467 err = ctnetlink_change_nat(ct, cda);
1468 if (err < 0)
1469 goto err2;
1470 }
1471
1472 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1473 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1474 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1475 /* we must add conntrack extensions before confirmation. */
1476 ct->status |= IPS_CONFIRMED;
1477
1478 if (cda[CTA_STATUS]) {
1479 err = ctnetlink_change_status(ct, cda);
1480 if (err < 0)
1481 goto err2;
1482 }
1483
1484 #ifdef CONFIG_NF_NAT_NEEDED
1485 if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1486 err = ctnetlink_change_nat_seq_adj(ct, cda);
1487 if (err < 0)
1488 goto err2;
1489 }
1490 #endif
1491
1492 memset(&ct->proto, 0, sizeof(ct->proto));
1493 if (cda[CTA_PROTOINFO]) {
1494 err = ctnetlink_change_protoinfo(ct, cda);
1495 if (err < 0)
1496 goto err2;
1497 }
1498
1499 #if defined(CONFIG_NF_CONNTRACK_MARK)
1500 if (cda[CTA_MARK])
1501 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1502 #endif
1503
1504 /* setup master conntrack: this is a confirmed expectation */
1505 if (cda[CTA_TUPLE_MASTER]) {
1506 struct nf_conntrack_tuple master;
1507 struct nf_conntrack_tuple_hash *master_h;
1508 struct nf_conn *master_ct;
1509
1510 err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
1511 if (err < 0)
1512 goto err2;
1513
1514 master_h = nf_conntrack_find_get(net, zone, &master);
1515 if (master_h == NULL) {
1516 err = -ENOENT;
1517 goto err2;
1518 }
1519 master_ct = nf_ct_tuplehash_to_ctrack(master_h);
1520 __set_bit(IPS_EXPECTED_BIT, &ct->status);
1521 ct->master = master_ct;
1522 }
1523 tstamp = nf_conn_tstamp_find(ct);
1524 if (tstamp)
1525 tstamp->start = ktime_to_ns(ktime_get_real());
1526
1527 err = nf_conntrack_hash_check_insert(ct);
1528 if (err < 0)
1529 goto err2;
1530
1531 rcu_read_unlock();
1532
1533 return ct;
1534
1535 err2:
1536 rcu_read_unlock();
1537 err1:
1538 nf_conntrack_free(ct);
1539 return ERR_PTR(err);
1540 }
1541
1542 static int
1543 ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1544 const struct nlmsghdr *nlh,
1545 const struct nlattr * const cda[])
1546 {
1547 struct net *net = sock_net(ctnl);
1548 struct nf_conntrack_tuple otuple, rtuple;
1549 struct nf_conntrack_tuple_hash *h = NULL;
1550 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1551 struct nf_conn *ct;
1552 u_int8_t u3 = nfmsg->nfgen_family;
1553 u16 zone;
1554 int err;
1555
1556 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1557 if (err < 0)
1558 return err;
1559
1560 if (cda[CTA_TUPLE_ORIG]) {
1561 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
1562 if (err < 0)
1563 return err;
1564 }
1565
1566 if (cda[CTA_TUPLE_REPLY]) {
1567 err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
1568 if (err < 0)
1569 return err;
1570 }
1571
1572 if (cda[CTA_TUPLE_ORIG])
1573 h = nf_conntrack_find_get(net, zone, &otuple);
1574 else if (cda[CTA_TUPLE_REPLY])
1575 h = nf_conntrack_find_get(net, zone, &rtuple);
1576
1577 if (h == NULL) {
1578 err = -ENOENT;
1579 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1580 enum ip_conntrack_events events;
1581
1582 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1583 &rtuple, u3);
1584 if (IS_ERR(ct))
1585 return PTR_ERR(ct);
1586
1587 err = 0;
1588 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
1589 events = IPCT_RELATED;
1590 else
1591 events = IPCT_NEW;
1592
1593 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1594 (1 << IPCT_ASSURED) |
1595 (1 << IPCT_HELPER) |
1596 (1 << IPCT_PROTOINFO) |
1597 (1 << IPCT_NATSEQADJ) |
1598 (1 << IPCT_MARK) | events,
1599 ct, NETLINK_CB(skb).pid,
1600 nlmsg_report(nlh));
1601 nf_ct_put(ct);
1602 }
1603
1604 return err;
1605 }
1606 /* implicit 'else' */
1607
1608 err = -EEXIST;
1609 ct = nf_ct_tuplehash_to_ctrack(h);
1610 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1611 spin_lock_bh(&nf_conntrack_lock);
1612 err = ctnetlink_change_conntrack(ct, cda);
1613 spin_unlock_bh(&nf_conntrack_lock);
1614 if (err == 0) {
1615 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1616 (1 << IPCT_ASSURED) |
1617 (1 << IPCT_HELPER) |
1618 (1 << IPCT_PROTOINFO) |
1619 (1 << IPCT_NATSEQADJ) |
1620 (1 << IPCT_MARK),
1621 ct, NETLINK_CB(skb).pid,
1622 nlmsg_report(nlh));
1623 }
1624 }
1625
1626 nf_ct_put(ct);
1627 return err;
1628 }
1629
1630 static int
1631 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1632 __u16 cpu, const struct ip_conntrack_stat *st)
1633 {
1634 struct nlmsghdr *nlh;
1635 struct nfgenmsg *nfmsg;
1636 unsigned int flags = pid ? NLM_F_MULTI : 0, event;
1637
1638 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
1639 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
1640 if (nlh == NULL)
1641 goto nlmsg_failure;
1642
1643 nfmsg = nlmsg_data(nlh);
1644 nfmsg->nfgen_family = AF_UNSPEC;
1645 nfmsg->version = NFNETLINK_V0;
1646 nfmsg->res_id = htons(cpu);
1647
1648 if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
1649 nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
1650 nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
1651 nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
1652 nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
1653 nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
1654 nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
1655 nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
1656 nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
1657 htonl(st->insert_failed)) ||
1658 nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
1659 nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
1660 nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
1661 nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
1662 htonl(st->search_restart)))
1663 goto nla_put_failure;
1664
1665 nlmsg_end(skb, nlh);
1666 return skb->len;
1667
1668 nla_put_failure:
1669 nlmsg_failure:
1670 nlmsg_cancel(skb, nlh);
1671 return -1;
1672 }
1673
1674 static int
1675 ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
1676 {
1677 int cpu;
1678 struct net *net = sock_net(skb->sk);
1679
1680 if (cb->args[0] == nr_cpu_ids)
1681 return 0;
1682
1683 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1684 const struct ip_conntrack_stat *st;
1685
1686 if (!cpu_possible(cpu))
1687 continue;
1688
1689 st = per_cpu_ptr(net->ct.stat, cpu);
1690 if (ctnetlink_ct_stat_cpu_fill_info(skb,
1691 NETLINK_CB(cb->skb).pid,
1692 cb->nlh->nlmsg_seq,
1693 cpu, st) < 0)
1694 break;
1695 }
1696 cb->args[0] = cpu;
1697
1698 return skb->len;
1699 }
1700
1701 static int
1702 ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
1703 const struct nlmsghdr *nlh,
1704 const struct nlattr * const cda[])
1705 {
1706 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1707 struct netlink_dump_control c = {
1708 .dump = ctnetlink_ct_stat_cpu_dump,
1709 };
1710 return netlink_dump_start(ctnl, skb, nlh, &c);
1711 }
1712
1713 return 0;
1714 }
1715
1716 static int
1717 ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
1718 struct net *net)
1719 {
1720 struct nlmsghdr *nlh;
1721 struct nfgenmsg *nfmsg;
1722 unsigned int flags = pid ? NLM_F_MULTI : 0, event;
1723 unsigned int nr_conntracks = atomic_read(&net->ct.count);
1724
1725 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
1726 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
1727 if (nlh == NULL)
1728 goto nlmsg_failure;
1729
1730 nfmsg = nlmsg_data(nlh);
1731 nfmsg->nfgen_family = AF_UNSPEC;
1732 nfmsg->version = NFNETLINK_V0;
1733 nfmsg->res_id = 0;
1734
1735 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
1736 goto nla_put_failure;
1737
1738 nlmsg_end(skb, nlh);
1739 return skb->len;
1740
1741 nla_put_failure:
1742 nlmsg_failure:
1743 nlmsg_cancel(skb, nlh);
1744 return -1;
1745 }
1746
1747 static int
1748 ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
1749 const struct nlmsghdr *nlh,
1750 const struct nlattr * const cda[])
1751 {
1752 struct sk_buff *skb2;
1753 int err;
1754
1755 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1756 if (skb2 == NULL)
1757 return -ENOMEM;
1758
1759 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).pid,
1760 nlh->nlmsg_seq,
1761 NFNL_MSG_TYPE(nlh->nlmsg_type),
1762 sock_net(skb->sk));
1763 if (err <= 0)
1764 goto free;
1765
1766 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
1767 if (err < 0)
1768 goto out;
1769
1770 return 0;
1771
1772 free:
1773 kfree_skb(skb2);
1774 out:
1775 /* this avoids a loop in nfnetlink. */
1776 return err == -EAGAIN ? -ENOBUFS : err;
1777 }
1778
1779 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
1780 static size_t
1781 ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
1782 {
1783 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
1784 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
1785 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
1786 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
1787 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
1788 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
1789 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
1790 + nla_total_size(0) /* CTA_PROTOINFO */
1791 + nla_total_size(0) /* CTA_HELP */
1792 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
1793 + ctnetlink_secctx_size(ct)
1794 #ifdef CONFIG_NF_NAT_NEEDED
1795 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
1796 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
1797 #endif
1798 #ifdef CONFIG_NF_CONNTRACK_MARK
1799 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
1800 #endif
1801 + ctnetlink_proto_size(ct)
1802 ;
1803 }
1804
1805 static int
1806 ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
1807 {
1808 struct nlattr *nest_parms;
1809
1810 rcu_read_lock();
1811 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
1812 if (!nest_parms)
1813 goto nla_put_failure;
1814 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
1815 goto nla_put_failure;
1816 nla_nest_end(skb, nest_parms);
1817
1818 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
1819 if (!nest_parms)
1820 goto nla_put_failure;
1821 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
1822 goto nla_put_failure;
1823 nla_nest_end(skb, nest_parms);
1824
1825 if (nf_ct_zone(ct)) {
1826 if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
1827 goto nla_put_failure;
1828 }
1829
1830 if (ctnetlink_dump_id(skb, ct) < 0)
1831 goto nla_put_failure;
1832
1833 if (ctnetlink_dump_status(skb, ct) < 0)
1834 goto nla_put_failure;
1835
1836 if (ctnetlink_dump_timeout(skb, ct) < 0)
1837 goto nla_put_failure;
1838
1839 if (ctnetlink_dump_protoinfo(skb, ct) < 0)
1840 goto nla_put_failure;
1841
1842 if (ctnetlink_dump_helpinfo(skb, ct) < 0)
1843 goto nla_put_failure;
1844
1845 #ifdef CONFIG_NF_CONNTRACK_SECMARK
1846 if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
1847 goto nla_put_failure;
1848 #endif
1849 if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
1850 goto nla_put_failure;
1851
1852 if ((ct->status & IPS_SEQ_ADJUST) &&
1853 ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
1854 goto nla_put_failure;
1855
1856 #ifdef CONFIG_NF_CONNTRACK_MARK
1857 if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
1858 goto nla_put_failure;
1859 #endif
1860 rcu_read_unlock();
1861 return 0;
1862
1863 nla_put_failure:
1864 rcu_read_unlock();
1865 return -ENOSPC;
1866 }
1867
1868 static int
1869 ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
1870 {
1871 int err;
1872
1873 if (cda[CTA_TIMEOUT]) {
1874 err = ctnetlink_change_timeout(ct, cda);
1875 if (err < 0)
1876 return err;
1877 }
1878 if (cda[CTA_STATUS]) {
1879 err = ctnetlink_change_status(ct, cda);
1880 if (err < 0)
1881 return err;
1882 }
1883 if (cda[CTA_HELP]) {
1884 err = ctnetlink_change_helper(ct, cda);
1885 if (err < 0)
1886 return err;
1887 }
1888 #if defined(CONFIG_NF_CONNTRACK_MARK)
1889 if (cda[CTA_MARK])
1890 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1891 #endif
1892 return 0;
1893 }
1894
1895 static int
1896 ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
1897 {
1898 struct nlattr *cda[CTA_MAX+1];
1899 int ret;
1900
1901 nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
1902
1903 spin_lock_bh(&nf_conntrack_lock);
1904 ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
1905 spin_unlock_bh(&nf_conntrack_lock);
1906
1907 return ret;
1908 }
1909
1910 static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
1911 .build_size = ctnetlink_nfqueue_build_size,
1912 .build = ctnetlink_nfqueue_build,
1913 .parse = ctnetlink_nfqueue_parse,
1914 };
1915 #endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
1916
1917 /***********************************************************************
1918 * EXPECT
1919 ***********************************************************************/
1920
1921 static inline int
1922 ctnetlink_exp_dump_tuple(struct sk_buff *skb,
1923 const struct nf_conntrack_tuple *tuple,
1924 enum ctattr_expect type)
1925 {
1926 struct nlattr *nest_parms;
1927
1928 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
1929 if (!nest_parms)
1930 goto nla_put_failure;
1931 if (ctnetlink_dump_tuples(skb, tuple) < 0)
1932 goto nla_put_failure;
1933 nla_nest_end(skb, nest_parms);
1934
1935 return 0;
1936
1937 nla_put_failure:
1938 return -1;
1939 }
1940
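/* Build the CTA_EXPECT_MASK nest: the expectation mask only stores the
 * source half, so a dummy tuple is filled with all-ones, the masked source
 * fields and the protocol number are copied in, and it is dumped like a
 * normal tuple. */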
1941 static inline int
1942 ctnetlink_exp_dump_mask(struct sk_buff *skb,
1943 const struct nf_conntrack_tuple *tuple,
1944 const struct nf_conntrack_tuple_mask *mask)
1945 {
1946 int ret;
1947 struct nf_conntrack_l3proto *l3proto;
1948 struct nf_conntrack_l4proto *l4proto;
1949 struct nf_conntrack_tuple m;
1950 struct nlattr *nest_parms;
1951
1952 memset(&m, 0xFF, sizeof(m));
1953 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
1954 m.src.u.all = mask->src.u.all;
1955 m.dst.protonum = tuple->dst.protonum;
1956
1957 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
1958 if (!nest_parms)
1959 goto nla_put_failure;
1960
1961 rcu_read_lock();
1962 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
1963 ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
1964 if (ret >= 0) {
1965 l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
1966 tuple->dst.protonum);
1967 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
1968 }
1969 rcu_read_unlock();
1970
1971 if (unlikely(ret < 0))
1972 goto nla_put_failure;
1973
1974 nla_nest_end(skb, nest_parms);
1975
1976 return 0;
1977
1978 nla_put_failure:
1979 return -1;
1980 }
1981
1982 static int
1983 ctnetlink_exp_dump_expect(struct sk_buff *skb,
1984 const struct nf_conntrack_expect *exp)
1985 {
1986 struct nf_conn *master = exp->master;
1987 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
1988 struct nf_conn_help *help;
1989 #ifdef CONFIG_NF_NAT_NEEDED
1990 struct nlattr *nest_parms;
1991 struct nf_conntrack_tuple nat_tuple = {};
1992 #endif
1993 struct nf_ct_helper_expectfn *expfn;
1994
1995 if (timeout < 0)
1996 timeout = 0;
1997
1998 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
1999 goto nla_put_failure;
2000 if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
2001 goto nla_put_failure;
2002 if (ctnetlink_exp_dump_tuple(skb,
2003 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
2004 CTA_EXPECT_MASTER) < 0)
2005 goto nla_put_failure;
2006
2007 #ifdef CONFIG_NF_NAT_NEEDED
2008 if (exp->saved_ip || exp->saved_proto.all) {
2009 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
2010 if (!nest_parms)
2011 goto nla_put_failure;
2012
2013 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
2014 goto nla_put_failure;
2015
2016 nat_tuple.src.l3num = nf_ct_l3num(master);
2017 nat_tuple.src.u3.ip = exp->saved_ip;
2018 nat_tuple.dst.protonum = nf_ct_protonum(master);
2019 nat_tuple.src.u = exp->saved_proto;
2020
2021 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
2022 CTA_EXPECT_NAT_TUPLE) < 0)
2023 goto nla_put_failure;
2024 nla_nest_end(skb, nest_parms);
2025 }
2026 #endif
2027 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
2028 nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
2029 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
2030 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
2031 goto nla_put_failure;
2032 help = nfct_help(master);
2033 if (help) {
2034 struct nf_conntrack_helper *helper;
2035
2036 helper = rcu_dereference(help->helper);
2037 if (helper &&
2038 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
2039 goto nla_put_failure;
2040 }
2041 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
2042 if (expfn != NULL &&
2043 nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
2044 goto nla_put_failure;
2045
2046 return 0;
2047
2048 nla_put_failure:
2049 return -1;
2050 }
2051
2052 static int
2053 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
2054 int event, const struct nf_conntrack_expect *exp)
2055 {
2056 struct nlmsghdr *nlh;
2057 struct nfgenmsg *nfmsg;
2058 unsigned int flags = pid ? NLM_F_MULTI : 0;
2059
2060 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2061 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
2062 if (nlh == NULL)
2063 goto nlmsg_failure;
2064
2065 nfmsg = nlmsg_data(nlh);
2066 nfmsg->nfgen_family = exp->tuple.src.l3num;
2067 nfmsg->version = NFNETLINK_V0;
2068 nfmsg->res_id = 0;
2069
2070 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2071 goto nla_put_failure;
2072
2073 nlmsg_end(skb, nlh);
2074 return skb->len;
2075
2076 nlmsg_failure:
2077 nla_put_failure:
2078 nlmsg_cancel(skb, nlh);
2079 return -1;
2080 }
2081
2082 #ifdef CONFIG_NF_CONNTRACK_EVENTS
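/* Event handler: broadcast expectation NEW/DESTROY events to the
 * corresponding nfnetlink multicast group, but only when there are
 * listeners or an explicit report was requested.
 */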
2083 static int
2084 ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
2085 {
2086 struct nf_conntrack_expect *exp = item->exp;
2087 struct net *net = nf_ct_exp_net(exp);
2088 struct nlmsghdr *nlh;
2089 struct nfgenmsg *nfmsg;
2090 struct sk_buff *skb;
2091 unsigned int type, group;
2092 int flags = 0;
2093
2094 if (events & (1 << IPEXP_DESTROY)) {
2095 type = IPCTNL_MSG_EXP_DELETE;
2096 group = NFNLGRP_CONNTRACK_EXP_DESTROY;
2097 } else if (events & (1 << IPEXP_NEW)) {
2098 type = IPCTNL_MSG_EXP_NEW;
2099 flags = NLM_F_CREATE|NLM_F_EXCL;
2100 group = NFNLGRP_CONNTRACK_EXP_NEW;
2101 } else
2102 return 0;
2103
2104 if (!item->report && !nfnetlink_has_listeners(net, group))
2105 return 0;
2106
2107 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2108 if (skb == NULL)
2109 goto errout;
2110
2111 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2112 nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
2113 if (nlh == NULL)
2114 goto nlmsg_failure;
2115
2116 nfmsg = nlmsg_data(nlh);
2117 nfmsg->nfgen_family = exp->tuple.src.l3num;
2118 nfmsg->version = NFNETLINK_V0;
2119 nfmsg->res_id = 0;
2120
2121 rcu_read_lock();
2122 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2123 goto nla_put_failure;
2124 rcu_read_unlock();
2125
2126 nlmsg_end(skb, nlh);
2127 nfnetlink_send(skb, net, item->pid, group, item->report, GFP_ATOMIC);
2128 return 0;
2129
2130 nla_put_failure:
2131 rcu_read_unlock();
2132 nlmsg_cancel(skb, nlh);
2133 nlmsg_failure:
2134 kfree_skb(skb);
2135 errout:
2136 nfnetlink_set_err(net, 0, 0, -ENOBUFS);
2137 return 0;
2138 }
2139 #endif
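
/* Release the expectation reference held across an interrupted dump. */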
2140 static int ctnetlink_exp_done(struct netlink_callback *cb)
2141 {
2142 if (cb->args[1])
2143 nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
2144 return 0;
2145 }
2146
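/* Walk the expectation hash table for a dump; cb->args[0] is the bucket
 * to resume from and cb->args[1] pins the last expectation that did not
 * fit into the previous skb, so the dump restarts right after it.
 */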
2147 static int
2148 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2149 {
2150 struct net *net = sock_net(skb->sk);
2151 struct nf_conntrack_expect *exp, *last;
2152 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2153 struct hlist_node *n;
2154 u_int8_t l3proto = nfmsg->nfgen_family;
2155
2156 rcu_read_lock();
2157 last = (struct nf_conntrack_expect *)cb->args[1];
2158 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
2159 restart:
2160 hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]],
2161 hnode) {
2162 if (l3proto && exp->tuple.src.l3num != l3proto)
2163 continue;
2164 if (cb->args[1]) {
2165 if (exp != last)
2166 continue;
2167 cb->args[1] = 0;
2168 }
2169 if (ctnetlink_exp_fill_info(skb,
2170 NETLINK_CB(cb->skb).pid,
2171 cb->nlh->nlmsg_seq,
2172 IPCTNL_MSG_EXP_NEW,
2173 exp) < 0) {
2174 if (!atomic_inc_not_zero(&exp->use))
2175 continue;
2176 cb->args[1] = (unsigned long)exp;
2177 goto out;
2178 }
2179 }
2180 if (cb->args[1]) {
2181 cb->args[1] = 0;
2182 goto restart;
2183 }
2184 }
2185 out:
2186 rcu_read_unlock();
2187 if (last)
2188 nf_ct_expect_put(last);
2189
2190 return skb->len;
2191 }
2192
2193 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2194 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
2195 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
2196 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
2197 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
2198 [CTA_EXPECT_ID] = { .type = NLA_U32 },
2199 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING },
2200 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
2201 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
2202 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
2203 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
2204 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
2205 };
2206
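/* IPCTNL_MSG_EXP_GET: either start a table dump (NLM_F_DUMP) or look up
 * a single expectation by tuple/master and unicast it to the requester.
 */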
2207 static int
2208 ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
2209 const struct nlmsghdr *nlh,
2210 const struct nlattr * const cda[])
2211 {
2212 struct net *net = sock_net(ctnl);
2213 struct nf_conntrack_tuple tuple;
2214 struct nf_conntrack_expect *exp;
2215 struct sk_buff *skb2;
2216 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2217 u_int8_t u3 = nfmsg->nfgen_family;
2218 u16 zone;
2219 int err;
2220
2221 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2222 struct netlink_dump_control c = {
2223 .dump = ctnetlink_exp_dump_table,
2224 .done = ctnetlink_exp_done,
2225 };
2226 return netlink_dump_start(ctnl, skb, nlh, &c);
2227 }
2228
2229 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2230 if (err < 0)
2231 return err;
2232
2233 if (cda[CTA_EXPECT_TUPLE])
2234 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2235 else if (cda[CTA_EXPECT_MASTER])
2236 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
2237 else
2238 return -EINVAL;
2239
2240 if (err < 0)
2241 return err;
2242
2243 exp = nf_ct_expect_find_get(net, zone, &tuple);
2244 if (!exp)
2245 return -ENOENT;
2246
2247 if (cda[CTA_EXPECT_ID]) {
2248 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2249 if (ntohl(id) != (u32)(unsigned long)exp) {
2250 nf_ct_expect_put(exp);
2251 return -ENOENT;
2252 }
2253 }
2254
2255 err = -ENOMEM;
2256 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2257 if (skb2 == NULL) {
2258 nf_ct_expect_put(exp);
2259 goto out;
2260 }
2261
2262 rcu_read_lock();
2263 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
2264 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
2265 rcu_read_unlock();
2266 nf_ct_expect_put(exp);
2267 if (err <= 0)
2268 goto free;
2269
2270 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
2271 if (err < 0)
2272 goto out;
2273
2274 return 0;
2275
2276 free:
2277 kfree_skb(skb2);
2278 out:
2279 /* this avoids a loop in nfnetlink. */
2280 return err == -EAGAIN ? -ENOBUFS : err;
2281 }
2282
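/* IPCTNL_MSG_EXP_DELETE: remove one expectation by tuple (optionally
 * checked against CTA_EXPECT_ID), all expectations of a given helper,
 * or flush the whole table when neither attribute is present.
 */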
2283 static int
2284 ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2285 const struct nlmsghdr *nlh,
2286 const struct nlattr * const cda[])
2287 {
2288 struct net *net = sock_net(ctnl);
2289 struct nf_conntrack_expect *exp;
2290 struct nf_conntrack_tuple tuple;
2291 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2292 struct hlist_node *n, *next;
2293 u_int8_t u3 = nfmsg->nfgen_family;
2294 unsigned int i;
2295 u16 zone;
2296 int err;
2297
2298 if (cda[CTA_EXPECT_TUPLE]) {
2299 /* delete a single expect by tuple */
2300 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2301 if (err < 0)
2302 return err;
2303
2304 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2305 if (err < 0)
2306 return err;
2307
2308 /* bump usage count to 2 */
2309 exp = nf_ct_expect_find_get(net, zone, &tuple);
2310 if (!exp)
2311 return -ENOENT;
2312
2313 if (cda[CTA_EXPECT_ID]) {
2314 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2315 if (ntohl(id) != (u32)(unsigned long)exp) {
2316 nf_ct_expect_put(exp);
2317 return -ENOENT;
2318 }
2319 }
2320
2321 /* after list removal, usage count == 1 */
2322 spin_lock_bh(&nf_conntrack_lock);
2323 if (del_timer(&exp->timeout)) {
2324 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).pid,
2325 nlmsg_report(nlh));
2326 nf_ct_expect_put(exp);
2327 }
2328 spin_unlock_bh(&nf_conntrack_lock);
2329 /* Drop the reference taken by nf_ct_expect_find_get() above;
2330 * after this line the usage count is 0. */
2331 nf_ct_expect_put(exp);
2332 } else if (cda[CTA_EXPECT_HELP_NAME]) {
2333 char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2334 struct nf_conn_help *m_help;
2335
2336 /* delete all expectations for this helper */
2337 spin_lock_bh(&nf_conntrack_lock);
2338 for (i = 0; i < nf_ct_expect_hsize; i++) {
2339 hlist_for_each_entry_safe(exp, n, next,
2340 &net->ct.expect_hash[i],
2341 hnode) {
2342 m_help = nfct_help(exp->master);
2343 if (!strcmp(m_help->helper->name, name) &&
2344 del_timer(&exp->timeout)) {
2345 nf_ct_unlink_expect_report(exp,
2346 NETLINK_CB(skb).pid,
2347 nlmsg_report(nlh));
2348 nf_ct_expect_put(exp);
2349 }
2350 }
2351 }
2352 spin_unlock_bh(&nf_conntrack_lock);
2353 } else {
2354 /* Neither tuple nor helper name given: flush all expectations. */
2355 spin_lock_bh(&nf_conntrack_lock);
2356 for (i = 0; i < nf_ct_expect_hsize; i++) {
2357 hlist_for_each_entry_safe(exp, n, next,
2358 &net->ct.expect_hash[i],
2359 hnode) {
2360 if (del_timer(&exp->timeout)) {
2361 nf_ct_unlink_expect_report(exp,
2362 NETLINK_CB(skb).pid,
2363 nlmsg_report(nlh));
2364 nf_ct_expect_put(exp);
2365 }
2366 }
2367 }
2368 spin_unlock_bh(&nf_conntrack_lock);
2369 }
2370
2371 return 0;
2372 }
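
/* Only the timeout of an existing expectation can be updated; -ETIME is
 * returned if it already expired.
 */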
2373 static int
2374 ctnetlink_change_expect(struct nf_conntrack_expect *x,
2375 const struct nlattr * const cda[])
2376 {
2377 if (cda[CTA_EXPECT_TIMEOUT]) {
2378 if (!del_timer(&x->timeout))
2379 return -ETIME;
2380
2381 x->timeout.expires = jiffies +
2382 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2383 add_timer(&x->timeout);
2384 }
2385 return 0;
2386 }
2387
2388 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
2389 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
2390 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
2391 };
2392
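/* Parse the nested CTA_EXPECT_NAT attribute into the expectation's saved
 * source address/proto and NAT direction; only usable when NAT support
 * is compiled in, -EOPNOTSUPP otherwise.
 */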
2393 static int
2394 ctnetlink_parse_expect_nat(const struct nlattr *attr,
2395 struct nf_conntrack_expect *exp,
2396 u_int8_t u3)
2397 {
2398 #ifdef CONFIG_NF_NAT_NEEDED
2399 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
2400 struct nf_conntrack_tuple nat_tuple = {};
2401 int err;
2402
2403 nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
2404
2405 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
2406 return -EINVAL;
2407
2408 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
2409 &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
2410 if (err < 0)
2411 return err;
2412
2413 exp->saved_ip = nat_tuple.src.u3.ip;
2414 exp->saved_proto = nat_tuple.src.u;
2415 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
2416
2417 return 0;
2418 #else
2419 return -EOPNOTSUPP;
2420 #endif
2421 }
2422
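/* Create a new expectation from netlink attributes: resolve the master
 * conntrack and (optional) helper, copy tuple/mask, apply class, flags,
 * timeout and NAT info, then register it and report the event.
 */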
2423 static int
2424 ctnetlink_create_expect(struct net *net, u16 zone,
2425 const struct nlattr * const cda[],
2426 u_int8_t u3,
2427 u32 pid, int report)
2428 {
2429 struct nf_conntrack_tuple tuple, mask, master_tuple;
2430 struct nf_conntrack_tuple_hash *h = NULL;
2431 struct nf_conntrack_expect *exp;
2432 struct nf_conn *ct;
2433 struct nf_conn_help *help;
2434 struct nf_conntrack_helper *helper = NULL;
2435 u_int32_t class = 0;
2436 int err = 0;
2437
2438 /* the caller guarantees that these three CTA_EXPECT_* attributes exist */
2439 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2440 if (err < 0)
2441 return err;
2442 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
2443 if (err < 0)
2444 return err;
2445 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
2446 if (err < 0)
2447 return err;
2448
2449 /* Look for master conntrack of this expectation */
2450 h = nf_conntrack_find_get(net, zone, &master_tuple);
2451 if (!h)
2452 return -ENOENT;
2453 ct = nf_ct_tuplehash_to_ctrack(h);
2454
2455 /* Look for helper of this expectation */
2456 if (cda[CTA_EXPECT_HELP_NAME]) {
2457 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2458
2459 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2460 nf_ct_protonum(ct));
2461 if (helper == NULL) {
2462 #ifdef CONFIG_MODULES
2463 if (request_module("nfct-helper-%s", helpname) < 0) {
2464 err = -EOPNOTSUPP;
2465 goto out;
2466 }
2467
2468 helper = __nf_conntrack_helper_find(helpname,
2469 nf_ct_l3num(ct),
2470 nf_ct_protonum(ct));
2471 if (helper) {
2472 err = -EAGAIN;
2473 goto out;
2474 }
2475 #endif
2476 err = -EOPNOTSUPP;
2477 goto out;
2478 }
2479 }
2480
2481 if (cda[CTA_EXPECT_CLASS] && helper) {
2482 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
2483 if (class > helper->expect_class_max) {
2484 err = -EINVAL;
2485 goto out;
2486 }
2487 }
2488 exp = nf_ct_expect_alloc(ct);
2489 if (!exp) {
2490 err = -ENOMEM;
2491 goto out;
2492 }
2493 help = nfct_help(ct);
2494 if (!help) {
2495 if (!cda[CTA_EXPECT_TIMEOUT]) {
2496 err = -EINVAL;
2497 goto out;
2498 }
2499 exp->timeout.expires =
2500 jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2501
2502 exp->flags = NF_CT_EXPECT_USERSPACE;
2503 if (cda[CTA_EXPECT_FLAGS]) {
2504 exp->flags |=
2505 ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2506 }
2507 } else {
2508 if (cda[CTA_EXPECT_FLAGS]) {
2509 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2510 exp->flags &= ~NF_CT_EXPECT_USERSPACE;
2511 } else
2512 exp->flags = 0;
2513 }
2514 if (cda[CTA_EXPECT_FN]) {
2515 const char *name = nla_data(cda[CTA_EXPECT_FN]);
2516 struct nf_ct_helper_expectfn *expfn;
2517
2518 expfn = nf_ct_helper_expectfn_find_by_name(name);
2519 if (expfn == NULL) {
2520 err = -EINVAL;
2521 goto err_out;
2522 }
2523 exp->expectfn = expfn->expectfn;
2524 } else
2525 exp->expectfn = NULL;
2526
2527 exp->class = class;
2528 exp->master = ct;
2529 exp->helper = helper;
2530 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
2531 memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
2532 exp->mask.src.u.all = mask.src.u.all;
2533
2534 if (cda[CTA_EXPECT_NAT]) {
2535 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
2536 exp, u3);
2537 if (err < 0)
2538 goto err_out;
2539 }
2540 err = nf_ct_expect_related_report(exp, pid, report);
2541 err_out:
2542 nf_ct_expect_put(exp);
2543 out:
2544 nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
2545 return err;
2546 }
2547
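/* IPCTNL_MSG_EXP_NEW: create the expectation if it does not exist yet
 * (NLM_F_CREATE), otherwise update it unless NLM_F_EXCL was set.
 */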
2548 static int
2549 ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
2550 const struct nlmsghdr *nlh,
2551 const struct nlattr * const cda[])
2552 {
2553 struct net *net = sock_net(ctnl);
2554 struct nf_conntrack_tuple tuple;
2555 struct nf_conntrack_expect *exp;
2556 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2557 u_int8_t u3 = nfmsg->nfgen_family;
2558 u16 zone;
2559 int err;
2560
2561 if (!cda[CTA_EXPECT_TUPLE]
2562 || !cda[CTA_EXPECT_MASK]
2563 || !cda[CTA_EXPECT_MASTER])
2564 return -EINVAL;
2565
2566 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2567 if (err < 0)
2568 return err;
2569
2570 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2571 if (err < 0)
2572 return err;
2573
2574 spin_lock_bh(&nf_conntrack_lock);
2575 exp = __nf_ct_expect_find(net, zone, &tuple);
2576
2577 if (!exp) {
2578 spin_unlock_bh(&nf_conntrack_lock);
2579 err = -ENOENT;
2580 if (nlh->nlmsg_flags & NLM_F_CREATE) {
2581 err = ctnetlink_create_expect(net, zone, cda,
2582 u3,
2583 NETLINK_CB(skb).pid,
2584 nlmsg_report(nlh));
2585 }
2586 return err;
2587 }
2588
2589 err = -EEXIST;
2590 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
2591 err = ctnetlink_change_expect(exp, cda);
2592 spin_unlock_bh(&nf_conntrack_lock);
2593
2594 return err;
2595 }
2596
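/* Fill one statistics message with the expectation counters of a single
 * CPU; the per-CPU dump below emits one such message per possible CPU.
 */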
2597 static int
2598 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int cpu,
2599 const struct ip_conntrack_stat *st)
2600 {
2601 struct nlmsghdr *nlh;
2602 struct nfgenmsg *nfmsg;
2603 unsigned int flags = pid ? NLM_F_MULTI : 0, event;
2604
2605 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
2606 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
2607 if (nlh == NULL)
2608 goto nlmsg_failure;
2609
2610 nfmsg = nlmsg_data(nlh);
2611 nfmsg->nfgen_family = AF_UNSPEC;
2612 nfmsg->version = NFNETLINK_V0;
2613 nfmsg->res_id = htons(cpu);
2614
2615 if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
2616 nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
2617 nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
2618 goto nla_put_failure;
2619
2620 nlmsg_end(skb, nlh);
2621 return skb->len;
2622
2623 nla_put_failure:
2624 nlmsg_failure:
2625 nlmsg_cancel(skb, nlh);
2626 return -1;
2627 }
2628
2629 static int
2630 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2631 {
2632 int cpu;
2633 struct net *net = sock_net(skb->sk);
2634
2635 if (cb->args[0] == nr_cpu_ids)
2636 return 0;
2637
2638 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
2639 const struct ip_conntrack_stat *st;
2640
2641 if (!cpu_possible(cpu))
2642 continue;
2643
2644 st = per_cpu_ptr(net->ct.stat, cpu);
2645 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).pid,
2646 cb->nlh->nlmsg_seq,
2647 cpu, st) < 0)
2648 break;
2649 }
2650 cb->args[0] = cpu;
2651
2652 return skb->len;
2653 }
2654
2655 static int
2656 ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
2657 const struct nlmsghdr *nlh,
2658 const struct nlattr * const cda[])
2659 {
2660 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2661 struct netlink_dump_control c = {
2662 .dump = ctnetlink_exp_stat_cpu_dump,
2663 };
2664 return netlink_dump_start(ctnl, skb, nlh, &c);
2665 }
2666
2667 return 0;
2668 }
2669
2670 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2671 static struct nf_ct_event_notifier ctnl_notifier = {
2672 .fcn = ctnetlink_conntrack_event,
2673 };
2674
2675 static struct nf_exp_event_notifier ctnl_notifier_exp = {
2676 .fcn = ctnetlink_expect_event,
2677 };
2678 #endif
2679
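/* Message handlers registered with nfnetlink for the conntrack and
 * expectation subsystems.
 */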
2680 static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
2681 [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack,
2682 .attr_count = CTA_MAX,
2683 .policy = ct_nla_policy },
2684 [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack,
2685 .attr_count = CTA_MAX,
2686 .policy = ct_nla_policy },
2687 [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack,
2688 .attr_count = CTA_MAX,
2689 .policy = ct_nla_policy },
2690 [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack,
2691 .attr_count = CTA_MAX,
2692 .policy = ct_nla_policy },
2693 [IPCTNL_MSG_CT_GET_STATS_CPU] = { .call = ctnetlink_stat_ct_cpu },
2694 [IPCTNL_MSG_CT_GET_STATS] = { .call = ctnetlink_stat_ct },
2695 };
2696
2697 static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
2698 [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect,
2699 .attr_count = CTA_EXPECT_MAX,
2700 .policy = exp_nla_policy },
2701 [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect,
2702 .attr_count = CTA_EXPECT_MAX,
2703 .policy = exp_nla_policy },
2704 [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect,
2705 .attr_count = CTA_EXPECT_MAX,
2706 .policy = exp_nla_policy },
2707 [IPCTNL_MSG_EXP_GET_STATS_CPU] = { .call = ctnetlink_stat_exp_cpu },
2708 };
2709
2710 static const struct nfnetlink_subsystem ctnl_subsys = {
2711 .name = "conntrack",
2712 .subsys_id = NFNL_SUBSYS_CTNETLINK,
2713 .cb_count = IPCTNL_MSG_MAX,
2714 .cb = ctnl_cb,
2715 };
2716
2717 static const struct nfnetlink_subsystem ctnl_exp_subsys = {
2718 .name = "conntrack_expect",
2719 .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP,
2720 .cb_count = IPCTNL_MSG_EXP_MAX,
2721 .cb = ctnl_exp_cb,
2722 };
2723
2724 MODULE_ALIAS("ip_conntrack_netlink");
2725 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
2726 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
2727
2728 static int __net_init ctnetlink_net_init(struct net *net)
2729 {
2730 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2731 int ret;
2732
2733 ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
2734 if (ret < 0) {
2735 pr_err("ctnetlink_init: cannot register notifier.\n");
2736 goto err_out;
2737 }
2738
2739 ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
2740 if (ret < 0) {
2741 pr_err("ctnetlink_init: cannot register expect notifier.\n");
2742 goto err_unreg_notifier;
2743 }
2744 #endif
2745 return 0;
2746
2747 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2748 err_unreg_notifier:
2749 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
2750 err_out:
2751 return ret;
2752 #endif
2753 }
2754
2755 static void ctnetlink_net_exit(struct net *net)
2756 {
2757 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2758 nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
2759 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
2760 #endif
2761 }
2762
2763 static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
2764 {
2765 struct net *net;
2766
2767 list_for_each_entry(net, net_exit_list, exit_list)
2768 ctnetlink_net_exit(net);
2769 }
2770
2771 static struct pernet_operations ctnetlink_net_ops = {
2772 .init = ctnetlink_net_init,
2773 .exit_batch = ctnetlink_net_exit_batch,
2774 };
2775
2776 static int __init ctnetlink_init(void)
2777 {
2778 int ret;
2779
2780 pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
2781 ret = nfnetlink_subsys_register(&ctnl_subsys);
2782 if (ret < 0) {
2783 pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
2784 goto err_out;
2785 }
2786
2787 ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
2788 if (ret < 0) {
2789 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
2790 goto err_unreg_subsys;
2791 }
2792
2793 if (register_pernet_subsys(&ctnetlink_net_ops)) {
2794 pr_err("ctnetlink_init: cannot register pernet operations.\n");
2795 goto err_unreg_exp_subsys;
2796 }
2797 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
2798 /* set up the interaction between nf_queue and nf_conntrack_netlink. */
2799 RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
2800 #endif
2801 return 0;
2802
2803 err_unreg_exp_subsys:
2804 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
2805 err_unreg_subsys:
2806 nfnetlink_subsys_unregister(&ctnl_subsys);
2807 err_out:
2808 return ret;
2809 }
2810
2811 static void __exit ctnetlink_exit(void)
2812 {
2813 pr_info("ctnetlink: unregistering from nfnetlink.\n");
2814
2815 unregister_pernet_subsys(&ctnetlink_net_ops);
2816 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
2817 nfnetlink_subsys_unregister(&ctnl_subsys);
2818 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
2819 RCU_INIT_POINTER(nfq_ct_hook, NULL);
2820 #endif
2821 }
2822
2823 module_init(ctnetlink_init);
2824 module_exit(ctnetlink_exit);