/* Connection tracking via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>
 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial connection tracking via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_helper.h>
#endif

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
57 MODULE_LICENSE("GPL");
59 static char __initdata version
[] = "0.93";
62 ctnetlink_dump_tuples_proto(struct sk_buff
*skb
,
63 const struct nf_conntrack_tuple
*tuple
,
64 struct nf_conntrack_l4proto
*l4proto
)
67 struct nlattr
*nest_parms
;
69 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_PROTO
| NLA_F_NESTED
);
72 if (nla_put_u8(skb
, CTA_PROTO_NUM
, tuple
->dst
.protonum
))
75 if (likely(l4proto
->tuple_to_nlattr
))
76 ret
= l4proto
->tuple_to_nlattr(skb
, tuple
);
78 nla_nest_end(skb
, nest_parms
);
87 ctnetlink_dump_tuples_ip(struct sk_buff
*skb
,
88 const struct nf_conntrack_tuple
*tuple
,
89 struct nf_conntrack_l3proto
*l3proto
)
92 struct nlattr
*nest_parms
;
94 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_IP
| NLA_F_NESTED
);
98 if (likely(l3proto
->tuple_to_nlattr
))
99 ret
= l3proto
->tuple_to_nlattr(skb
, tuple
);
101 nla_nest_end(skb
, nest_parms
);
110 ctnetlink_dump_tuples(struct sk_buff
*skb
,
111 const struct nf_conntrack_tuple
*tuple
)
114 struct nf_conntrack_l3proto
*l3proto
;
115 struct nf_conntrack_l4proto
*l4proto
;
118 l3proto
= __nf_ct_l3proto_find(tuple
->src
.l3num
);
119 ret
= ctnetlink_dump_tuples_ip(skb
, tuple
, l3proto
);
122 l4proto
= __nf_ct_l4proto_find(tuple
->src
.l3num
,
123 tuple
->dst
.protonum
);
124 ret
= ctnetlink_dump_tuples_proto(skb
, tuple
, l4proto
);
131 ctnetlink_dump_status(struct sk_buff
*skb
, const struct nf_conn
*ct
)
133 if (nla_put_be32(skb
, CTA_STATUS
, htonl(ct
->status
)))
134 goto nla_put_failure
;
142 ctnetlink_dump_timeout(struct sk_buff
*skb
, const struct nf_conn
*ct
)
144 long timeout
= ((long)ct
->timeout
.expires
- (long)jiffies
) / HZ
;
149 if (nla_put_be32(skb
, CTA_TIMEOUT
, htonl(timeout
)))
150 goto nla_put_failure
;
158 ctnetlink_dump_protoinfo(struct sk_buff
*skb
, struct nf_conn
*ct
)
160 struct nf_conntrack_l4proto
*l4proto
;
161 struct nlattr
*nest_proto
;
164 l4proto
= __nf_ct_l4proto_find(nf_ct_l3num(ct
), nf_ct_protonum(ct
));
165 if (!l4proto
->to_nlattr
)
168 nest_proto
= nla_nest_start(skb
, CTA_PROTOINFO
| NLA_F_NESTED
);
170 goto nla_put_failure
;
172 ret
= l4proto
->to_nlattr(skb
, nest_proto
, ct
);
174 nla_nest_end(skb
, nest_proto
);
183 ctnetlink_dump_helpinfo(struct sk_buff
*skb
, const struct nf_conn
*ct
)
185 struct nlattr
*nest_helper
;
186 const struct nf_conn_help
*help
= nfct_help(ct
);
187 struct nf_conntrack_helper
*helper
;
192 helper
= rcu_dereference(help
->helper
);
196 nest_helper
= nla_nest_start(skb
, CTA_HELP
| NLA_F_NESTED
);
198 goto nla_put_failure
;
199 if (nla_put_string(skb
, CTA_HELP_NAME
, helper
->name
))
200 goto nla_put_failure
;
202 if (helper
->to_nlattr
)
203 helper
->to_nlattr(skb
, ct
);
205 nla_nest_end(skb
, nest_helper
);
214 dump_counters(struct sk_buff
*skb
, struct nf_conn_acct
*acct
,
215 enum ip_conntrack_dir dir
, int type
)
217 enum ctattr_type attr
= dir
? CTA_COUNTERS_REPLY
: CTA_COUNTERS_ORIG
;
218 struct nf_conn_counter
*counter
= acct
->counter
;
219 struct nlattr
*nest_count
;
222 if (type
== IPCTNL_MSG_CT_GET_CTRZERO
) {
223 pkts
= atomic64_xchg(&counter
[dir
].packets
, 0);
224 bytes
= atomic64_xchg(&counter
[dir
].bytes
, 0);
226 pkts
= atomic64_read(&counter
[dir
].packets
);
227 bytes
= atomic64_read(&counter
[dir
].bytes
);
230 nest_count
= nla_nest_start(skb
, attr
| NLA_F_NESTED
);
232 goto nla_put_failure
;
234 if (nla_put_be64(skb
, CTA_COUNTERS_PACKETS
, cpu_to_be64(pkts
)) ||
235 nla_put_be64(skb
, CTA_COUNTERS_BYTES
, cpu_to_be64(bytes
)))
236 goto nla_put_failure
;
238 nla_nest_end(skb
, nest_count
);
247 ctnetlink_dump_acct(struct sk_buff
*skb
, const struct nf_conn
*ct
, int type
)
249 struct nf_conn_acct
*acct
= nf_conn_acct_find(ct
);
254 if (dump_counters(skb
, acct
, IP_CT_DIR_ORIGINAL
, type
) < 0)
256 if (dump_counters(skb
, acct
, IP_CT_DIR_REPLY
, type
) < 0)
263 ctnetlink_dump_timestamp(struct sk_buff
*skb
, const struct nf_conn
*ct
)
265 struct nlattr
*nest_count
;
266 const struct nf_conn_tstamp
*tstamp
;
268 tstamp
= nf_conn_tstamp_find(ct
);
272 nest_count
= nla_nest_start(skb
, CTA_TIMESTAMP
| NLA_F_NESTED
);
274 goto nla_put_failure
;
276 if (nla_put_be64(skb
, CTA_TIMESTAMP_START
, cpu_to_be64(tstamp
->start
)) ||
277 (tstamp
->stop
!= 0 && nla_put_be64(skb
, CTA_TIMESTAMP_STOP
,
278 cpu_to_be64(tstamp
->stop
))))
279 goto nla_put_failure
;
280 nla_nest_end(skb
, nest_count
);
#ifdef CONFIG_NF_CONNTRACK_MARK
/* Put CTA_MARK (the conntrack mark, network byte order). */
static int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
#else
#define ctnetlink_dump_mark(a, b) (0)
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
/* Emit the CTA_SECCTX nest with the LSM security context string for
 * ct->secmark.  The context obtained from the LSM must always be
 * released, including on the error paths.
 */
static int
ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_secctx;
	int len, ret;
	char *secctx;

	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
	if (ret)
		return 0;

	ret = -1;
	nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
	if (!nest_secctx)
		goto nla_put_failure;

	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
		goto nla_put_failure;
	nla_nest_end(skb, nest_secctx);

	ret = 0;
nla_put_failure:
	security_release_secctx(secctx, len);
	return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
/* Worst-case attribute size for CTA_LABELS of this conntrack. */
static int ctnetlink_label_size(const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);

	if (!labels)
		return 0;
	return nla_total_size(labels->words * sizeof(long));
}

/* Put CTA_LABELS, but only when at least one label bit is set. */
static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
	unsigned int len, i;

	if (!labels)
		return 0;

	i = 0;
	len = labels->words * sizeof(long);
	do {
		if (labels->bits[i] != 0)
			return nla_put(skb, CTA_LABELS, len, labels->bits);
		i++;
	} while (i < labels->words);

	return 0;
}
#else
#define ctnetlink_dump_labels(a, b) (0)
#define ctnetlink_label_size(a) (0)
#endif
367 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
370 ctnetlink_dump_master(struct sk_buff
*skb
, const struct nf_conn
*ct
)
372 struct nlattr
*nest_parms
;
374 if (!(ct
->status
& IPS_EXPECTED
))
377 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_MASTER
| NLA_F_NESTED
);
379 goto nla_put_failure
;
380 if (ctnetlink_dump_tuples(skb
, master_tuple(ct
)) < 0)
381 goto nla_put_failure
;
382 nla_nest_end(skb
, nest_parms
);
391 dump_ct_seq_adj(struct sk_buff
*skb
, const struct nf_ct_seqadj
*seq
, int type
)
393 struct nlattr
*nest_parms
;
395 nest_parms
= nla_nest_start(skb
, type
| NLA_F_NESTED
);
397 goto nla_put_failure
;
399 if (nla_put_be32(skb
, CTA_SEQADJ_CORRECTION_POS
,
400 htonl(seq
->correction_pos
)) ||
401 nla_put_be32(skb
, CTA_SEQADJ_OFFSET_BEFORE
,
402 htonl(seq
->offset_before
)) ||
403 nla_put_be32(skb
, CTA_SEQADJ_OFFSET_AFTER
,
404 htonl(seq
->offset_after
)))
405 goto nla_put_failure
;
407 nla_nest_end(skb
, nest_parms
);
416 ctnetlink_dump_ct_seq_adj(struct sk_buff
*skb
, const struct nf_conn
*ct
)
418 struct nf_conn_seqadj
*seqadj
= nfct_seqadj(ct
);
419 struct nf_ct_seqadj
*seq
;
421 if (!(ct
->status
& IPS_SEQ_ADJUST
) || !seqadj
)
424 seq
= &seqadj
->seq
[IP_CT_DIR_ORIGINAL
];
425 if (dump_ct_seq_adj(skb
, seq
, CTA_SEQ_ADJ_ORIG
) == -1)
428 seq
= &seqadj
->seq
[IP_CT_DIR_REPLY
];
429 if (dump_ct_seq_adj(skb
, seq
, CTA_SEQ_ADJ_REPLY
) == -1)
436 ctnetlink_dump_id(struct sk_buff
*skb
, const struct nf_conn
*ct
)
438 if (nla_put_be32(skb
, CTA_ID
, htonl((unsigned long)ct
)))
439 goto nla_put_failure
;
447 ctnetlink_dump_use(struct sk_buff
*skb
, const struct nf_conn
*ct
)
449 if (nla_put_be32(skb
, CTA_USE
, htonl(atomic_read(&ct
->ct_general
.use
))))
450 goto nla_put_failure
;
458 ctnetlink_fill_info(struct sk_buff
*skb
, u32 portid
, u32 seq
, u32 type
,
461 struct nlmsghdr
*nlh
;
462 struct nfgenmsg
*nfmsg
;
463 struct nlattr
*nest_parms
;
464 unsigned int flags
= portid
? NLM_F_MULTI
: 0, event
;
466 event
= (NFNL_SUBSYS_CTNETLINK
<< 8 | IPCTNL_MSG_CT_NEW
);
467 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(*nfmsg
), flags
);
471 nfmsg
= nlmsg_data(nlh
);
472 nfmsg
->nfgen_family
= nf_ct_l3num(ct
);
473 nfmsg
->version
= NFNETLINK_V0
;
476 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_ORIG
| NLA_F_NESTED
);
478 goto nla_put_failure
;
479 if (ctnetlink_dump_tuples(skb
, nf_ct_tuple(ct
, IP_CT_DIR_ORIGINAL
)) < 0)
480 goto nla_put_failure
;
481 nla_nest_end(skb
, nest_parms
);
483 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_REPLY
| NLA_F_NESTED
);
485 goto nla_put_failure
;
486 if (ctnetlink_dump_tuples(skb
, nf_ct_tuple(ct
, IP_CT_DIR_REPLY
)) < 0)
487 goto nla_put_failure
;
488 nla_nest_end(skb
, nest_parms
);
490 if (nf_ct_zone(ct
) &&
491 nla_put_be16(skb
, CTA_ZONE
, htons(nf_ct_zone(ct
))))
492 goto nla_put_failure
;
494 if (ctnetlink_dump_status(skb
, ct
) < 0 ||
495 ctnetlink_dump_timeout(skb
, ct
) < 0 ||
496 ctnetlink_dump_acct(skb
, ct
, type
) < 0 ||
497 ctnetlink_dump_timestamp(skb
, ct
) < 0 ||
498 ctnetlink_dump_protoinfo(skb
, ct
) < 0 ||
499 ctnetlink_dump_helpinfo(skb
, ct
) < 0 ||
500 ctnetlink_dump_mark(skb
, ct
) < 0 ||
501 ctnetlink_dump_secctx(skb
, ct
) < 0 ||
502 ctnetlink_dump_labels(skb
, ct
) < 0 ||
503 ctnetlink_dump_id(skb
, ct
) < 0 ||
504 ctnetlink_dump_use(skb
, ct
) < 0 ||
505 ctnetlink_dump_master(skb
, ct
) < 0 ||
506 ctnetlink_dump_ct_seq_adj(skb
, ct
) < 0)
507 goto nla_put_failure
;
514 nlmsg_cancel(skb
, nlh
);
519 ctnetlink_proto_size(const struct nf_conn
*ct
)
521 struct nf_conntrack_l3proto
*l3proto
;
522 struct nf_conntrack_l4proto
*l4proto
;
526 l3proto
= __nf_ct_l3proto_find(nf_ct_l3num(ct
));
527 len
+= l3proto
->nla_size
;
529 l4proto
= __nf_ct_l4proto_find(nf_ct_l3num(ct
), nf_ct_protonum(ct
));
530 len
+= l4proto
->nla_size
;
537 ctnetlink_acct_size(const struct nf_conn
*ct
)
539 if (!nf_ct_ext_exist(ct
, NF_CT_EXT_ACCT
))
541 return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
542 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
543 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
548 ctnetlink_secctx_size(const struct nf_conn
*ct
)
550 #ifdef CONFIG_NF_CONNTRACK_SECMARK
553 ret
= security_secid_to_secctx(ct
->secmark
, NULL
, &len
);
557 return nla_total_size(0) /* CTA_SECCTX */
558 + nla_total_size(sizeof(char) * len
); /* CTA_SECCTX_NAME */
565 ctnetlink_timestamp_size(const struct nf_conn
*ct
)
567 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
568 if (!nf_ct_ext_exist(ct
, NF_CT_EXT_TSTAMP
))
570 return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
577 ctnetlink_nlmsg_size(const struct nf_conn
*ct
)
579 return NLMSG_ALIGN(sizeof(struct nfgenmsg
))
580 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
581 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
582 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
583 + 3 * nla_total_size(sizeof(u_int8_t
)) /* CTA_PROTO_NUM */
584 + nla_total_size(sizeof(u_int32_t
)) /* CTA_ID */
585 + nla_total_size(sizeof(u_int32_t
)) /* CTA_STATUS */
586 + ctnetlink_acct_size(ct
)
587 + ctnetlink_timestamp_size(ct
)
588 + nla_total_size(sizeof(u_int32_t
)) /* CTA_TIMEOUT */
589 + nla_total_size(0) /* CTA_PROTOINFO */
590 + nla_total_size(0) /* CTA_HELP */
591 + nla_total_size(NF_CT_HELPER_NAME_LEN
) /* CTA_HELP_NAME */
592 + ctnetlink_secctx_size(ct
)
593 #ifdef CONFIG_NF_NAT_NEEDED
594 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
595 + 6 * nla_total_size(sizeof(u_int32_t
)) /* CTA_NAT_SEQ_OFFSET */
597 #ifdef CONFIG_NF_CONNTRACK_MARK
598 + nla_total_size(sizeof(u_int32_t
)) /* CTA_MARK */
600 #ifdef CONFIG_NF_CONNTRACK_ZONES
601 + nla_total_size(sizeof(u_int16_t
)) /* CTA_ZONE */
603 + ctnetlink_proto_size(ct
)
604 + ctnetlink_label_size(ct
)
#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Conntrack event notifier: translate IPCT_* event bits into a
 * multicast ctnetlink message (NEW/UPDATE/DESTROY group) and send it.
 * Returns -ENOBUFS so the event core can mark the listener overrun.
 */
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
	struct net *net;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	/* ignore our fake conntrack entry */
	if (nf_ct_is_untracked(ct))
		return 0;

	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	net = nf_ct_net(ct);
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version	= NFNETLINK_V0;
	nfmsg->res_id	= 0;

	rcu_read_lock();
	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct) &&
	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
		goto nla_put_failure;

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (events & (1 << IPCT_DESTROY)) {
		/* final counters/timestamps go out with the DESTROY event */
		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
		    ctnetlink_dump_timestamp(skb, ct) < 0)
			goto nla_put_failure;
	} else {
		if (ctnetlink_dump_timeout(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO)
		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secctx(skb, ct) < 0)
			goto nla_put_failure;
#endif
		if (events & (1 << IPCT_LABEL) &&
		    ctnetlink_dump_labels(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_SEQADJ) &&
		    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((events & (1 << IPCT_MARK) || ct->mark)
	    && ctnetlink_dump_mark(skb, ct) < 0)
		goto nla_put_failure;
#endif
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, net, item->portid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
		return -ENOBUFS;

	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
744 static int ctnetlink_done(struct netlink_callback
*cb
)
747 nf_ct_put((struct nf_conn
*)cb
->args
[1]);
752 struct ctnetlink_dump_filter
{
760 ctnetlink_dump_table(struct sk_buff
*skb
, struct netlink_callback
*cb
)
762 struct net
*net
= sock_net(skb
->sk
);
763 struct nf_conn
*ct
, *last
;
764 struct nf_conntrack_tuple_hash
*h
;
765 struct hlist_nulls_node
*n
;
766 struct nfgenmsg
*nfmsg
= nlmsg_data(cb
->nlh
);
767 u_int8_t l3proto
= nfmsg
->nfgen_family
;
771 #ifdef CONFIG_NF_CONNTRACK_MARK
772 const struct ctnetlink_dump_filter
*filter
= cb
->data
;
775 last
= (struct nf_conn
*)cb
->args
[1];
778 for (; cb
->args
[0] < net
->ct
.htable_size
; cb
->args
[0]++) {
780 lockp
= &nf_conntrack_locks
[cb
->args
[0] % CONNTRACK_LOCKS
];
782 if (cb
->args
[0] >= net
->ct
.htable_size
) {
786 hlist_nulls_for_each_entry(h
, n
, &net
->ct
.hash
[cb
->args
[0]],
788 if (NF_CT_DIRECTION(h
) != IP_CT_DIR_ORIGINAL
)
790 ct
= nf_ct_tuplehash_to_ctrack(h
);
791 /* Dump entries of a given L3 protocol number.
792 * If it is not specified, ie. l3proto == 0,
793 * then dump everything. */
794 if (l3proto
&& nf_ct_l3num(ct
) != l3proto
)
801 #ifdef CONFIG_NF_CONNTRACK_MARK
802 if (filter
&& !((ct
->mark
& filter
->mark
.mask
) ==
809 ctnetlink_fill_info(skb
, NETLINK_CB(cb
->skb
).portid
,
811 NFNL_MSG_TYPE(cb
->nlh
->nlmsg_type
),
815 nf_conntrack_get(&ct
->ct_general
);
816 cb
->args
[1] = (unsigned long)ct
;
836 ctnetlink_parse_tuple_ip(struct nlattr
*attr
, struct nf_conntrack_tuple
*tuple
)
838 struct nlattr
*tb
[CTA_IP_MAX
+1];
839 struct nf_conntrack_l3proto
*l3proto
;
842 ret
= nla_parse_nested(tb
, CTA_IP_MAX
, attr
, NULL
);
847 l3proto
= __nf_ct_l3proto_find(tuple
->src
.l3num
);
849 if (likely(l3proto
->nlattr_to_tuple
)) {
850 ret
= nla_validate_nested(attr
, CTA_IP_MAX
,
851 l3proto
->nla_policy
);
853 ret
= l3proto
->nlattr_to_tuple(tb
, tuple
);
861 static const struct nla_policy proto_nla_policy
[CTA_PROTO_MAX
+1] = {
862 [CTA_PROTO_NUM
] = { .type
= NLA_U8
},
866 ctnetlink_parse_tuple_proto(struct nlattr
*attr
,
867 struct nf_conntrack_tuple
*tuple
)
869 struct nlattr
*tb
[CTA_PROTO_MAX
+1];
870 struct nf_conntrack_l4proto
*l4proto
;
873 ret
= nla_parse_nested(tb
, CTA_PROTO_MAX
, attr
, proto_nla_policy
);
877 if (!tb
[CTA_PROTO_NUM
])
879 tuple
->dst
.protonum
= nla_get_u8(tb
[CTA_PROTO_NUM
]);
882 l4proto
= __nf_ct_l4proto_find(tuple
->src
.l3num
, tuple
->dst
.protonum
);
884 if (likely(l4proto
->nlattr_to_tuple
)) {
885 ret
= nla_validate_nested(attr
, CTA_PROTO_MAX
,
886 l4proto
->nla_policy
);
888 ret
= l4proto
->nlattr_to_tuple(tb
, tuple
);
896 static const struct nla_policy tuple_nla_policy
[CTA_TUPLE_MAX
+1] = {
897 [CTA_TUPLE_IP
] = { .type
= NLA_NESTED
},
898 [CTA_TUPLE_PROTO
] = { .type
= NLA_NESTED
},
902 ctnetlink_parse_tuple(const struct nlattr
* const cda
[],
903 struct nf_conntrack_tuple
*tuple
,
904 enum ctattr_type type
, u_int8_t l3num
)
906 struct nlattr
*tb
[CTA_TUPLE_MAX
+1];
909 memset(tuple
, 0, sizeof(*tuple
));
911 err
= nla_parse_nested(tb
, CTA_TUPLE_MAX
, cda
[type
], tuple_nla_policy
);
915 if (!tb
[CTA_TUPLE_IP
])
918 tuple
->src
.l3num
= l3num
;
920 err
= ctnetlink_parse_tuple_ip(tb
[CTA_TUPLE_IP
], tuple
);
924 if (!tb
[CTA_TUPLE_PROTO
])
927 err
= ctnetlink_parse_tuple_proto(tb
[CTA_TUPLE_PROTO
], tuple
);
931 /* orig and expect tuples get DIR_ORIGINAL */
932 if (type
== CTA_TUPLE_REPLY
)
933 tuple
->dst
.dir
= IP_CT_DIR_REPLY
;
935 tuple
->dst
.dir
= IP_CT_DIR_ORIGINAL
;
941 ctnetlink_parse_zone(const struct nlattr
*attr
, u16
*zone
)
944 #ifdef CONFIG_NF_CONNTRACK_ZONES
945 *zone
= ntohs(nla_get_be16(attr
));
955 static const struct nla_policy help_nla_policy
[CTA_HELP_MAX
+1] = {
956 [CTA_HELP_NAME
] = { .type
= NLA_NUL_STRING
,
957 .len
= NF_CT_HELPER_NAME_LEN
- 1 },
961 ctnetlink_parse_help(const struct nlattr
*attr
, char **helper_name
,
962 struct nlattr
**helpinfo
)
965 struct nlattr
*tb
[CTA_HELP_MAX
+1];
967 err
= nla_parse_nested(tb
, CTA_HELP_MAX
, attr
, help_nla_policy
);
971 if (!tb
[CTA_HELP_NAME
])
974 *helper_name
= nla_data(tb
[CTA_HELP_NAME
]);
976 if (tb
[CTA_HELP_INFO
])
977 *helpinfo
= tb
[CTA_HELP_INFO
];
982 static const struct nla_policy ct_nla_policy
[CTA_MAX
+1] = {
983 [CTA_TUPLE_ORIG
] = { .type
= NLA_NESTED
},
984 [CTA_TUPLE_REPLY
] = { .type
= NLA_NESTED
},
985 [CTA_STATUS
] = { .type
= NLA_U32
},
986 [CTA_PROTOINFO
] = { .type
= NLA_NESTED
},
987 [CTA_HELP
] = { .type
= NLA_NESTED
},
988 [CTA_NAT_SRC
] = { .type
= NLA_NESTED
},
989 [CTA_TIMEOUT
] = { .type
= NLA_U32
},
990 [CTA_MARK
] = { .type
= NLA_U32
},
991 [CTA_ID
] = { .type
= NLA_U32
},
992 [CTA_NAT_DST
] = { .type
= NLA_NESTED
},
993 [CTA_TUPLE_MASTER
] = { .type
= NLA_NESTED
},
994 [CTA_NAT_SEQ_ADJ_ORIG
] = { .type
= NLA_NESTED
},
995 [CTA_NAT_SEQ_ADJ_REPLY
] = { .type
= NLA_NESTED
},
996 [CTA_ZONE
] = { .type
= NLA_U16
},
997 [CTA_MARK_MASK
] = { .type
= NLA_U32
},
998 [CTA_LABELS
] = { .type
= NLA_BINARY
,
999 .len
= NF_CT_LABELS_MAX_SIZE
},
1000 [CTA_LABELS_MASK
] = { .type
= NLA_BINARY
,
1001 .len
= NF_CT_LABELS_MAX_SIZE
},
1005 ctnetlink_del_conntrack(struct sock
*ctnl
, struct sk_buff
*skb
,
1006 const struct nlmsghdr
*nlh
,
1007 const struct nlattr
* const cda
[])
1009 struct net
*net
= sock_net(ctnl
);
1010 struct nf_conntrack_tuple_hash
*h
;
1011 struct nf_conntrack_tuple tuple
;
1013 struct nfgenmsg
*nfmsg
= nlmsg_data(nlh
);
1014 u_int8_t u3
= nfmsg
->nfgen_family
;
1018 err
= ctnetlink_parse_zone(cda
[CTA_ZONE
], &zone
);
1022 if (cda
[CTA_TUPLE_ORIG
])
1023 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_TUPLE_ORIG
, u3
);
1024 else if (cda
[CTA_TUPLE_REPLY
])
1025 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_TUPLE_REPLY
, u3
);
1027 /* Flush the whole table */
1028 nf_conntrack_flush_report(net
,
1029 NETLINK_CB(skb
).portid
,
1037 h
= nf_conntrack_find_get(net
, zone
, &tuple
);
1041 ct
= nf_ct_tuplehash_to_ctrack(h
);
1044 u_int32_t id
= ntohl(nla_get_be32(cda
[CTA_ID
]));
1045 if (id
!= (u32
)(unsigned long)ct
) {
1051 if (del_timer(&ct
->timeout
))
1052 nf_ct_delete(ct
, NETLINK_CB(skb
).portid
, nlmsg_report(nlh
));
1060 ctnetlink_get_conntrack(struct sock
*ctnl
, struct sk_buff
*skb
,
1061 const struct nlmsghdr
*nlh
,
1062 const struct nlattr
* const cda
[])
1064 struct net
*net
= sock_net(ctnl
);
1065 struct nf_conntrack_tuple_hash
*h
;
1066 struct nf_conntrack_tuple tuple
;
1068 struct sk_buff
*skb2
= NULL
;
1069 struct nfgenmsg
*nfmsg
= nlmsg_data(nlh
);
1070 u_int8_t u3
= nfmsg
->nfgen_family
;
1074 if (nlh
->nlmsg_flags
& NLM_F_DUMP
) {
1075 struct netlink_dump_control c
= {
1076 .dump
= ctnetlink_dump_table
,
1077 .done
= ctnetlink_done
,
1079 #ifdef CONFIG_NF_CONNTRACK_MARK
1080 if (cda
[CTA_MARK
] && cda
[CTA_MARK_MASK
]) {
1081 struct ctnetlink_dump_filter
*filter
;
1083 filter
= kzalloc(sizeof(struct ctnetlink_dump_filter
),
1088 filter
->mark
.val
= ntohl(nla_get_be32(cda
[CTA_MARK
]));
1090 ntohl(nla_get_be32(cda
[CTA_MARK_MASK
]));
1094 return netlink_dump_start(ctnl
, skb
, nlh
, &c
);
1097 err
= ctnetlink_parse_zone(cda
[CTA_ZONE
], &zone
);
1101 if (cda
[CTA_TUPLE_ORIG
])
1102 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_TUPLE_ORIG
, u3
);
1103 else if (cda
[CTA_TUPLE_REPLY
])
1104 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_TUPLE_REPLY
, u3
);
1111 h
= nf_conntrack_find_get(net
, zone
, &tuple
);
1115 ct
= nf_ct_tuplehash_to_ctrack(h
);
1118 skb2
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
1125 err
= ctnetlink_fill_info(skb2
, NETLINK_CB(skb
).portid
, nlh
->nlmsg_seq
,
1126 NFNL_MSG_TYPE(nlh
->nlmsg_type
), ct
);
1132 err
= netlink_unicast(ctnl
, skb2
, NETLINK_CB(skb
).portid
, MSG_DONTWAIT
);
1141 /* this avoids a loop in nfnetlink. */
1142 return err
== -EAGAIN
? -ENOBUFS
: err
;
1145 static int ctnetlink_done_list(struct netlink_callback
*cb
)
1148 nf_ct_put((struct nf_conn
*)cb
->args
[1]);
1153 ctnetlink_dump_list(struct sk_buff
*skb
, struct netlink_callback
*cb
, bool dying
)
1155 struct nf_conn
*ct
, *last
;
1156 struct nf_conntrack_tuple_hash
*h
;
1157 struct hlist_nulls_node
*n
;
1158 struct nfgenmsg
*nfmsg
= nlmsg_data(cb
->nlh
);
1159 u_int8_t l3proto
= nfmsg
->nfgen_family
;
1162 struct hlist_nulls_head
*list
;
1163 struct net
*net
= sock_net(skb
->sk
);
1168 last
= (struct nf_conn
*)cb
->args
[1];
1170 for (cpu
= cb
->args
[0]; cpu
< nr_cpu_ids
; cpu
++) {
1171 struct ct_pcpu
*pcpu
;
1173 if (!cpu_possible(cpu
))
1176 pcpu
= per_cpu_ptr(net
->ct
.pcpu_lists
, cpu
);
1177 spin_lock_bh(&pcpu
->lock
);
1178 list
= dying
? &pcpu
->dying
: &pcpu
->unconfirmed
;
1180 hlist_nulls_for_each_entry(h
, n
, list
, hnnode
) {
1181 ct
= nf_ct_tuplehash_to_ctrack(h
);
1182 if (l3proto
&& nf_ct_l3num(ct
) != l3proto
)
1190 res
= ctnetlink_fill_info(skb
, NETLINK_CB(cb
->skb
).portid
,
1192 NFNL_MSG_TYPE(cb
->nlh
->nlmsg_type
),
1196 if (!atomic_inc_not_zero(&ct
->ct_general
.use
))
1199 cb
->args
[1] = (unsigned long)ct
;
1200 spin_unlock_bh(&pcpu
->lock
);
1208 spin_unlock_bh(&pcpu
->lock
);
1219 ctnetlink_dump_dying(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1221 return ctnetlink_dump_list(skb
, cb
, true);
1225 ctnetlink_get_ct_dying(struct sock
*ctnl
, struct sk_buff
*skb
,
1226 const struct nlmsghdr
*nlh
,
1227 const struct nlattr
* const cda
[])
1229 if (nlh
->nlmsg_flags
& NLM_F_DUMP
) {
1230 struct netlink_dump_control c
= {
1231 .dump
= ctnetlink_dump_dying
,
1232 .done
= ctnetlink_done_list
,
1234 return netlink_dump_start(ctnl
, skb
, nlh
, &c
);
1241 ctnetlink_dump_unconfirmed(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1243 return ctnetlink_dump_list(skb
, cb
, false);
1247 ctnetlink_get_ct_unconfirmed(struct sock
*ctnl
, struct sk_buff
*skb
,
1248 const struct nlmsghdr
*nlh
,
1249 const struct nlattr
* const cda
[])
1251 if (nlh
->nlmsg_flags
& NLM_F_DUMP
) {
1252 struct netlink_dump_control c
= {
1253 .dump
= ctnetlink_dump_unconfirmed
,
1254 .done
= ctnetlink_done_list
,
1256 return netlink_dump_start(ctnl
, skb
, nlh
, &c
);
#ifdef CONFIG_NF_NAT_NEEDED
/* Hand a CTA_NAT_SRC/DST attribute to the NAT module via the
 * nfnetlink_parse_nat_setup hook, auto-loading "nf-nat" (and the
 * per-family "nf-nat-%u" module) when needed.  Returns -EAGAIN after a
 * successful module load so the caller retries the whole operation with
 * the hook now registered.
 */
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
	int err;

	parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
	if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		/* must drop the nfnl mutex around request_module() */
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat") < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
		if (nfnetlink_parse_nat_setup_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	err = parse_nat_setup(ct, manip, attr);
	if (err == -EAGAIN) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
#endif
	}
	return err;
}
#endif
1310 ctnetlink_change_status(struct nf_conn
*ct
, const struct nlattr
* const cda
[])
1313 unsigned int status
= ntohl(nla_get_be32(cda
[CTA_STATUS
]));
1314 d
= ct
->status
^ status
;
1316 if (d
& (IPS_EXPECTED
|IPS_CONFIRMED
|IPS_DYING
))
1320 if (d
& IPS_SEEN_REPLY
&& !(status
& IPS_SEEN_REPLY
))
1321 /* SEEN_REPLY bit can only be set */
1324 if (d
& IPS_ASSURED
&& !(status
& IPS_ASSURED
))
1325 /* ASSURED bit can only be set */
1328 /* Be careful here, modifying NAT bits can screw up things,
1329 * so don't let users modify them directly if they don't pass
1331 ct
->status
|= status
& ~(IPS_NAT_DONE_MASK
| IPS_NAT_MASK
);
1336 ctnetlink_setup_nat(struct nf_conn
*ct
, const struct nlattr
* const cda
[])
1338 #ifdef CONFIG_NF_NAT_NEEDED
1341 if (!cda
[CTA_NAT_DST
] && !cda
[CTA_NAT_SRC
])
1344 ret
= ctnetlink_parse_nat_setup(ct
, NF_NAT_MANIP_DST
,
1349 ret
= ctnetlink_parse_nat_setup(ct
, NF_NAT_MANIP_SRC
,
1353 if (!cda
[CTA_NAT_DST
] && !cda
[CTA_NAT_SRC
])
1360 ctnetlink_change_helper(struct nf_conn
*ct
, const struct nlattr
* const cda
[])
1362 struct nf_conntrack_helper
*helper
;
1363 struct nf_conn_help
*help
= nfct_help(ct
);
1364 char *helpname
= NULL
;
1365 struct nlattr
*helpinfo
= NULL
;
1368 /* don't change helper of sibling connections */
1372 err
= ctnetlink_parse_help(cda
[CTA_HELP
], &helpname
, &helpinfo
);
1376 if (!strcmp(helpname
, "")) {
1377 if (help
&& help
->helper
) {
1378 /* we had a helper before ... */
1379 nf_ct_remove_expectations(ct
);
1380 RCU_INIT_POINTER(help
->helper
, NULL
);
1386 helper
= __nf_conntrack_helper_find(helpname
, nf_ct_l3num(ct
),
1387 nf_ct_protonum(ct
));
1388 if (helper
== NULL
) {
1389 #ifdef CONFIG_MODULES
1390 spin_unlock_bh(&nf_conntrack_expect_lock
);
1392 if (request_module("nfct-helper-%s", helpname
) < 0) {
1393 spin_lock_bh(&nf_conntrack_expect_lock
);
1397 spin_lock_bh(&nf_conntrack_expect_lock
);
1398 helper
= __nf_conntrack_helper_find(helpname
, nf_ct_l3num(ct
),
1399 nf_ct_protonum(ct
));
1407 if (help
->helper
== helper
) {
1408 /* update private helper data if allowed. */
1409 if (helper
->from_nlattr
)
1410 helper
->from_nlattr(helpinfo
, ct
);
1416 /* we cannot set a helper for an existing conntrack */
1421 ctnetlink_change_timeout(struct nf_conn
*ct
, const struct nlattr
* const cda
[])
1423 u_int32_t timeout
= ntohl(nla_get_be32(cda
[CTA_TIMEOUT
]));
1425 if (!del_timer(&ct
->timeout
))
1428 ct
->timeout
.expires
= jiffies
+ timeout
* HZ
;
1429 add_timer(&ct
->timeout
);
1434 static const struct nla_policy protoinfo_policy
[CTA_PROTOINFO_MAX
+1] = {
1435 [CTA_PROTOINFO_TCP
] = { .type
= NLA_NESTED
},
1436 [CTA_PROTOINFO_DCCP
] = { .type
= NLA_NESTED
},
1437 [CTA_PROTOINFO_SCTP
] = { .type
= NLA_NESTED
},
1441 ctnetlink_change_protoinfo(struct nf_conn
*ct
, const struct nlattr
* const cda
[])
1443 const struct nlattr
*attr
= cda
[CTA_PROTOINFO
];
1444 struct nlattr
*tb
[CTA_PROTOINFO_MAX
+1];
1445 struct nf_conntrack_l4proto
*l4proto
;
1448 err
= nla_parse_nested(tb
, CTA_PROTOINFO_MAX
, attr
, protoinfo_policy
);
1453 l4proto
= __nf_ct_l4proto_find(nf_ct_l3num(ct
), nf_ct_protonum(ct
));
1454 if (l4proto
->from_nlattr
)
1455 err
= l4proto
->from_nlattr(tb
, ct
);
1461 static const struct nla_policy seqadj_policy
[CTA_SEQADJ_MAX
+1] = {
1462 [CTA_SEQADJ_CORRECTION_POS
] = { .type
= NLA_U32
},
1463 [CTA_SEQADJ_OFFSET_BEFORE
] = { .type
= NLA_U32
},
1464 [CTA_SEQADJ_OFFSET_AFTER
] = { .type
= NLA_U32
},
1468 change_seq_adj(struct nf_ct_seqadj
*seq
, const struct nlattr
* const attr
)
1471 struct nlattr
*cda
[CTA_SEQADJ_MAX
+1];
1473 err
= nla_parse_nested(cda
, CTA_SEQADJ_MAX
, attr
, seqadj_policy
);
1477 if (!cda
[CTA_SEQADJ_CORRECTION_POS
])
1480 seq
->correction_pos
=
1481 ntohl(nla_get_be32(cda
[CTA_SEQADJ_CORRECTION_POS
]));
1483 if (!cda
[CTA_SEQADJ_OFFSET_BEFORE
])
1486 seq
->offset_before
=
1487 ntohl(nla_get_be32(cda
[CTA_SEQADJ_OFFSET_BEFORE
]));
1489 if (!cda
[CTA_SEQADJ_OFFSET_AFTER
])
1493 ntohl(nla_get_be32(cda
[CTA_SEQADJ_OFFSET_AFTER
]));
1499 ctnetlink_change_seq_adj(struct nf_conn
*ct
,
1500 const struct nlattr
* const cda
[])
1502 struct nf_conn_seqadj
*seqadj
= nfct_seqadj(ct
);
1508 if (cda
[CTA_SEQ_ADJ_ORIG
]) {
1509 ret
= change_seq_adj(&seqadj
->seq
[IP_CT_DIR_ORIGINAL
],
1510 cda
[CTA_SEQ_ADJ_ORIG
]);
1514 ct
->status
|= IPS_SEQ_ADJUST
;
1517 if (cda
[CTA_SEQ_ADJ_REPLY
]) {
1518 ret
= change_seq_adj(&seqadj
->seq
[IP_CT_DIR_REPLY
],
1519 cda
[CTA_SEQ_ADJ_REPLY
]);
1523 ct
->status
|= IPS_SEQ_ADJUST
;
1530 ctnetlink_attach_labels(struct nf_conn
*ct
, const struct nlattr
* const cda
[])
1532 #ifdef CONFIG_NF_CONNTRACK_LABELS
1533 size_t len
= nla_len(cda
[CTA_LABELS
]);
1534 const void *mask
= cda
[CTA_LABELS_MASK
];
1536 if (len
& (sizeof(u32
)-1)) /* must be multiple of u32 */
1540 if (nla_len(cda
[CTA_LABELS_MASK
]) == 0 ||
1541 nla_len(cda
[CTA_LABELS_MASK
]) != len
)
1543 mask
= nla_data(cda
[CTA_LABELS_MASK
]);
1548 return nf_connlabels_replace(ct
, nla_data(cda
[CTA_LABELS
]), mask
, len
);
1555 ctnetlink_change_conntrack(struct nf_conn
*ct
,
1556 const struct nlattr
* const cda
[])
1560 /* only allow NAT changes and master assignation for new conntracks */
1561 if (cda
[CTA_NAT_SRC
] || cda
[CTA_NAT_DST
] || cda
[CTA_TUPLE_MASTER
])
1564 if (cda
[CTA_HELP
]) {
1565 err
= ctnetlink_change_helper(ct
, cda
);
1570 if (cda
[CTA_TIMEOUT
]) {
1571 err
= ctnetlink_change_timeout(ct
, cda
);
1576 if (cda
[CTA_STATUS
]) {
1577 err
= ctnetlink_change_status(ct
, cda
);
1582 if (cda
[CTA_PROTOINFO
]) {
1583 err
= ctnetlink_change_protoinfo(ct
, cda
);
1588 #if defined(CONFIG_NF_CONNTRACK_MARK)
1590 ct
->mark
= ntohl(nla_get_be32(cda
[CTA_MARK
]));
1593 if (cda
[CTA_SEQ_ADJ_ORIG
] || cda
[CTA_SEQ_ADJ_REPLY
]) {
1594 err
= ctnetlink_change_seq_adj(ct
, cda
);
1599 if (cda
[CTA_LABELS
]) {
1600 err
= ctnetlink_attach_labels(ct
, cda
);
1608 static struct nf_conn
*
1609 ctnetlink_create_conntrack(struct net
*net
, u16 zone
,
1610 const struct nlattr
* const cda
[],
1611 struct nf_conntrack_tuple
*otuple
,
1612 struct nf_conntrack_tuple
*rtuple
,
1617 struct nf_conntrack_helper
*helper
;
1618 struct nf_conn_tstamp
*tstamp
;
1620 ct
= nf_conntrack_alloc(net
, zone
, otuple
, rtuple
, GFP_ATOMIC
);
1622 return ERR_PTR(-ENOMEM
);
1624 if (!cda
[CTA_TIMEOUT
])
1626 ct
->timeout
.expires
= ntohl(nla_get_be32(cda
[CTA_TIMEOUT
]));
1628 ct
->timeout
.expires
= jiffies
+ ct
->timeout
.expires
* HZ
;
1631 if (cda
[CTA_HELP
]) {
1632 char *helpname
= NULL
;
1633 struct nlattr
*helpinfo
= NULL
;
1635 err
= ctnetlink_parse_help(cda
[CTA_HELP
], &helpname
, &helpinfo
);
1639 helper
= __nf_conntrack_helper_find(helpname
, nf_ct_l3num(ct
),
1640 nf_ct_protonum(ct
));
1641 if (helper
== NULL
) {
1643 #ifdef CONFIG_MODULES
1644 if (request_module("nfct-helper-%s", helpname
) < 0) {
1650 helper
= __nf_conntrack_helper_find(helpname
,
1652 nf_ct_protonum(ct
));
1662 struct nf_conn_help
*help
;
1664 help
= nf_ct_helper_ext_add(ct
, helper
, GFP_ATOMIC
);
1669 /* set private helper data if allowed. */
1670 if (helper
->from_nlattr
)
1671 helper
->from_nlattr(helpinfo
, ct
);
1673 /* not in hash table yet so not strictly necessary */
1674 RCU_INIT_POINTER(help
->helper
, helper
);
1677 /* try an implicit helper assignation */
1678 err
= __nf_ct_try_assign_helper(ct
, NULL
, GFP_ATOMIC
);
1683 err
= ctnetlink_setup_nat(ct
, cda
);
1687 nf_ct_acct_ext_add(ct
, GFP_ATOMIC
);
1688 nf_ct_tstamp_ext_add(ct
, GFP_ATOMIC
);
1689 nf_ct_ecache_ext_add(ct
, 0, 0, GFP_ATOMIC
);
1690 nf_ct_labels_ext_add(ct
);
1692 /* we must add conntrack extensions before confirmation. */
1693 ct
->status
|= IPS_CONFIRMED
;
1695 if (cda
[CTA_STATUS
]) {
1696 err
= ctnetlink_change_status(ct
, cda
);
1701 if (cda
[CTA_SEQ_ADJ_ORIG
] || cda
[CTA_SEQ_ADJ_REPLY
]) {
1702 err
= ctnetlink_change_seq_adj(ct
, cda
);
1707 memset(&ct
->proto
, 0, sizeof(ct
->proto
));
1708 if (cda
[CTA_PROTOINFO
]) {
1709 err
= ctnetlink_change_protoinfo(ct
, cda
);
1714 #if defined(CONFIG_NF_CONNTRACK_MARK)
1716 ct
->mark
= ntohl(nla_get_be32(cda
[CTA_MARK
]));
1719 /* setup master conntrack: this is a confirmed expectation */
1720 if (cda
[CTA_TUPLE_MASTER
]) {
1721 struct nf_conntrack_tuple master
;
1722 struct nf_conntrack_tuple_hash
*master_h
;
1723 struct nf_conn
*master_ct
;
1725 err
= ctnetlink_parse_tuple(cda
, &master
, CTA_TUPLE_MASTER
, u3
);
1729 master_h
= nf_conntrack_find_get(net
, zone
, &master
);
1730 if (master_h
== NULL
) {
1734 master_ct
= nf_ct_tuplehash_to_ctrack(master_h
);
1735 __set_bit(IPS_EXPECTED_BIT
, &ct
->status
);
1736 ct
->master
= master_ct
;
1738 tstamp
= nf_conn_tstamp_find(ct
);
1740 tstamp
->start
= ktime_get_real_ns();
1742 err
= nf_conntrack_hash_check_insert(ct
);
1753 nf_conntrack_free(ct
);
1754 return ERR_PTR(err
);
1758 ctnetlink_new_conntrack(struct sock
*ctnl
, struct sk_buff
*skb
,
1759 const struct nlmsghdr
*nlh
,
1760 const struct nlattr
* const cda
[])
1762 struct net
*net
= sock_net(ctnl
);
1763 struct nf_conntrack_tuple otuple
, rtuple
;
1764 struct nf_conntrack_tuple_hash
*h
= NULL
;
1765 struct nfgenmsg
*nfmsg
= nlmsg_data(nlh
);
1767 u_int8_t u3
= nfmsg
->nfgen_family
;
1771 err
= ctnetlink_parse_zone(cda
[CTA_ZONE
], &zone
);
1775 if (cda
[CTA_TUPLE_ORIG
]) {
1776 err
= ctnetlink_parse_tuple(cda
, &otuple
, CTA_TUPLE_ORIG
, u3
);
1781 if (cda
[CTA_TUPLE_REPLY
]) {
1782 err
= ctnetlink_parse_tuple(cda
, &rtuple
, CTA_TUPLE_REPLY
, u3
);
1787 if (cda
[CTA_TUPLE_ORIG
])
1788 h
= nf_conntrack_find_get(net
, zone
, &otuple
);
1789 else if (cda
[CTA_TUPLE_REPLY
])
1790 h
= nf_conntrack_find_get(net
, zone
, &rtuple
);
1794 if (nlh
->nlmsg_flags
& NLM_F_CREATE
) {
1795 enum ip_conntrack_events events
;
1797 if (!cda
[CTA_TUPLE_ORIG
] || !cda
[CTA_TUPLE_REPLY
])
1800 ct
= ctnetlink_create_conntrack(net
, zone
, cda
, &otuple
,
1806 if (test_bit(IPS_EXPECTED_BIT
, &ct
->status
))
1807 events
= IPCT_RELATED
;
1811 if (cda
[CTA_LABELS
] &&
1812 ctnetlink_attach_labels(ct
, cda
) == 0)
1813 events
|= (1 << IPCT_LABEL
);
1815 nf_conntrack_eventmask_report((1 << IPCT_REPLY
) |
1816 (1 << IPCT_ASSURED
) |
1817 (1 << IPCT_HELPER
) |
1818 (1 << IPCT_PROTOINFO
) |
1819 (1 << IPCT_SEQADJ
) |
1820 (1 << IPCT_MARK
) | events
,
1821 ct
, NETLINK_CB(skb
).portid
,
1828 /* implicit 'else' */
1831 ct
= nf_ct_tuplehash_to_ctrack(h
);
1832 if (!(nlh
->nlmsg_flags
& NLM_F_EXCL
)) {
1833 spin_lock_bh(&nf_conntrack_expect_lock
);
1834 err
= ctnetlink_change_conntrack(ct
, cda
);
1835 spin_unlock_bh(&nf_conntrack_expect_lock
);
1837 nf_conntrack_eventmask_report((1 << IPCT_REPLY
) |
1838 (1 << IPCT_ASSURED
) |
1839 (1 << IPCT_HELPER
) |
1841 (1 << IPCT_PROTOINFO
) |
1842 (1 << IPCT_SEQADJ
) |
1844 ct
, NETLINK_CB(skb
).portid
,
1854 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff
*skb
, u32 portid
, u32 seq
,
1855 __u16 cpu
, const struct ip_conntrack_stat
*st
)
1857 struct nlmsghdr
*nlh
;
1858 struct nfgenmsg
*nfmsg
;
1859 unsigned int flags
= portid
? NLM_F_MULTI
: 0, event
;
1861 event
= (NFNL_SUBSYS_CTNETLINK
<< 8 | IPCTNL_MSG_CT_GET_STATS_CPU
);
1862 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(*nfmsg
), flags
);
1866 nfmsg
= nlmsg_data(nlh
);
1867 nfmsg
->nfgen_family
= AF_UNSPEC
;
1868 nfmsg
->version
= NFNETLINK_V0
;
1869 nfmsg
->res_id
= htons(cpu
);
1871 if (nla_put_be32(skb
, CTA_STATS_SEARCHED
, htonl(st
->searched
)) ||
1872 nla_put_be32(skb
, CTA_STATS_FOUND
, htonl(st
->found
)) ||
1873 nla_put_be32(skb
, CTA_STATS_NEW
, htonl(st
->new)) ||
1874 nla_put_be32(skb
, CTA_STATS_INVALID
, htonl(st
->invalid
)) ||
1875 nla_put_be32(skb
, CTA_STATS_IGNORE
, htonl(st
->ignore
)) ||
1876 nla_put_be32(skb
, CTA_STATS_DELETE
, htonl(st
->delete)) ||
1877 nla_put_be32(skb
, CTA_STATS_DELETE_LIST
, htonl(st
->delete_list
)) ||
1878 nla_put_be32(skb
, CTA_STATS_INSERT
, htonl(st
->insert
)) ||
1879 nla_put_be32(skb
, CTA_STATS_INSERT_FAILED
,
1880 htonl(st
->insert_failed
)) ||
1881 nla_put_be32(skb
, CTA_STATS_DROP
, htonl(st
->drop
)) ||
1882 nla_put_be32(skb
, CTA_STATS_EARLY_DROP
, htonl(st
->early_drop
)) ||
1883 nla_put_be32(skb
, CTA_STATS_ERROR
, htonl(st
->error
)) ||
1884 nla_put_be32(skb
, CTA_STATS_SEARCH_RESTART
,
1885 htonl(st
->search_restart
)))
1886 goto nla_put_failure
;
1888 nlmsg_end(skb
, nlh
);
1893 nlmsg_cancel(skb
, nlh
);
1898 ctnetlink_ct_stat_cpu_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1901 struct net
*net
= sock_net(skb
->sk
);
1903 if (cb
->args
[0] == nr_cpu_ids
)
1906 for (cpu
= cb
->args
[0]; cpu
< nr_cpu_ids
; cpu
++) {
1907 const struct ip_conntrack_stat
*st
;
1909 if (!cpu_possible(cpu
))
1912 st
= per_cpu_ptr(net
->ct
.stat
, cpu
);
1913 if (ctnetlink_ct_stat_cpu_fill_info(skb
,
1914 NETLINK_CB(cb
->skb
).portid
,
1925 ctnetlink_stat_ct_cpu(struct sock
*ctnl
, struct sk_buff
*skb
,
1926 const struct nlmsghdr
*nlh
,
1927 const struct nlattr
* const cda
[])
1929 if (nlh
->nlmsg_flags
& NLM_F_DUMP
) {
1930 struct netlink_dump_control c
= {
1931 .dump
= ctnetlink_ct_stat_cpu_dump
,
1933 return netlink_dump_start(ctnl
, skb
, nlh
, &c
);
1940 ctnetlink_stat_ct_fill_info(struct sk_buff
*skb
, u32 portid
, u32 seq
, u32 type
,
1943 struct nlmsghdr
*nlh
;
1944 struct nfgenmsg
*nfmsg
;
1945 unsigned int flags
= portid
? NLM_F_MULTI
: 0, event
;
1946 unsigned int nr_conntracks
= atomic_read(&net
->ct
.count
);
1948 event
= (NFNL_SUBSYS_CTNETLINK
<< 8 | IPCTNL_MSG_CT_GET_STATS
);
1949 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(*nfmsg
), flags
);
1953 nfmsg
= nlmsg_data(nlh
);
1954 nfmsg
->nfgen_family
= AF_UNSPEC
;
1955 nfmsg
->version
= NFNETLINK_V0
;
1958 if (nla_put_be32(skb
, CTA_STATS_GLOBAL_ENTRIES
, htonl(nr_conntracks
)))
1959 goto nla_put_failure
;
1961 nlmsg_end(skb
, nlh
);
1966 nlmsg_cancel(skb
, nlh
);
1971 ctnetlink_stat_ct(struct sock
*ctnl
, struct sk_buff
*skb
,
1972 const struct nlmsghdr
*nlh
,
1973 const struct nlattr
* const cda
[])
1975 struct sk_buff
*skb2
;
1978 skb2
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
1982 err
= ctnetlink_stat_ct_fill_info(skb2
, NETLINK_CB(skb
).portid
,
1984 NFNL_MSG_TYPE(nlh
->nlmsg_type
),
1989 err
= netlink_unicast(ctnl
, skb2
, NETLINK_CB(skb
).portid
, MSG_DONTWAIT
);
1998 /* this avoids a loop in nfnetlink. */
1999 return err
== -EAGAIN
? -ENOBUFS
: err
;
2002 static const struct nla_policy exp_nla_policy
[CTA_EXPECT_MAX
+1] = {
2003 [CTA_EXPECT_MASTER
] = { .type
= NLA_NESTED
},
2004 [CTA_EXPECT_TUPLE
] = { .type
= NLA_NESTED
},
2005 [CTA_EXPECT_MASK
] = { .type
= NLA_NESTED
},
2006 [CTA_EXPECT_TIMEOUT
] = { .type
= NLA_U32
},
2007 [CTA_EXPECT_ID
] = { .type
= NLA_U32
},
2008 [CTA_EXPECT_HELP_NAME
] = { .type
= NLA_NUL_STRING
,
2009 .len
= NF_CT_HELPER_NAME_LEN
- 1 },
2010 [CTA_EXPECT_ZONE
] = { .type
= NLA_U16
},
2011 [CTA_EXPECT_FLAGS
] = { .type
= NLA_U32
},
2012 [CTA_EXPECT_CLASS
] = { .type
= NLA_U32
},
2013 [CTA_EXPECT_NAT
] = { .type
= NLA_NESTED
},
2014 [CTA_EXPECT_FN
] = { .type
= NLA_NUL_STRING
},
2017 static struct nf_conntrack_expect
*
2018 ctnetlink_alloc_expect(const struct nlattr
*const cda
[], struct nf_conn
*ct
,
2019 struct nf_conntrack_helper
*helper
,
2020 struct nf_conntrack_tuple
*tuple
,
2021 struct nf_conntrack_tuple
*mask
);
2023 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
2025 ctnetlink_nfqueue_build_size(const struct nf_conn
*ct
)
2027 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
2028 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
2029 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
2030 + 3 * nla_total_size(sizeof(u_int8_t
)) /* CTA_PROTO_NUM */
2031 + nla_total_size(sizeof(u_int32_t
)) /* CTA_ID */
2032 + nla_total_size(sizeof(u_int32_t
)) /* CTA_STATUS */
2033 + nla_total_size(sizeof(u_int32_t
)) /* CTA_TIMEOUT */
2034 + nla_total_size(0) /* CTA_PROTOINFO */
2035 + nla_total_size(0) /* CTA_HELP */
2036 + nla_total_size(NF_CT_HELPER_NAME_LEN
) /* CTA_HELP_NAME */
2037 + ctnetlink_secctx_size(ct
)
2038 #ifdef CONFIG_NF_NAT_NEEDED
2039 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2040 + 6 * nla_total_size(sizeof(u_int32_t
)) /* CTA_NAT_SEQ_OFFSET */
2042 #ifdef CONFIG_NF_CONNTRACK_MARK
2043 + nla_total_size(sizeof(u_int32_t
)) /* CTA_MARK */
2045 #ifdef CONFIG_NF_CONNTRACK_ZONES
2046 + nla_total_size(sizeof(u_int16_t
)) /* CTA_ZONE */
2048 + ctnetlink_proto_size(ct
)
2053 ctnetlink_nfqueue_build(struct sk_buff
*skb
, struct nf_conn
*ct
)
2055 struct nlattr
*nest_parms
;
2058 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_ORIG
| NLA_F_NESTED
);
2060 goto nla_put_failure
;
2061 if (ctnetlink_dump_tuples(skb
, nf_ct_tuple(ct
, IP_CT_DIR_ORIGINAL
)) < 0)
2062 goto nla_put_failure
;
2063 nla_nest_end(skb
, nest_parms
);
2065 nest_parms
= nla_nest_start(skb
, CTA_TUPLE_REPLY
| NLA_F_NESTED
);
2067 goto nla_put_failure
;
2068 if (ctnetlink_dump_tuples(skb
, nf_ct_tuple(ct
, IP_CT_DIR_REPLY
)) < 0)
2069 goto nla_put_failure
;
2070 nla_nest_end(skb
, nest_parms
);
2072 if (nf_ct_zone(ct
)) {
2073 if (nla_put_be16(skb
, CTA_ZONE
, htons(nf_ct_zone(ct
))))
2074 goto nla_put_failure
;
2077 if (ctnetlink_dump_id(skb
, ct
) < 0)
2078 goto nla_put_failure
;
2080 if (ctnetlink_dump_status(skb
, ct
) < 0)
2081 goto nla_put_failure
;
2083 if (ctnetlink_dump_timeout(skb
, ct
) < 0)
2084 goto nla_put_failure
;
2086 if (ctnetlink_dump_protoinfo(skb
, ct
) < 0)
2087 goto nla_put_failure
;
2089 if (ctnetlink_dump_helpinfo(skb
, ct
) < 0)
2090 goto nla_put_failure
;
2092 #ifdef CONFIG_NF_CONNTRACK_SECMARK
2093 if (ct
->secmark
&& ctnetlink_dump_secctx(skb
, ct
) < 0)
2094 goto nla_put_failure
;
2096 if (ct
->master
&& ctnetlink_dump_master(skb
, ct
) < 0)
2097 goto nla_put_failure
;
2099 if ((ct
->status
& IPS_SEQ_ADJUST
) &&
2100 ctnetlink_dump_ct_seq_adj(skb
, ct
) < 0)
2101 goto nla_put_failure
;
2103 #ifdef CONFIG_NF_CONNTRACK_MARK
2104 if (ct
->mark
&& ctnetlink_dump_mark(skb
, ct
) < 0)
2105 goto nla_put_failure
;
2107 if (ctnetlink_dump_labels(skb
, ct
) < 0)
2108 goto nla_put_failure
;
2118 ctnetlink_nfqueue_parse_ct(const struct nlattr
*cda
[], struct nf_conn
*ct
)
2122 if (cda
[CTA_TIMEOUT
]) {
2123 err
= ctnetlink_change_timeout(ct
, cda
);
2127 if (cda
[CTA_STATUS
]) {
2128 err
= ctnetlink_change_status(ct
, cda
);
2132 if (cda
[CTA_HELP
]) {
2133 err
= ctnetlink_change_helper(ct
, cda
);
2137 if (cda
[CTA_LABELS
]) {
2138 err
= ctnetlink_attach_labels(ct
, cda
);
2142 #if defined(CONFIG_NF_CONNTRACK_MARK)
2143 if (cda
[CTA_MARK
]) {
2144 u32 mask
= 0, mark
, newmark
;
2145 if (cda
[CTA_MARK_MASK
])
2146 mask
= ~ntohl(nla_get_be32(cda
[CTA_MARK_MASK
]));
2148 mark
= ntohl(nla_get_be32(cda
[CTA_MARK
]));
2149 newmark
= (ct
->mark
& mask
) ^ mark
;
2150 if (newmark
!= ct
->mark
)
2158 ctnetlink_nfqueue_parse(const struct nlattr
*attr
, struct nf_conn
*ct
)
2160 struct nlattr
*cda
[CTA_MAX
+1];
2163 ret
= nla_parse_nested(cda
, CTA_MAX
, attr
, ct_nla_policy
);
2167 spin_lock_bh(&nf_conntrack_expect_lock
);
2168 ret
= ctnetlink_nfqueue_parse_ct((const struct nlattr
**)cda
, ct
);
2169 spin_unlock_bh(&nf_conntrack_expect_lock
);
2174 static int ctnetlink_nfqueue_exp_parse(const struct nlattr
* const *cda
,
2175 const struct nf_conn
*ct
,
2176 struct nf_conntrack_tuple
*tuple
,
2177 struct nf_conntrack_tuple
*mask
)
2181 err
= ctnetlink_parse_tuple(cda
, tuple
, CTA_EXPECT_TUPLE
,
2186 return ctnetlink_parse_tuple(cda
, mask
, CTA_EXPECT_MASK
,
2191 ctnetlink_nfqueue_attach_expect(const struct nlattr
*attr
, struct nf_conn
*ct
,
2192 u32 portid
, u32 report
)
2194 struct nlattr
*cda
[CTA_EXPECT_MAX
+1];
2195 struct nf_conntrack_tuple tuple
, mask
;
2196 struct nf_conntrack_helper
*helper
= NULL
;
2197 struct nf_conntrack_expect
*exp
;
2200 err
= nla_parse_nested(cda
, CTA_EXPECT_MAX
, attr
, exp_nla_policy
);
2204 err
= ctnetlink_nfqueue_exp_parse((const struct nlattr
* const *)cda
,
2209 if (cda
[CTA_EXPECT_HELP_NAME
]) {
2210 const char *helpname
= nla_data(cda
[CTA_EXPECT_HELP_NAME
]);
2212 helper
= __nf_conntrack_helper_find(helpname
, nf_ct_l3num(ct
),
2213 nf_ct_protonum(ct
));
2218 exp
= ctnetlink_alloc_expect((const struct nlattr
* const *)cda
, ct
,
2219 helper
, &tuple
, &mask
);
2221 return PTR_ERR(exp
);
2223 err
= nf_ct_expect_related_report(exp
, portid
, report
);
2225 nf_ct_expect_put(exp
);
2232 static struct nfq_ct_hook ctnetlink_nfqueue_hook
= {
2233 .build_size
= ctnetlink_nfqueue_build_size
,
2234 .build
= ctnetlink_nfqueue_build
,
2235 .parse
= ctnetlink_nfqueue_parse
,
2236 .attach_expect
= ctnetlink_nfqueue_attach_expect
,
2237 .seq_adjust
= nf_ct_tcp_seqadj_set
,
2239 #endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
2241 /***********************************************************************
2243 ***********************************************************************/
2246 ctnetlink_exp_dump_tuple(struct sk_buff
*skb
,
2247 const struct nf_conntrack_tuple
*tuple
,
2248 enum ctattr_expect type
)
2250 struct nlattr
*nest_parms
;
2252 nest_parms
= nla_nest_start(skb
, type
| NLA_F_NESTED
);
2254 goto nla_put_failure
;
2255 if (ctnetlink_dump_tuples(skb
, tuple
) < 0)
2256 goto nla_put_failure
;
2257 nla_nest_end(skb
, nest_parms
);
2266 ctnetlink_exp_dump_mask(struct sk_buff
*skb
,
2267 const struct nf_conntrack_tuple
*tuple
,
2268 const struct nf_conntrack_tuple_mask
*mask
)
2271 struct nf_conntrack_l3proto
*l3proto
;
2272 struct nf_conntrack_l4proto
*l4proto
;
2273 struct nf_conntrack_tuple m
;
2274 struct nlattr
*nest_parms
;
2276 memset(&m
, 0xFF, sizeof(m
));
2277 memcpy(&m
.src
.u3
, &mask
->src
.u3
, sizeof(m
.src
.u3
));
2278 m
.src
.u
.all
= mask
->src
.u
.all
;
2279 m
.dst
.protonum
= tuple
->dst
.protonum
;
2281 nest_parms
= nla_nest_start(skb
, CTA_EXPECT_MASK
| NLA_F_NESTED
);
2283 goto nla_put_failure
;
2286 l3proto
= __nf_ct_l3proto_find(tuple
->src
.l3num
);
2287 ret
= ctnetlink_dump_tuples_ip(skb
, &m
, l3proto
);
2289 l4proto
= __nf_ct_l4proto_find(tuple
->src
.l3num
,
2290 tuple
->dst
.protonum
);
2291 ret
= ctnetlink_dump_tuples_proto(skb
, &m
, l4proto
);
2295 if (unlikely(ret
< 0))
2296 goto nla_put_failure
;
2298 nla_nest_end(skb
, nest_parms
);
2306 static const union nf_inet_addr any_addr
;
2309 ctnetlink_exp_dump_expect(struct sk_buff
*skb
,
2310 const struct nf_conntrack_expect
*exp
)
2312 struct nf_conn
*master
= exp
->master
;
2313 long timeout
= ((long)exp
->timeout
.expires
- (long)jiffies
) / HZ
;
2314 struct nf_conn_help
*help
;
2315 #ifdef CONFIG_NF_NAT_NEEDED
2316 struct nlattr
*nest_parms
;
2317 struct nf_conntrack_tuple nat_tuple
= {};
2319 struct nf_ct_helper_expectfn
*expfn
;
2324 if (ctnetlink_exp_dump_tuple(skb
, &exp
->tuple
, CTA_EXPECT_TUPLE
) < 0)
2325 goto nla_put_failure
;
2326 if (ctnetlink_exp_dump_mask(skb
, &exp
->tuple
, &exp
->mask
) < 0)
2327 goto nla_put_failure
;
2328 if (ctnetlink_exp_dump_tuple(skb
,
2329 &master
->tuplehash
[IP_CT_DIR_ORIGINAL
].tuple
,
2330 CTA_EXPECT_MASTER
) < 0)
2331 goto nla_put_failure
;
2333 #ifdef CONFIG_NF_NAT_NEEDED
2334 if (!nf_inet_addr_cmp(&exp
->saved_addr
, &any_addr
) ||
2335 exp
->saved_proto
.all
) {
2336 nest_parms
= nla_nest_start(skb
, CTA_EXPECT_NAT
| NLA_F_NESTED
);
2338 goto nla_put_failure
;
2340 if (nla_put_be32(skb
, CTA_EXPECT_NAT_DIR
, htonl(exp
->dir
)))
2341 goto nla_put_failure
;
2343 nat_tuple
.src
.l3num
= nf_ct_l3num(master
);
2344 nat_tuple
.src
.u3
= exp
->saved_addr
;
2345 nat_tuple
.dst
.protonum
= nf_ct_protonum(master
);
2346 nat_tuple
.src
.u
= exp
->saved_proto
;
2348 if (ctnetlink_exp_dump_tuple(skb
, &nat_tuple
,
2349 CTA_EXPECT_NAT_TUPLE
) < 0)
2350 goto nla_put_failure
;
2351 nla_nest_end(skb
, nest_parms
);
2354 if (nla_put_be32(skb
, CTA_EXPECT_TIMEOUT
, htonl(timeout
)) ||
2355 nla_put_be32(skb
, CTA_EXPECT_ID
, htonl((unsigned long)exp
)) ||
2356 nla_put_be32(skb
, CTA_EXPECT_FLAGS
, htonl(exp
->flags
)) ||
2357 nla_put_be32(skb
, CTA_EXPECT_CLASS
, htonl(exp
->class)))
2358 goto nla_put_failure
;
2359 help
= nfct_help(master
);
2361 struct nf_conntrack_helper
*helper
;
2363 helper
= rcu_dereference(help
->helper
);
2365 nla_put_string(skb
, CTA_EXPECT_HELP_NAME
, helper
->name
))
2366 goto nla_put_failure
;
2368 expfn
= nf_ct_helper_expectfn_find_by_symbol(exp
->expectfn
);
2369 if (expfn
!= NULL
&&
2370 nla_put_string(skb
, CTA_EXPECT_FN
, expfn
->name
))
2371 goto nla_put_failure
;
2380 ctnetlink_exp_fill_info(struct sk_buff
*skb
, u32 portid
, u32 seq
,
2381 int event
, const struct nf_conntrack_expect
*exp
)
2383 struct nlmsghdr
*nlh
;
2384 struct nfgenmsg
*nfmsg
;
2385 unsigned int flags
= portid
? NLM_F_MULTI
: 0;
2387 event
|= NFNL_SUBSYS_CTNETLINK_EXP
<< 8;
2388 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(*nfmsg
), flags
);
2392 nfmsg
= nlmsg_data(nlh
);
2393 nfmsg
->nfgen_family
= exp
->tuple
.src
.l3num
;
2394 nfmsg
->version
= NFNETLINK_V0
;
2397 if (ctnetlink_exp_dump_expect(skb
, exp
) < 0)
2398 goto nla_put_failure
;
2400 nlmsg_end(skb
, nlh
);
2405 nlmsg_cancel(skb
, nlh
);
2409 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2411 ctnetlink_expect_event(unsigned int events
, struct nf_exp_event
*item
)
2413 struct nf_conntrack_expect
*exp
= item
->exp
;
2414 struct net
*net
= nf_ct_exp_net(exp
);
2415 struct nlmsghdr
*nlh
;
2416 struct nfgenmsg
*nfmsg
;
2417 struct sk_buff
*skb
;
2418 unsigned int type
, group
;
2421 if (events
& (1 << IPEXP_DESTROY
)) {
2422 type
= IPCTNL_MSG_EXP_DELETE
;
2423 group
= NFNLGRP_CONNTRACK_EXP_DESTROY
;
2424 } else if (events
& (1 << IPEXP_NEW
)) {
2425 type
= IPCTNL_MSG_EXP_NEW
;
2426 flags
= NLM_F_CREATE
|NLM_F_EXCL
;
2427 group
= NFNLGRP_CONNTRACK_EXP_NEW
;
2431 if (!item
->report
&& !nfnetlink_has_listeners(net
, group
))
2434 skb
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_ATOMIC
);
2438 type
|= NFNL_SUBSYS_CTNETLINK_EXP
<< 8;
2439 nlh
= nlmsg_put(skb
, item
->portid
, 0, type
, sizeof(*nfmsg
), flags
);
2443 nfmsg
= nlmsg_data(nlh
);
2444 nfmsg
->nfgen_family
= exp
->tuple
.src
.l3num
;
2445 nfmsg
->version
= NFNETLINK_V0
;
2449 if (ctnetlink_exp_dump_expect(skb
, exp
) < 0)
2450 goto nla_put_failure
;
2453 nlmsg_end(skb
, nlh
);
2454 nfnetlink_send(skb
, net
, item
->portid
, group
, item
->report
, GFP_ATOMIC
);
2459 nlmsg_cancel(skb
, nlh
);
2463 nfnetlink_set_err(net
, 0, 0, -ENOBUFS
);
2467 static int ctnetlink_exp_done(struct netlink_callback
*cb
)
2470 nf_ct_expect_put((struct nf_conntrack_expect
*)cb
->args
[1]);
2475 ctnetlink_exp_dump_table(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2477 struct net
*net
= sock_net(skb
->sk
);
2478 struct nf_conntrack_expect
*exp
, *last
;
2479 struct nfgenmsg
*nfmsg
= nlmsg_data(cb
->nlh
);
2480 u_int8_t l3proto
= nfmsg
->nfgen_family
;
2483 last
= (struct nf_conntrack_expect
*)cb
->args
[1];
2484 for (; cb
->args
[0] < nf_ct_expect_hsize
; cb
->args
[0]++) {
2486 hlist_for_each_entry(exp
, &net
->ct
.expect_hash
[cb
->args
[0]],
2488 if (l3proto
&& exp
->tuple
.src
.l3num
!= l3proto
)
2495 if (ctnetlink_exp_fill_info(skb
,
2496 NETLINK_CB(cb
->skb
).portid
,
2500 if (!atomic_inc_not_zero(&exp
->use
))
2502 cb
->args
[1] = (unsigned long)exp
;
2514 nf_ct_expect_put(last
);
2520 ctnetlink_exp_ct_dump_table(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2522 struct nf_conntrack_expect
*exp
, *last
;
2523 struct nfgenmsg
*nfmsg
= nlmsg_data(cb
->nlh
);
2524 struct nf_conn
*ct
= cb
->data
;
2525 struct nf_conn_help
*help
= nfct_help(ct
);
2526 u_int8_t l3proto
= nfmsg
->nfgen_family
;
2532 last
= (struct nf_conntrack_expect
*)cb
->args
[1];
2534 hlist_for_each_entry(exp
, &help
->expectations
, lnode
) {
2535 if (l3proto
&& exp
->tuple
.src
.l3num
!= l3proto
)
2542 if (ctnetlink_exp_fill_info(skb
, NETLINK_CB(cb
->skb
).portid
,
2546 if (!atomic_inc_not_zero(&exp
->use
))
2548 cb
->args
[1] = (unsigned long)exp
;
2560 nf_ct_expect_put(last
);
2565 static int ctnetlink_dump_exp_ct(struct sock
*ctnl
, struct sk_buff
*skb
,
2566 const struct nlmsghdr
*nlh
,
2567 const struct nlattr
* const cda
[])
2570 struct net
*net
= sock_net(ctnl
);
2571 struct nfgenmsg
*nfmsg
= nlmsg_data(nlh
);
2572 u_int8_t u3
= nfmsg
->nfgen_family
;
2573 struct nf_conntrack_tuple tuple
;
2574 struct nf_conntrack_tuple_hash
*h
;
2577 struct netlink_dump_control c
= {
2578 .dump
= ctnetlink_exp_ct_dump_table
,
2579 .done
= ctnetlink_exp_done
,
2582 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_MASTER
, u3
);
2586 if (cda
[CTA_EXPECT_ZONE
]) {
2587 err
= ctnetlink_parse_zone(cda
[CTA_EXPECT_ZONE
], &zone
);
2592 h
= nf_conntrack_find_get(net
, zone
, &tuple
);
2596 ct
= nf_ct_tuplehash_to_ctrack(h
);
2599 err
= netlink_dump_start(ctnl
, skb
, nlh
, &c
);
2606 ctnetlink_get_expect(struct sock
*ctnl
, struct sk_buff
*skb
,
2607 const struct nlmsghdr
*nlh
,
2608 const struct nlattr
* const cda
[])
2610 struct net
*net
= sock_net(ctnl
);
2611 struct nf_conntrack_tuple tuple
;
2612 struct nf_conntrack_expect
*exp
;
2613 struct sk_buff
*skb2
;
2614 struct nfgenmsg
*nfmsg
= nlmsg_data(nlh
);
2615 u_int8_t u3
= nfmsg
->nfgen_family
;
2619 if (nlh
->nlmsg_flags
& NLM_F_DUMP
) {
2620 if (cda
[CTA_EXPECT_MASTER
])
2621 return ctnetlink_dump_exp_ct(ctnl
, skb
, nlh
, cda
);
2623 struct netlink_dump_control c
= {
2624 .dump
= ctnetlink_exp_dump_table
,
2625 .done
= ctnetlink_exp_done
,
2627 return netlink_dump_start(ctnl
, skb
, nlh
, &c
);
2631 err
= ctnetlink_parse_zone(cda
[CTA_EXPECT_ZONE
], &zone
);
2635 if (cda
[CTA_EXPECT_TUPLE
])
2636 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_TUPLE
, u3
);
2637 else if (cda
[CTA_EXPECT_MASTER
])
2638 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_MASTER
, u3
);
2645 exp
= nf_ct_expect_find_get(net
, zone
, &tuple
);
2649 if (cda
[CTA_EXPECT_ID
]) {
2650 __be32 id
= nla_get_be32(cda
[CTA_EXPECT_ID
]);
2651 if (ntohl(id
) != (u32
)(unsigned long)exp
) {
2652 nf_ct_expect_put(exp
);
2658 skb2
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
2660 nf_ct_expect_put(exp
);
2665 err
= ctnetlink_exp_fill_info(skb2
, NETLINK_CB(skb
).portid
,
2666 nlh
->nlmsg_seq
, IPCTNL_MSG_EXP_NEW
, exp
);
2668 nf_ct_expect_put(exp
);
2672 err
= netlink_unicast(ctnl
, skb2
, NETLINK_CB(skb
).portid
, MSG_DONTWAIT
);
2681 /* this avoids a loop in nfnetlink. */
2682 return err
== -EAGAIN
? -ENOBUFS
: err
;
2686 ctnetlink_del_expect(struct sock
*ctnl
, struct sk_buff
*skb
,
2687 const struct nlmsghdr
*nlh
,
2688 const struct nlattr
* const cda
[])
2690 struct net
*net
= sock_net(ctnl
);
2691 struct nf_conntrack_expect
*exp
;
2692 struct nf_conntrack_tuple tuple
;
2693 struct nfgenmsg
*nfmsg
= nlmsg_data(nlh
);
2694 struct hlist_node
*next
;
2695 u_int8_t u3
= nfmsg
->nfgen_family
;
2700 if (cda
[CTA_EXPECT_TUPLE
]) {
2701 /* delete a single expect by tuple */
2702 err
= ctnetlink_parse_zone(cda
[CTA_EXPECT_ZONE
], &zone
);
2706 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_TUPLE
, u3
);
2710 /* bump usage count to 2 */
2711 exp
= nf_ct_expect_find_get(net
, zone
, &tuple
);
2715 if (cda
[CTA_EXPECT_ID
]) {
2716 __be32 id
= nla_get_be32(cda
[CTA_EXPECT_ID
]);
2717 if (ntohl(id
) != (u32
)(unsigned long)exp
) {
2718 nf_ct_expect_put(exp
);
2723 /* after list removal, usage count == 1 */
2724 spin_lock_bh(&nf_conntrack_expect_lock
);
2725 if (del_timer(&exp
->timeout
)) {
2726 nf_ct_unlink_expect_report(exp
, NETLINK_CB(skb
).portid
,
2728 nf_ct_expect_put(exp
);
2730 spin_unlock_bh(&nf_conntrack_expect_lock
);
2731 /* have to put what we 'get' above.
2732 * after this line usage count == 0 */
2733 nf_ct_expect_put(exp
);
2734 } else if (cda
[CTA_EXPECT_HELP_NAME
]) {
2735 char *name
= nla_data(cda
[CTA_EXPECT_HELP_NAME
]);
2736 struct nf_conn_help
*m_help
;
2738 /* delete all expectations for this helper */
2739 spin_lock_bh(&nf_conntrack_expect_lock
);
2740 for (i
= 0; i
< nf_ct_expect_hsize
; i
++) {
2741 hlist_for_each_entry_safe(exp
, next
,
2742 &net
->ct
.expect_hash
[i
],
2744 m_help
= nfct_help(exp
->master
);
2745 if (!strcmp(m_help
->helper
->name
, name
) &&
2746 del_timer(&exp
->timeout
)) {
2747 nf_ct_unlink_expect_report(exp
,
2748 NETLINK_CB(skb
).portid
,
2750 nf_ct_expect_put(exp
);
2754 spin_unlock_bh(&nf_conntrack_expect_lock
);
2756 /* This basically means we have to flush everything*/
2757 spin_lock_bh(&nf_conntrack_expect_lock
);
2758 for (i
= 0; i
< nf_ct_expect_hsize
; i
++) {
2759 hlist_for_each_entry_safe(exp
, next
,
2760 &net
->ct
.expect_hash
[i
],
2762 if (del_timer(&exp
->timeout
)) {
2763 nf_ct_unlink_expect_report(exp
,
2764 NETLINK_CB(skb
).portid
,
2766 nf_ct_expect_put(exp
);
2770 spin_unlock_bh(&nf_conntrack_expect_lock
);
2776 ctnetlink_change_expect(struct nf_conntrack_expect
*x
,
2777 const struct nlattr
* const cda
[])
2779 if (cda
[CTA_EXPECT_TIMEOUT
]) {
2780 if (!del_timer(&x
->timeout
))
2783 x
->timeout
.expires
= jiffies
+
2784 ntohl(nla_get_be32(cda
[CTA_EXPECT_TIMEOUT
])) * HZ
;
2785 add_timer(&x
->timeout
);
2790 static const struct nla_policy exp_nat_nla_policy
[CTA_EXPECT_NAT_MAX
+1] = {
2791 [CTA_EXPECT_NAT_DIR
] = { .type
= NLA_U32
},
2792 [CTA_EXPECT_NAT_TUPLE
] = { .type
= NLA_NESTED
},
2796 ctnetlink_parse_expect_nat(const struct nlattr
*attr
,
2797 struct nf_conntrack_expect
*exp
,
2800 #ifdef CONFIG_NF_NAT_NEEDED
2801 struct nlattr
*tb
[CTA_EXPECT_NAT_MAX
+1];
2802 struct nf_conntrack_tuple nat_tuple
= {};
2805 err
= nla_parse_nested(tb
, CTA_EXPECT_NAT_MAX
, attr
, exp_nat_nla_policy
);
2809 if (!tb
[CTA_EXPECT_NAT_DIR
] || !tb
[CTA_EXPECT_NAT_TUPLE
])
2812 err
= ctnetlink_parse_tuple((const struct nlattr
* const *)tb
,
2813 &nat_tuple
, CTA_EXPECT_NAT_TUPLE
, u3
);
2817 exp
->saved_addr
= nat_tuple
.src
.u3
;
2818 exp
->saved_proto
= nat_tuple
.src
.u
;
2819 exp
->dir
= ntohl(nla_get_be32(tb
[CTA_EXPECT_NAT_DIR
]));
2827 static struct nf_conntrack_expect
*
2828 ctnetlink_alloc_expect(const struct nlattr
* const cda
[], struct nf_conn
*ct
,
2829 struct nf_conntrack_helper
*helper
,
2830 struct nf_conntrack_tuple
*tuple
,
2831 struct nf_conntrack_tuple
*mask
)
2833 u_int32_t
class = 0;
2834 struct nf_conntrack_expect
*exp
;
2835 struct nf_conn_help
*help
;
2838 if (cda
[CTA_EXPECT_CLASS
] && helper
) {
2839 class = ntohl(nla_get_be32(cda
[CTA_EXPECT_CLASS
]));
2840 if (class > helper
->expect_class_max
)
2841 return ERR_PTR(-EINVAL
);
2843 exp
= nf_ct_expect_alloc(ct
);
2845 return ERR_PTR(-ENOMEM
);
2847 help
= nfct_help(ct
);
2849 if (!cda
[CTA_EXPECT_TIMEOUT
]) {
2853 exp
->timeout
.expires
=
2854 jiffies
+ ntohl(nla_get_be32(cda
[CTA_EXPECT_TIMEOUT
])) * HZ
;
2856 exp
->flags
= NF_CT_EXPECT_USERSPACE
;
2857 if (cda
[CTA_EXPECT_FLAGS
]) {
2859 ntohl(nla_get_be32(cda
[CTA_EXPECT_FLAGS
]));
2862 if (cda
[CTA_EXPECT_FLAGS
]) {
2863 exp
->flags
= ntohl(nla_get_be32(cda
[CTA_EXPECT_FLAGS
]));
2864 exp
->flags
&= ~NF_CT_EXPECT_USERSPACE
;
2868 if (cda
[CTA_EXPECT_FN
]) {
2869 const char *name
= nla_data(cda
[CTA_EXPECT_FN
]);
2870 struct nf_ct_helper_expectfn
*expfn
;
2872 expfn
= nf_ct_helper_expectfn_find_by_name(name
);
2873 if (expfn
== NULL
) {
2877 exp
->expectfn
= expfn
->expectfn
;
2879 exp
->expectfn
= NULL
;
2883 exp
->helper
= helper
;
2884 exp
->tuple
= *tuple
;
2885 exp
->mask
.src
.u3
= mask
->src
.u3
;
2886 exp
->mask
.src
.u
.all
= mask
->src
.u
.all
;
2888 if (cda
[CTA_EXPECT_NAT
]) {
2889 err
= ctnetlink_parse_expect_nat(cda
[CTA_EXPECT_NAT
],
2890 exp
, nf_ct_l3num(ct
));
2896 nf_ct_expect_put(exp
);
2897 return ERR_PTR(err
);
2901 ctnetlink_create_expect(struct net
*net
, u16 zone
,
2902 const struct nlattr
* const cda
[],
2903 u_int8_t u3
, u32 portid
, int report
)
2905 struct nf_conntrack_tuple tuple
, mask
, master_tuple
;
2906 struct nf_conntrack_tuple_hash
*h
= NULL
;
2907 struct nf_conntrack_helper
*helper
= NULL
;
2908 struct nf_conntrack_expect
*exp
;
2912 /* caller guarantees that those three CTA_EXPECT_* exist */
2913 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_TUPLE
, u3
);
2916 err
= ctnetlink_parse_tuple(cda
, &mask
, CTA_EXPECT_MASK
, u3
);
2919 err
= ctnetlink_parse_tuple(cda
, &master_tuple
, CTA_EXPECT_MASTER
, u3
);
2923 /* Look for master conntrack of this expectation */
2924 h
= nf_conntrack_find_get(net
, zone
, &master_tuple
);
2927 ct
= nf_ct_tuplehash_to_ctrack(h
);
2929 if (cda
[CTA_EXPECT_HELP_NAME
]) {
2930 const char *helpname
= nla_data(cda
[CTA_EXPECT_HELP_NAME
]);
2932 helper
= __nf_conntrack_helper_find(helpname
, u3
,
2933 nf_ct_protonum(ct
));
2934 if (helper
== NULL
) {
2935 #ifdef CONFIG_MODULES
2936 if (request_module("nfct-helper-%s", helpname
) < 0) {
2940 helper
= __nf_conntrack_helper_find(helpname
, u3
,
2941 nf_ct_protonum(ct
));
2952 exp
= ctnetlink_alloc_expect(cda
, ct
, helper
, &tuple
, &mask
);
2958 err
= nf_ct_expect_related_report(exp
, portid
, report
);
2964 nf_ct_expect_put(exp
);
2971 ctnetlink_new_expect(struct sock
*ctnl
, struct sk_buff
*skb
,
2972 const struct nlmsghdr
*nlh
,
2973 const struct nlattr
* const cda
[])
2975 struct net
*net
= sock_net(ctnl
);
2976 struct nf_conntrack_tuple tuple
;
2977 struct nf_conntrack_expect
*exp
;
2978 struct nfgenmsg
*nfmsg
= nlmsg_data(nlh
);
2979 u_int8_t u3
= nfmsg
->nfgen_family
;
2983 if (!cda
[CTA_EXPECT_TUPLE
]
2984 || !cda
[CTA_EXPECT_MASK
]
2985 || !cda
[CTA_EXPECT_MASTER
])
2988 err
= ctnetlink_parse_zone(cda
[CTA_EXPECT_ZONE
], &zone
);
2992 err
= ctnetlink_parse_tuple(cda
, &tuple
, CTA_EXPECT_TUPLE
, u3
);
2996 spin_lock_bh(&nf_conntrack_expect_lock
);
2997 exp
= __nf_ct_expect_find(net
, zone
, &tuple
);
3000 spin_unlock_bh(&nf_conntrack_expect_lock
);
3002 if (nlh
->nlmsg_flags
& NLM_F_CREATE
) {
3003 err
= ctnetlink_create_expect(net
, zone
, cda
,
3005 NETLINK_CB(skb
).portid
,
3012 if (!(nlh
->nlmsg_flags
& NLM_F_EXCL
))
3013 err
= ctnetlink_change_expect(exp
, cda
);
3014 spin_unlock_bh(&nf_conntrack_expect_lock
);
3020 ctnetlink_exp_stat_fill_info(struct sk_buff
*skb
, u32 portid
, u32 seq
, int cpu
,
3021 const struct ip_conntrack_stat
*st
)
3023 struct nlmsghdr
*nlh
;
3024 struct nfgenmsg
*nfmsg
;
3025 unsigned int flags
= portid
? NLM_F_MULTI
: 0, event
;
3027 event
= (NFNL_SUBSYS_CTNETLINK
<< 8 | IPCTNL_MSG_EXP_GET_STATS_CPU
);
3028 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(*nfmsg
), flags
);
3032 nfmsg
= nlmsg_data(nlh
);
3033 nfmsg
->nfgen_family
= AF_UNSPEC
;
3034 nfmsg
->version
= NFNETLINK_V0
;
3035 nfmsg
->res_id
= htons(cpu
);
3037 if (nla_put_be32(skb
, CTA_STATS_EXP_NEW
, htonl(st
->expect_new
)) ||
3038 nla_put_be32(skb
, CTA_STATS_EXP_CREATE
, htonl(st
->expect_create
)) ||
3039 nla_put_be32(skb
, CTA_STATS_EXP_DELETE
, htonl(st
->expect_delete
)))
3040 goto nla_put_failure
;
3042 nlmsg_end(skb
, nlh
);
3047 nlmsg_cancel(skb
, nlh
);
3052 ctnetlink_exp_stat_cpu_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3055 struct net
*net
= sock_net(skb
->sk
);
3057 if (cb
->args
[0] == nr_cpu_ids
)
3060 for (cpu
= cb
->args
[0]; cpu
< nr_cpu_ids
; cpu
++) {
3061 const struct ip_conntrack_stat
*st
;
3063 if (!cpu_possible(cpu
))
3066 st
= per_cpu_ptr(net
->ct
.stat
, cpu
);
3067 if (ctnetlink_exp_stat_fill_info(skb
, NETLINK_CB(cb
->skb
).portid
,
3078 ctnetlink_stat_exp_cpu(struct sock
*ctnl
, struct sk_buff
*skb
,
3079 const struct nlmsghdr
*nlh
,
3080 const struct nlattr
* const cda
[])
3082 if (nlh
->nlmsg_flags
& NLM_F_DUMP
) {
3083 struct netlink_dump_control c
= {
3084 .dump
= ctnetlink_exp_stat_cpu_dump
,
3086 return netlink_dump_start(ctnl
, skb
, nlh
, &c
);
#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Event notifiers feeding conntrack/expectation events to ctnetlink. */
static struct nf_ct_event_notifier ctnl_notifier = {
	.fcn = ctnetlink_conntrack_event,
};

static struct nf_exp_event_notifier ctnl_notifier_exp = {
	.fcn = ctnetlink_expect_event,
};
#endif
3102 static const struct nfnl_callback ctnl_cb
[IPCTNL_MSG_MAX
] = {
3103 [IPCTNL_MSG_CT_NEW
] = { .call
= ctnetlink_new_conntrack
,
3104 .attr_count
= CTA_MAX
,
3105 .policy
= ct_nla_policy
},
3106 [IPCTNL_MSG_CT_GET
] = { .call
= ctnetlink_get_conntrack
,
3107 .attr_count
= CTA_MAX
,
3108 .policy
= ct_nla_policy
},
3109 [IPCTNL_MSG_CT_DELETE
] = { .call
= ctnetlink_del_conntrack
,
3110 .attr_count
= CTA_MAX
,
3111 .policy
= ct_nla_policy
},
3112 [IPCTNL_MSG_CT_GET_CTRZERO
] = { .call
= ctnetlink_get_conntrack
,
3113 .attr_count
= CTA_MAX
,
3114 .policy
= ct_nla_policy
},
3115 [IPCTNL_MSG_CT_GET_STATS_CPU
] = { .call
= ctnetlink_stat_ct_cpu
},
3116 [IPCTNL_MSG_CT_GET_STATS
] = { .call
= ctnetlink_stat_ct
},
3117 [IPCTNL_MSG_CT_GET_DYING
] = { .call
= ctnetlink_get_ct_dying
},
3118 [IPCTNL_MSG_CT_GET_UNCONFIRMED
] = { .call
= ctnetlink_get_ct_unconfirmed
},
3121 static const struct nfnl_callback ctnl_exp_cb
[IPCTNL_MSG_EXP_MAX
] = {
3122 [IPCTNL_MSG_EXP_GET
] = { .call
= ctnetlink_get_expect
,
3123 .attr_count
= CTA_EXPECT_MAX
,
3124 .policy
= exp_nla_policy
},
3125 [IPCTNL_MSG_EXP_NEW
] = { .call
= ctnetlink_new_expect
,
3126 .attr_count
= CTA_EXPECT_MAX
,
3127 .policy
= exp_nla_policy
},
3128 [IPCTNL_MSG_EXP_DELETE
] = { .call
= ctnetlink_del_expect
,
3129 .attr_count
= CTA_EXPECT_MAX
,
3130 .policy
= exp_nla_policy
},
3131 [IPCTNL_MSG_EXP_GET_STATS_CPU
] = { .call
= ctnetlink_stat_exp_cpu
},
3134 static const struct nfnetlink_subsystem ctnl_subsys
= {
3135 .name
= "conntrack",
3136 .subsys_id
= NFNL_SUBSYS_CTNETLINK
,
3137 .cb_count
= IPCTNL_MSG_MAX
,
3141 static const struct nfnetlink_subsystem ctnl_exp_subsys
= {
3142 .name
= "conntrack_expect",
3143 .subsys_id
= NFNL_SUBSYS_CTNETLINK_EXP
,
3144 .cb_count
= IPCTNL_MSG_EXP_MAX
,
3148 MODULE_ALIAS("ip_conntrack_netlink");
3149 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK
);
3150 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP
);
3152 static int __net_init
ctnetlink_net_init(struct net
*net
)
3154 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3157 ret
= nf_conntrack_register_notifier(net
, &ctnl_notifier
);
3159 pr_err("ctnetlink_init: cannot register notifier.\n");
3163 ret
= nf_ct_expect_register_notifier(net
, &ctnl_notifier_exp
);
3165 pr_err("ctnetlink_init: cannot expect register notifier.\n");
3166 goto err_unreg_notifier
;
3171 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3173 nf_conntrack_unregister_notifier(net
, &ctnl_notifier
);
/* Per-netns teardown: detach the event notifiers (reverse of init order). */
static void ctnetlink_net_exit(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
#endif
}
3187 static void __net_exit
ctnetlink_net_exit_batch(struct list_head
*net_exit_list
)
3191 list_for_each_entry(net
, net_exit_list
, exit_list
)
3192 ctnetlink_net_exit(net
);
3195 static struct pernet_operations ctnetlink_net_ops
= {
3196 .init
= ctnetlink_net_init
,
3197 .exit_batch
= ctnetlink_net_exit_batch
,
3200 static int __init
ctnetlink_init(void)
3204 pr_info("ctnetlink v%s: registering with nfnetlink.\n", version
);
3205 ret
= nfnetlink_subsys_register(&ctnl_subsys
);
3207 pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
3211 ret
= nfnetlink_subsys_register(&ctnl_exp_subsys
);
3213 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
3214 goto err_unreg_subsys
;
3217 ret
= register_pernet_subsys(&ctnetlink_net_ops
);
3219 pr_err("ctnetlink_init: cannot register pernet operations\n");
3220 goto err_unreg_exp_subsys
;
3222 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3223 /* setup interaction between nf_queue and nf_conntrack_netlink. */
3224 RCU_INIT_POINTER(nfq_ct_hook
, &ctnetlink_nfqueue_hook
);
3228 err_unreg_exp_subsys
:
3229 nfnetlink_subsys_unregister(&ctnl_exp_subsys
);
3231 nfnetlink_subsys_unregister(&ctnl_subsys
);
3236 static void __exit
ctnetlink_exit(void)
3238 pr_info("ctnetlink: unregistering from nfnetlink.\n");
3240 unregister_pernet_subsys(&ctnetlink_net_ops
);
3241 nfnetlink_subsys_unregister(&ctnl_exp_subsys
);
3242 nfnetlink_subsys_unregister(&ctnl_subsys
);
3243 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3244 RCU_INIT_POINTER(nfq_ct_hook
, NULL
);
module_init(ctnetlink_init);
module_exit(ctnetlink_exit);