netfilter: conntrack: spinlock per cpu to protect special lists.
net/netfilter/nf_conntrack_netlink.c (deliverable/linux.git)
1 /* Connection tracking via netlink socket. Allows for user space
2 * protocol helpers and general trouble making from userspace.
3 *
4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick McHardy <kaber@trash.net>
7 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
8 *
9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
11 *
12 * Further development of this code funded by Astaro AG (http://www.astaro.com)
13 *
14 * This software may be used and distributed according to the terms
15 * of the GNU General Public License, incorporated herein by reference.
16 */
17
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/rculist.h>
22 #include <linux/rculist_nulls.h>
23 #include <linux/types.h>
24 #include <linux/timer.h>
25 #include <linux/security.h>
26 #include <linux/skbuff.h>
27 #include <linux/errno.h>
28 #include <linux/netlink.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/slab.h>
32
33 #include <linux/netfilter.h>
34 #include <net/netlink.h>
35 #include <net/sock.h>
36 #include <net/netfilter/nf_conntrack.h>
37 #include <net/netfilter/nf_conntrack_core.h>
38 #include <net/netfilter/nf_conntrack_expect.h>
39 #include <net/netfilter/nf_conntrack_helper.h>
40 #include <net/netfilter/nf_conntrack_seqadj.h>
41 #include <net/netfilter/nf_conntrack_l3proto.h>
42 #include <net/netfilter/nf_conntrack_l4proto.h>
43 #include <net/netfilter/nf_conntrack_tuple.h>
44 #include <net/netfilter/nf_conntrack_acct.h>
45 #include <net/netfilter/nf_conntrack_zones.h>
46 #include <net/netfilter/nf_conntrack_timestamp.h>
47 #include <net/netfilter/nf_conntrack_labels.h>
48 #ifdef CONFIG_NF_NAT_NEEDED
49 #include <net/netfilter/nf_nat_core.h>
50 #include <net/netfilter/nf_nat_l4proto.h>
51 #include <net/netfilter/nf_nat_helper.h>
52 #endif
53
54 #include <linux/netfilter/nfnetlink.h>
55 #include <linux/netfilter/nfnetlink_conntrack.h>
56
57 MODULE_LICENSE("GPL");
58
59 static char __initdata version[] = "0.93";
60
61 static inline int
62 ctnetlink_dump_tuples_proto(struct sk_buff *skb,
63 const struct nf_conntrack_tuple *tuple,
64 struct nf_conntrack_l4proto *l4proto)
65 {
66 int ret = 0;
67 struct nlattr *nest_parms;
68
69 nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
70 if (!nest_parms)
71 goto nla_put_failure;
72 if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
73 goto nla_put_failure;
74
75 if (likely(l4proto->tuple_to_nlattr))
76 ret = l4proto->tuple_to_nlattr(skb, tuple);
77
78 nla_nest_end(skb, nest_parms);
79
80 return ret;
81
82 nla_put_failure:
83 return -1;
84 }
85
86 static inline int
87 ctnetlink_dump_tuples_ip(struct sk_buff *skb,
88 const struct nf_conntrack_tuple *tuple,
89 struct nf_conntrack_l3proto *l3proto)
90 {
91 int ret = 0;
92 struct nlattr *nest_parms;
93
94 nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
95 if (!nest_parms)
96 goto nla_put_failure;
97
98 if (likely(l3proto->tuple_to_nlattr))
99 ret = l3proto->tuple_to_nlattr(skb, tuple);
100
101 nla_nest_end(skb, nest_parms);
102
103 return ret;
104
105 nla_put_failure:
106 return -1;
107 }
108
109 static int
110 ctnetlink_dump_tuples(struct sk_buff *skb,
111 const struct nf_conntrack_tuple *tuple)
112 {
113 int ret;
114 struct nf_conntrack_l3proto *l3proto;
115 struct nf_conntrack_l4proto *l4proto;
116
117 rcu_read_lock();
118 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
119 ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
120
121 if (ret >= 0) {
122 l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
123 tuple->dst.protonum);
124 ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
125 }
126 rcu_read_unlock();
127 return ret;
128 }
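/* Illustrative sketch (editor's note, not part of the original file): for a
 * TCP/IPv4 conntrack the tuple helpers above emit a nest like the following
 * inside the CTA_TUPLE_ORIG/REPLY/MASTER attribute chosen by the caller; the
 * per-protocol leaves come from the l3proto/l4proto ->tuple_to_nlattr
 * callbacks, so the exact set depends on the protocols involved:
 *
 *   CTA_TUPLE_IP (nested)
 *     CTA_IP_V4_SRC, CTA_IP_V4_DST
 *   CTA_TUPLE_PROTO (nested)
 *     CTA_PROTO_NUM
 *     CTA_PROTO_SRC_PORT, CTA_PROTO_DST_PORT
 */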
129
130 static inline int
131 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
132 {
133 if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
134 goto nla_put_failure;
135 return 0;
136
137 nla_put_failure:
138 return -1;
139 }
140
141 static inline int
142 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
143 {
144 long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
145
146 if (timeout < 0)
147 timeout = 0;
148
149 if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
150 goto nla_put_failure;
151 return 0;
152
153 nla_put_failure:
154 return -1;
155 }
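/* Worked example (editor's note, assuming HZ == 1000): if ct->timeout.expires
 * lies 30000 jiffies ahead of the current jiffies value, CTA_TIMEOUT carries
 * htonl(30), i.e. 30 remaining seconds; an already expired timer is reported
 * as 0 rather than as a negative value.
 */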
156
157 static inline int
158 ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
159 {
160 struct nf_conntrack_l4proto *l4proto;
161 struct nlattr *nest_proto;
162 int ret;
163
164 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
165 if (!l4proto->to_nlattr)
166 return 0;
167
168 nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
169 if (!nest_proto)
170 goto nla_put_failure;
171
172 ret = l4proto->to_nlattr(skb, nest_proto, ct);
173
174 nla_nest_end(skb, nest_proto);
175
176 return ret;
177
178 nla_put_failure:
179 return -1;
180 }
181
182 static inline int
183 ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
184 {
185 struct nlattr *nest_helper;
186 const struct nf_conn_help *help = nfct_help(ct);
187 struct nf_conntrack_helper *helper;
188
189 if (!help)
190 return 0;
191
192 helper = rcu_dereference(help->helper);
193 if (!helper)
194 goto out;
195
196 nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
197 if (!nest_helper)
198 goto nla_put_failure;
199 if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
200 goto nla_put_failure;
201
202 if (helper->to_nlattr)
203 helper->to_nlattr(skb, ct);
204
205 nla_nest_end(skb, nest_helper);
206 out:
207 return 0;
208
209 nla_put_failure:
210 return -1;
211 }
212
213 static int
214 dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
215 enum ip_conntrack_dir dir, int type)
216 {
217 enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
218 struct nf_conn_counter *counter = acct->counter;
219 struct nlattr *nest_count;
220 u64 pkts, bytes;
221
222 if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
223 pkts = atomic64_xchg(&counter[dir].packets, 0);
224 bytes = atomic64_xchg(&counter[dir].bytes, 0);
225 } else {
226 pkts = atomic64_read(&counter[dir].packets);
227 bytes = atomic64_read(&counter[dir].bytes);
228 }
229
230 nest_count = nla_nest_start(skb, attr | NLA_F_NESTED);
231 if (!nest_count)
232 goto nla_put_failure;
233
234 if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
235 nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
236 goto nla_put_failure;
237
238 nla_nest_end(skb, nest_count);
239
240 return 0;
241
242 nla_put_failure:
243 return -1;
244 }
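/* Note (editor's sketch): for IPCTNL_MSG_CT_GET_CTRZERO the counters are read
 * with atomic64_xchg(), so dumping them also resets them to zero; a plain
 * IPCTNL_MSG_CT_GET uses atomic64_read() and leaves them untouched.
 */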
245
246 static int
247 ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
248 {
249 struct nf_conn_acct *acct = nf_conn_acct_find(ct);
250
251 if (!acct)
252 return 0;
253
254 if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
255 return -1;
256 if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
257 return -1;
258
259 return 0;
260 }
261
262 static int
263 ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
264 {
265 struct nlattr *nest_count;
266 const struct nf_conn_tstamp *tstamp;
267
268 tstamp = nf_conn_tstamp_find(ct);
269 if (!tstamp)
270 return 0;
271
272 nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
273 if (!nest_count)
274 goto nla_put_failure;
275
276 if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
277 (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
278 cpu_to_be64(tstamp->stop))))
279 goto nla_put_failure;
280 nla_nest_end(skb, nest_count);
281
282 return 0;
283
284 nla_put_failure:
285 return -1;
286 }
287
288 #ifdef CONFIG_NF_CONNTRACK_MARK
289 static inline int
290 ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
291 {
292 if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
293 goto nla_put_failure;
294 return 0;
295
296 nla_put_failure:
297 return -1;
298 }
299 #else
300 #define ctnetlink_dump_mark(a, b) (0)
301 #endif
302
303 #ifdef CONFIG_NF_CONNTRACK_SECMARK
304 static inline int
305 ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
306 {
307 struct nlattr *nest_secctx;
308 int len, ret;
309 char *secctx;
310
311 ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
312 if (ret)
313 return 0;
314
315 ret = -1;
316 nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
317 if (!nest_secctx)
318 goto nla_put_failure;
319
320 if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
321 goto nla_put_failure;
322 nla_nest_end(skb, nest_secctx);
323
324 ret = 0;
325 nla_put_failure:
326 security_release_secctx(secctx, len);
327 return ret;
328 }
329 #else
330 #define ctnetlink_dump_secctx(a, b) (0)
331 #endif
332
333 #ifdef CONFIG_NF_CONNTRACK_LABELS
334 static int ctnetlink_label_size(const struct nf_conn *ct)
335 {
336 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
337
338 if (!labels)
339 return 0;
340 return nla_total_size(labels->words * sizeof(long));
341 }
342
343 static int
344 ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
345 {
346 struct nf_conn_labels *labels = nf_ct_labels_find(ct);
347 unsigned int len, i;
348
349 if (!labels)
350 return 0;
351
352 len = labels->words * sizeof(long);
353 i = 0;
354 do {
355 if (labels->bits[i] != 0)
356 return nla_put(skb, CTA_LABELS, len, labels->bits);
357 i++;
358 } while (i < labels->words);
359
360 return 0;
361 }
362 #else
363 #define ctnetlink_dump_labels(a, b) (0)
364 #define ctnetlink_label_size(a) (0)
365 #endif
366
367 #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
368
369 static inline int
370 ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
371 {
372 struct nlattr *nest_parms;
373
374 if (!(ct->status & IPS_EXPECTED))
375 return 0;
376
377 nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
378 if (!nest_parms)
379 goto nla_put_failure;
380 if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
381 goto nla_put_failure;
382 nla_nest_end(skb, nest_parms);
383
384 return 0;
385
386 nla_put_failure:
387 return -1;
388 }
389
390 static int
391 dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
392 {
393 struct nlattr *nest_parms;
394
395 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
396 if (!nest_parms)
397 goto nla_put_failure;
398
399 if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
400 htonl(seq->correction_pos)) ||
401 nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
402 htonl(seq->offset_before)) ||
403 nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
404 htonl(seq->offset_after)))
405 goto nla_put_failure;
406
407 nla_nest_end(skb, nest_parms);
408
409 return 0;
410
411 nla_put_failure:
412 return -1;
413 }
414
415 static inline int
416 ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
417 {
418 struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
419 struct nf_ct_seqadj *seq;
420
421 if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
422 return 0;
423
424 seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
425 if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
426 return -1;
427
428 seq = &seqadj->seq[IP_CT_DIR_REPLY];
429 if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
430 return -1;
431
432 return 0;
433 }
434
435 static inline int
436 ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
437 {
438 if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
439 goto nla_put_failure;
440 return 0;
441
442 nla_put_failure:
443 return -1;
444 }
445
446 static inline int
447 ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
448 {
449 if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
450 goto nla_put_failure;
451 return 0;
452
453 nla_put_failure:
454 return -1;
455 }
456
457 static int
458 ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
459 struct nf_conn *ct)
460 {
461 struct nlmsghdr *nlh;
462 struct nfgenmsg *nfmsg;
463 struct nlattr *nest_parms;
464 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
465
466 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
467 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
468 if (nlh == NULL)
469 goto nlmsg_failure;
470
471 nfmsg = nlmsg_data(nlh);
472 nfmsg->nfgen_family = nf_ct_l3num(ct);
473 nfmsg->version = NFNETLINK_V0;
474 nfmsg->res_id = 0;
475
476 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
477 if (!nest_parms)
478 goto nla_put_failure;
479 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
480 goto nla_put_failure;
481 nla_nest_end(skb, nest_parms);
482
483 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
484 if (!nest_parms)
485 goto nla_put_failure;
486 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
487 goto nla_put_failure;
488 nla_nest_end(skb, nest_parms);
489
490 if (nf_ct_zone(ct) &&
491 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
492 goto nla_put_failure;
493
494 if (ctnetlink_dump_status(skb, ct) < 0 ||
495 ctnetlink_dump_timeout(skb, ct) < 0 ||
496 ctnetlink_dump_acct(skb, ct, type) < 0 ||
497 ctnetlink_dump_timestamp(skb, ct) < 0 ||
498 ctnetlink_dump_protoinfo(skb, ct) < 0 ||
499 ctnetlink_dump_helpinfo(skb, ct) < 0 ||
500 ctnetlink_dump_mark(skb, ct) < 0 ||
501 ctnetlink_dump_secctx(skb, ct) < 0 ||
502 ctnetlink_dump_labels(skb, ct) < 0 ||
503 ctnetlink_dump_id(skb, ct) < 0 ||
504 ctnetlink_dump_use(skb, ct) < 0 ||
505 ctnetlink_dump_master(skb, ct) < 0 ||
506 ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
507 goto nla_put_failure;
508
509 nlmsg_end(skb, nlh);
510 return skb->len;
511
512 nlmsg_failure:
513 nla_put_failure:
514 nlmsg_cancel(skb, nlh);
515 return -1;
516 }
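/* Summary sketch (editor's note) of a full IPCTNL_MSG_CT_NEW payload as built
 * above, top-level attributes only; bracketed entries depend on the attached
 * extensions and on the kernel config options compiled in:
 *
 *   CTA_TUPLE_ORIG, CTA_TUPLE_REPLY, [CTA_ZONE], CTA_STATUS, CTA_TIMEOUT,
 *   [CTA_COUNTERS_ORIG/REPLY], [CTA_TIMESTAMP], [CTA_PROTOINFO], [CTA_HELP],
 *   [CTA_MARK], [CTA_SECCTX], [CTA_LABELS], CTA_ID, CTA_USE,
 *   [CTA_TUPLE_MASTER], [CTA_SEQ_ADJ_ORIG/REPLY]
 */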
517
518 static inline size_t
519 ctnetlink_proto_size(const struct nf_conn *ct)
520 {
521 struct nf_conntrack_l3proto *l3proto;
522 struct nf_conntrack_l4proto *l4proto;
523 size_t len = 0;
524
525 rcu_read_lock();
526 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
527 len += l3proto->nla_size;
528
529 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
530 len += l4proto->nla_size;
531 rcu_read_unlock();
532
533 return len;
534 }
535
536 static inline size_t
537 ctnetlink_acct_size(const struct nf_conn *ct)
538 {
539 if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
540 return 0;
541 return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
542 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
543 + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
544 ;
545 }
546
547 static inline int
548 ctnetlink_secctx_size(const struct nf_conn *ct)
549 {
550 #ifdef CONFIG_NF_CONNTRACK_SECMARK
551 int len, ret;
552
553 ret = security_secid_to_secctx(ct->secmark, NULL, &len);
554 if (ret)
555 return 0;
556
557 return nla_total_size(0) /* CTA_SECCTX */
558 + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
559 #else
560 return 0;
561 #endif
562 }
563
564 static inline size_t
565 ctnetlink_timestamp_size(const struct nf_conn *ct)
566 {
567 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
568 if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
569 return 0;
570 return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
571 #else
572 return 0;
573 #endif
574 }
575
576 static inline size_t
577 ctnetlink_nlmsg_size(const struct nf_conn *ct)
578 {
579 return NLMSG_ALIGN(sizeof(struct nfgenmsg))
580 + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
581 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
582 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
583 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
584 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
585 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
586 + ctnetlink_acct_size(ct)
587 + ctnetlink_timestamp_size(ct)
588 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
589 + nla_total_size(0) /* CTA_PROTOINFO */
590 + nla_total_size(0) /* CTA_HELP */
591 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
592 + ctnetlink_secctx_size(ct)
593 #ifdef CONFIG_NF_NAT_NEEDED
594 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
595 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
596 #endif
597 #ifdef CONFIG_NF_CONNTRACK_MARK
598 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
599 #endif
600 + ctnetlink_proto_size(ct)
601 + ctnetlink_label_size(ct)
602 ;
603 }
604
605 #ifdef CONFIG_NF_CONNTRACK_EVENTS
606 static int
607 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
608 {
609 struct net *net;
610 struct nlmsghdr *nlh;
611 struct nfgenmsg *nfmsg;
612 struct nlattr *nest_parms;
613 struct nf_conn *ct = item->ct;
614 struct sk_buff *skb;
615 unsigned int type;
616 unsigned int flags = 0, group;
617 int err;
618
619 /* ignore our fake conntrack entry */
620 if (nf_ct_is_untracked(ct))
621 return 0;
622
623 if (events & (1 << IPCT_DESTROY)) {
624 type = IPCTNL_MSG_CT_DELETE;
625 group = NFNLGRP_CONNTRACK_DESTROY;
626 } else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
627 type = IPCTNL_MSG_CT_NEW;
628 flags = NLM_F_CREATE|NLM_F_EXCL;
629 group = NFNLGRP_CONNTRACK_NEW;
630 } else if (events) {
631 type = IPCTNL_MSG_CT_NEW;
632 group = NFNLGRP_CONNTRACK_UPDATE;
633 } else
634 return 0;
635
636 net = nf_ct_net(ct);
637 if (!item->report && !nfnetlink_has_listeners(net, group))
638 return 0;
639
640 skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
641 if (skb == NULL)
642 goto errout;
643
644 type |= NFNL_SUBSYS_CTNETLINK << 8;
645 nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
646 if (nlh == NULL)
647 goto nlmsg_failure;
648
649 nfmsg = nlmsg_data(nlh);
650 nfmsg->nfgen_family = nf_ct_l3num(ct);
651 nfmsg->version = NFNETLINK_V0;
652 nfmsg->res_id = 0;
653
654 rcu_read_lock();
655 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
656 if (!nest_parms)
657 goto nla_put_failure;
658 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
659 goto nla_put_failure;
660 nla_nest_end(skb, nest_parms);
661
662 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
663 if (!nest_parms)
664 goto nla_put_failure;
665 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
666 goto nla_put_failure;
667 nla_nest_end(skb, nest_parms);
668
669 if (nf_ct_zone(ct) &&
670 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
671 goto nla_put_failure;
672
673 if (ctnetlink_dump_id(skb, ct) < 0)
674 goto nla_put_failure;
675
676 if (ctnetlink_dump_status(skb, ct) < 0)
677 goto nla_put_failure;
678
679 if (events & (1 << IPCT_DESTROY)) {
680 if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
681 ctnetlink_dump_timestamp(skb, ct) < 0)
682 goto nla_put_failure;
683 } else {
684 if (ctnetlink_dump_timeout(skb, ct) < 0)
685 goto nla_put_failure;
686
687 if (events & (1 << IPCT_PROTOINFO)
688 && ctnetlink_dump_protoinfo(skb, ct) < 0)
689 goto nla_put_failure;
690
691 if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
692 && ctnetlink_dump_helpinfo(skb, ct) < 0)
693 goto nla_put_failure;
694
695 #ifdef CONFIG_NF_CONNTRACK_SECMARK
696 if ((events & (1 << IPCT_SECMARK) || ct->secmark)
697 && ctnetlink_dump_secctx(skb, ct) < 0)
698 goto nla_put_failure;
699 #endif
700 if (events & (1 << IPCT_LABEL) &&
701 ctnetlink_dump_labels(skb, ct) < 0)
702 goto nla_put_failure;
703
704 if (events & (1 << IPCT_RELATED) &&
705 ctnetlink_dump_master(skb, ct) < 0)
706 goto nla_put_failure;
707
708 if (events & (1 << IPCT_SEQADJ) &&
709 ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
710 goto nla_put_failure;
711 }
712
713 #ifdef CONFIG_NF_CONNTRACK_MARK
714 if ((events & (1 << IPCT_MARK) || ct->mark)
715 && ctnetlink_dump_mark(skb, ct) < 0)
716 goto nla_put_failure;
717 #endif
718 rcu_read_unlock();
719
720 nlmsg_end(skb, nlh);
721 err = nfnetlink_send(skb, net, item->portid, group, item->report,
722 GFP_ATOMIC);
723 if (err == -ENOBUFS || err == -EAGAIN)
724 return -ENOBUFS;
725
726 return 0;
727
728 nla_put_failure:
729 rcu_read_unlock();
730 nlmsg_cancel(skb, nlh);
731 nlmsg_failure:
732 kfree_skb(skb);
733 errout:
734 if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
735 return -ENOBUFS;
736
737 return 0;
738 }
739 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
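/* Hedged userspace sketch (editor's addition, not part of this kernel file):
 * a minimal libmnl listener for the multicast groups that
 * ctnetlink_conntrack_event() above sends to.  Error handling is trimmed and
 * the callback only prints the message type; real code would walk the CTA_*
 * attributes shown earlier.
 */
#if 0	/* example only, never compiled with the kernel */
#include <stdio.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>

static int event_cb(const struct nlmsghdr *nlh, void *data)
{
	/* low byte of nlmsg_type is IPCTNL_MSG_CT_NEW or IPCTNL_MSG_CT_DELETE */
	printf("conntrack event, message type %u\n", nlh->nlmsg_type & 0xff);
	return MNL_CB_OK;
}

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	unsigned int group;
	struct mnl_socket *nl;
	ssize_t ret;

	nl = mnl_socket_open(NETLINK_NETFILTER);
	mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);

	/* subscribe to the three groups used by the event path above */
	group = NFNLGRP_CONNTRACK_NEW;
	mnl_socket_setsockopt(nl, NETLINK_ADD_MEMBERSHIP, &group, sizeof(group));
	group = NFNLGRP_CONNTRACK_UPDATE;
	mnl_socket_setsockopt(nl, NETLINK_ADD_MEMBERSHIP, &group, sizeof(group));
	group = NFNLGRP_CONNTRACK_DESTROY;
	mnl_socket_setsockopt(nl, NETLINK_ADD_MEMBERSHIP, &group, sizeof(group));

	while ((ret = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) {
		if (mnl_cb_run(buf, ret, 0, 0, event_cb, NULL) <= MNL_CB_STOP)
			break;
	}

	mnl_socket_close(nl);
	return 0;
}
#endif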
740
741 static int ctnetlink_done(struct netlink_callback *cb)
742 {
743 if (cb->args[1])
744 nf_ct_put((struct nf_conn *)cb->args[1]);
745 if (cb->data)
746 kfree(cb->data);
747 return 0;
748 }
749
750 struct ctnetlink_dump_filter {
751 struct {
752 u_int32_t val;
753 u_int32_t mask;
754 } mark;
755 };
756
757 static int
758 ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
759 {
760 struct net *net = sock_net(skb->sk);
761 struct nf_conn *ct, *last;
762 struct nf_conntrack_tuple_hash *h;
763 struct hlist_nulls_node *n;
764 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
765 u_int8_t l3proto = nfmsg->nfgen_family;
766 int res;
767 #ifdef CONFIG_NF_CONNTRACK_MARK
768 const struct ctnetlink_dump_filter *filter = cb->data;
769 #endif
770
771 spin_lock_bh(&nf_conntrack_lock);
772 last = (struct nf_conn *)cb->args[1];
773 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
774 restart:
775 hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
776 hnnode) {
777 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
778 continue;
779 ct = nf_ct_tuplehash_to_ctrack(h);
780 /* Dump entries of a given L3 protocol number.
781 * If it is not specified, i.e. l3proto == 0,
782 * then dump everything. */
783 if (l3proto && nf_ct_l3num(ct) != l3proto)
784 continue;
785 if (cb->args[1]) {
786 if (ct != last)
787 continue;
788 cb->args[1] = 0;
789 }
790 #ifdef CONFIG_NF_CONNTRACK_MARK
791 if (filter && !((ct->mark & filter->mark.mask) ==
792 filter->mark.val)) {
793 continue;
794 }
795 #endif
796 rcu_read_lock();
797 res =
798 ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
799 cb->nlh->nlmsg_seq,
800 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
801 ct);
802 rcu_read_unlock();
803 if (res < 0) {
804 nf_conntrack_get(&ct->ct_general);
805 cb->args[1] = (unsigned long)ct;
806 goto out;
807 }
808 }
809 if (cb->args[1]) {
810 cb->args[1] = 0;
811 goto restart;
812 }
813 }
814 out:
815 spin_unlock_bh(&nf_conntrack_lock);
816 if (last)
817 nf_ct_put(last);
818
819 return skb->len;
820 }
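/* Descriptive note on the dump-resume bookkeeping above: cb->args[0] is the
 * hash bucket to continue from and cb->args[1] holds a reference to the last
 * entry that did not fit into the previous skb; the "restart" label rescans
 * that bucket until the saved entry is seen again, and ctnetlink_done()
 * releases the reference once the dump finishes.
 */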
821
822 static inline int
823 ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
824 {
825 struct nlattr *tb[CTA_IP_MAX+1];
826 struct nf_conntrack_l3proto *l3proto;
827 int ret = 0;
828
829 ret = nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);
830 if (ret < 0)
831 return ret;
832
833 rcu_read_lock();
834 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
835
836 if (likely(l3proto->nlattr_to_tuple)) {
837 ret = nla_validate_nested(attr, CTA_IP_MAX,
838 l3proto->nla_policy);
839 if (ret == 0)
840 ret = l3proto->nlattr_to_tuple(tb, tuple);
841 }
842
843 rcu_read_unlock();
844
845 return ret;
846 }
847
848 static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
849 [CTA_PROTO_NUM] = { .type = NLA_U8 },
850 };
851
852 static inline int
853 ctnetlink_parse_tuple_proto(struct nlattr *attr,
854 struct nf_conntrack_tuple *tuple)
855 {
856 struct nlattr *tb[CTA_PROTO_MAX+1];
857 struct nf_conntrack_l4proto *l4proto;
858 int ret = 0;
859
860 ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
861 if (ret < 0)
862 return ret;
863
864 if (!tb[CTA_PROTO_NUM])
865 return -EINVAL;
866 tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
867
868 rcu_read_lock();
869 l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
870
871 if (likely(l4proto->nlattr_to_tuple)) {
872 ret = nla_validate_nested(attr, CTA_PROTO_MAX,
873 l4proto->nla_policy);
874 if (ret == 0)
875 ret = l4proto->nlattr_to_tuple(tb, tuple);
876 }
877
878 rcu_read_unlock();
879
880 return ret;
881 }
882
883 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
884 [CTA_TUPLE_IP] = { .type = NLA_NESTED },
885 [CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
886 };
887
888 static int
889 ctnetlink_parse_tuple(const struct nlattr * const cda[],
890 struct nf_conntrack_tuple *tuple,
891 enum ctattr_type type, u_int8_t l3num)
892 {
893 struct nlattr *tb[CTA_TUPLE_MAX+1];
894 int err;
895
896 memset(tuple, 0, sizeof(*tuple));
897
898 err = nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
899 if (err < 0)
900 return err;
901
902 if (!tb[CTA_TUPLE_IP])
903 return -EINVAL;
904
905 tuple->src.l3num = l3num;
906
907 err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
908 if (err < 0)
909 return err;
910
911 if (!tb[CTA_TUPLE_PROTO])
912 return -EINVAL;
913
914 err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
915 if (err < 0)
916 return err;
917
918 /* orig and expect tuples get DIR_ORIGINAL */
919 if (type == CTA_TUPLE_REPLY)
920 tuple->dst.dir = IP_CT_DIR_REPLY;
921 else
922 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
923
924 return 0;
925 }
926
927 static int
928 ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
929 {
930 if (attr)
931 #ifdef CONFIG_NF_CONNTRACK_ZONES
932 *zone = ntohs(nla_get_be16(attr));
933 #else
934 return -EOPNOTSUPP;
935 #endif
936 else
937 *zone = 0;
938
939 return 0;
940 }
941
942 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
943 [CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
944 .len = NF_CT_HELPER_NAME_LEN - 1 },
945 };
946
947 static inline int
948 ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
949 struct nlattr **helpinfo)
950 {
951 int err;
952 struct nlattr *tb[CTA_HELP_MAX+1];
953
954 err = nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
955 if (err < 0)
956 return err;
957
958 if (!tb[CTA_HELP_NAME])
959 return -EINVAL;
960
961 *helper_name = nla_data(tb[CTA_HELP_NAME]);
962
963 if (tb[CTA_HELP_INFO])
964 *helpinfo = tb[CTA_HELP_INFO];
965
966 return 0;
967 }
968
969 static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
970 [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
971 [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
972 [CTA_STATUS] = { .type = NLA_U32 },
973 [CTA_PROTOINFO] = { .type = NLA_NESTED },
974 [CTA_HELP] = { .type = NLA_NESTED },
975 [CTA_NAT_SRC] = { .type = NLA_NESTED },
976 [CTA_TIMEOUT] = { .type = NLA_U32 },
977 [CTA_MARK] = { .type = NLA_U32 },
978 [CTA_ID] = { .type = NLA_U32 },
979 [CTA_NAT_DST] = { .type = NLA_NESTED },
980 [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
981 [CTA_NAT_SEQ_ADJ_ORIG] = { .type = NLA_NESTED },
982 [CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
983 [CTA_ZONE] = { .type = NLA_U16 },
984 [CTA_MARK_MASK] = { .type = NLA_U32 },
985 [CTA_LABELS] = { .type = NLA_BINARY,
986 .len = NF_CT_LABELS_MAX_SIZE },
987 [CTA_LABELS_MASK] = { .type = NLA_BINARY,
988 .len = NF_CT_LABELS_MAX_SIZE },
989 };
990
991 static int
992 ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
993 const struct nlmsghdr *nlh,
994 const struct nlattr * const cda[])
995 {
996 struct net *net = sock_net(ctnl);
997 struct nf_conntrack_tuple_hash *h;
998 struct nf_conntrack_tuple tuple;
999 struct nf_conn *ct;
1000 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1001 u_int8_t u3 = nfmsg->nfgen_family;
1002 u16 zone;
1003 int err;
1004
1005 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1006 if (err < 0)
1007 return err;
1008
1009 if (cda[CTA_TUPLE_ORIG])
1010 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
1011 else if (cda[CTA_TUPLE_REPLY])
1012 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
1013 else {
1014 /* Flush the whole table */
1015 nf_conntrack_flush_report(net,
1016 NETLINK_CB(skb).portid,
1017 nlmsg_report(nlh));
1018 return 0;
1019 }
1020
1021 if (err < 0)
1022 return err;
1023
1024 h = nf_conntrack_find_get(net, zone, &tuple);
1025 if (!h)
1026 return -ENOENT;
1027
1028 ct = nf_ct_tuplehash_to_ctrack(h);
1029
1030 if (cda[CTA_ID]) {
1031 u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
1032 if (id != (u32)(unsigned long)ct) {
1033 nf_ct_put(ct);
1034 return -ENOENT;
1035 }
1036 }
1037
1038 if (del_timer(&ct->timeout))
1039 nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
1040
1041 nf_ct_put(ct);
1042
1043 return 0;
1044 }
1045
1046 static int
1047 ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
1048 const struct nlmsghdr *nlh,
1049 const struct nlattr * const cda[])
1050 {
1051 struct net *net = sock_net(ctnl);
1052 struct nf_conntrack_tuple_hash *h;
1053 struct nf_conntrack_tuple tuple;
1054 struct nf_conn *ct;
1055 struct sk_buff *skb2 = NULL;
1056 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1057 u_int8_t u3 = nfmsg->nfgen_family;
1058 u16 zone;
1059 int err;
1060
1061 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1062 struct netlink_dump_control c = {
1063 .dump = ctnetlink_dump_table,
1064 .done = ctnetlink_done,
1065 };
1066 #ifdef CONFIG_NF_CONNTRACK_MARK
1067 if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
1068 struct ctnetlink_dump_filter *filter;
1069
1070 filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
1071 GFP_ATOMIC);
1072 if (filter == NULL)
1073 return -ENOMEM;
1074
1075 filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
1076 filter->mark.mask =
1077 ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
1078 c.data = filter;
1079 }
1080 #endif
1081 return netlink_dump_start(ctnl, skb, nlh, &c);
1082 }
1083
1084 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1085 if (err < 0)
1086 return err;
1087
1088 if (cda[CTA_TUPLE_ORIG])
1089 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
1090 else if (cda[CTA_TUPLE_REPLY])
1091 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
1092 else
1093 return -EINVAL;
1094
1095 if (err < 0)
1096 return err;
1097
1098 h = nf_conntrack_find_get(net, zone, &tuple);
1099 if (!h)
1100 return -ENOENT;
1101
1102 ct = nf_ct_tuplehash_to_ctrack(h);
1103
1104 err = -ENOMEM;
1105 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1106 if (skb2 == NULL) {
1107 nf_ct_put(ct);
1108 return -ENOMEM;
1109 }
1110
1111 rcu_read_lock();
1112 err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1113 NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
1114 rcu_read_unlock();
1115 nf_ct_put(ct);
1116 if (err <= 0)
1117 goto free;
1118
1119 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1120 if (err < 0)
1121 goto out;
1122
1123 return 0;
1124
1125 free:
1126 kfree_skb(skb2);
1127 out:
1128 /* this avoids a loop in nfnetlink. */
1129 return err == -EAGAIN ? -ENOBUFS : err;
1130 }
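/* Hedged userspace sketch (editor's addition, not part of this kernel file):
 * requesting the table dump served by ctnetlink_get_conntrack() and
 * ctnetlink_dump_table() above with libmnl.  The callback is left trivial;
 * real code would parse the CTA_* attributes with mnl_attr_parse().
 */
#if 0	/* example only, never compiled with the kernel */
#include <time.h>
#include <stdio.h>
#include <sys/socket.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static int dump_cb(const struct nlmsghdr *nlh, void *data)
{
	printf("conntrack entry, %u bytes\n", nlh->nlmsg_len);
	return MNL_CB_OK;
}

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfh;
	unsigned int seq, portid;
	ssize_t ret;

	nl = mnl_socket_open(NETLINK_NETFILTER);
	mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);
	portid = mnl_socket_get_portid(nl);

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_GET;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	nlh->nlmsg_seq = seq = time(NULL);

	nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(struct nfgenmsg));
	nfh->nfgen_family = AF_UNSPEC;	/* i.e. l3proto == 0: dump everything */
	nfh->version = NFNETLINK_V0;
	nfh->res_id = 0;

	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
	while ((ret = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) {
		ret = mnl_cb_run(buf, ret, seq, portid, dump_cb, NULL);
		if (ret <= MNL_CB_STOP)
			break;
	}

	mnl_socket_close(nl);
	return 0;
}
#endif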
1131
1132 static int ctnetlink_done_list(struct netlink_callback *cb)
1133 {
1134 if (cb->args[1])
1135 nf_ct_put((struct nf_conn *)cb->args[1]);
1136 return 0;
1137 }
1138
1139 static int
1140 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
1141 {
1142 struct nf_conn *ct, *last = NULL;
1143 struct nf_conntrack_tuple_hash *h;
1144 struct hlist_nulls_node *n;
1145 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
1146 u_int8_t l3proto = nfmsg->nfgen_family;
1147 int res;
1148 int cpu;
1149 struct hlist_nulls_head *list;
1150 struct net *net = sock_net(skb->sk);
1151
1152 if (cb->args[2])
1153 return 0;
1154
1155 if (cb->args[0] == nr_cpu_ids)
1156 return 0;
1157
1158 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1159 struct ct_pcpu *pcpu;
1160
1161 if (!cpu_possible(cpu))
1162 continue;
1163
1164 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1165 spin_lock_bh(&pcpu->lock);
1166 last = (struct nf_conn *)cb->args[1];
1167 list = dying ? &pcpu->dying : &pcpu->unconfirmed;
1168 restart:
1169 hlist_nulls_for_each_entry(h, n, list, hnnode) {
1170 ct = nf_ct_tuplehash_to_ctrack(h);
1171 if (l3proto && nf_ct_l3num(ct) != l3proto)
1172 continue;
1173 if (cb->args[1]) {
1174 if (ct != last)
1175 continue;
1176 cb->args[1] = 0;
1177 }
1178 rcu_read_lock();
1179 res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
1180 cb->nlh->nlmsg_seq,
1181 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
1182 ct);
1183 rcu_read_unlock();
1184 if (res < 0) {
1185 nf_conntrack_get(&ct->ct_general);
1186 cb->args[1] = (unsigned long)ct;
1187 spin_unlock_bh(&pcpu->lock);
1188 goto out;
1189 }
1190 }
1191 if (cb->args[1]) {
1192 cb->args[1] = 0;
1193 goto restart;
1194 } else
1195 cb->args[2] = 1;
1196 spin_unlock_bh(&pcpu->lock);
1197 }
1198 out:
1199 if (last)
1200 nf_ct_put(last);
1201
1202 return skb->len;
1203 }
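/* Descriptive note: this walker implements the change named in the commit
 * subject.  Dying and unconfirmed conntracks live on per-cpu lists in
 * net->ct.pcpu_lists, each protected by its own pcpu->lock spinlock, so the
 * dump only holds one short per-cpu spinlock at a time instead of the global
 * nf_conntrack_lock used by ctnetlink_dump_table().  cb->args[1] remembers
 * the last entry that did not fit into the skb and cb->args[2] marks a
 * completed dump.
 */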
1204
1205 static int
1206 ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
1207 {
1208 return ctnetlink_dump_list(skb, cb, true);
1209 }
1210
1211 static int
1212 ctnetlink_get_ct_dying(struct sock *ctnl, struct sk_buff *skb,
1213 const struct nlmsghdr *nlh,
1214 const struct nlattr * const cda[])
1215 {
1216 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1217 struct netlink_dump_control c = {
1218 .dump = ctnetlink_dump_dying,
1219 .done = ctnetlink_done_list,
1220 };
1221 return netlink_dump_start(ctnl, skb, nlh, &c);
1222 }
1223
1224 return -EOPNOTSUPP;
1225 }
1226
1227 static int
1228 ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
1229 {
1230 return ctnetlink_dump_list(skb, cb, false);
1231 }
1232
1233 static int
1234 ctnetlink_get_ct_unconfirmed(struct sock *ctnl, struct sk_buff *skb,
1235 const struct nlmsghdr *nlh,
1236 const struct nlattr * const cda[])
1237 {
1238 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1239 struct netlink_dump_control c = {
1240 .dump = ctnetlink_dump_unconfirmed,
1241 .done = ctnetlink_done_list,
1242 };
1243 return netlink_dump_start(ctnl, skb, nlh, &c);
1244 }
1245
1246 return -EOPNOTSUPP;
1247 }
1248
1249 #ifdef CONFIG_NF_NAT_NEEDED
1250 static int
1251 ctnetlink_parse_nat_setup(struct nf_conn *ct,
1252 enum nf_nat_manip_type manip,
1253 const struct nlattr *attr)
1254 {
1255 typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
1256 int err;
1257
1258 parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
1259 if (!parse_nat_setup) {
1260 #ifdef CONFIG_MODULES
1261 rcu_read_unlock();
1262 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1263 if (request_module("nf-nat") < 0) {
1264 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1265 rcu_read_lock();
1266 return -EOPNOTSUPP;
1267 }
1268 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1269 rcu_read_lock();
1270 if (nfnetlink_parse_nat_setup_hook)
1271 return -EAGAIN;
1272 #endif
1273 return -EOPNOTSUPP;
1274 }
1275
1276 err = parse_nat_setup(ct, manip, attr);
1277 if (err == -EAGAIN) {
1278 #ifdef CONFIG_MODULES
1279 rcu_read_unlock();
1280 nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
1281 if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
1282 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1283 rcu_read_lock();
1284 return -EOPNOTSUPP;
1285 }
1286 nfnl_lock(NFNL_SUBSYS_CTNETLINK);
1287 rcu_read_lock();
1288 #else
1289 err = -EOPNOTSUPP;
1290 #endif
1291 }
1292 return err;
1293 }
1294 #endif
1295
1296 static int
1297 ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1298 {
1299 unsigned long d;
1300 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
1301 d = ct->status ^ status;
1302
1303 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
1304 /* unchangeable */
1305 return -EBUSY;
1306
1307 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
1308 /* SEEN_REPLY bit can only be set */
1309 return -EBUSY;
1310
1311 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
1312 /* ASSURED bit can only be set */
1313 return -EBUSY;
1314
1315 /* Be careful here, modifying NAT bits can screw up things,
1316 * so don't let users modify them directly if they don't pass
1317 * nf_nat_range. */
1318 ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
1319 return 0;
1320 }
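/* Worked example of the rules above (editor's note): take a conntrack whose
 * status is IPS_SEEN_REPLY|IPS_ASSURED|IPS_CONFIRMED and a request carrying
 * CTA_STATUS = IPS_SEEN_REPLY|IPS_CONFIRMED.  Then d contains IPS_ASSURED
 * while the new status does not set it, so the request fails with -EBUSY:
 * ASSURED and SEEN_REPLY may only be turned on, and the EXPECTED, CONFIRMED
 * and DYING bits may not be toggled at all.
 */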
1321
1322 static int
1323 ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1324 {
1325 #ifdef CONFIG_NF_NAT_NEEDED
1326 int ret;
1327
1328 if (cda[CTA_NAT_DST]) {
1329 ret = ctnetlink_parse_nat_setup(ct,
1330 NF_NAT_MANIP_DST,
1331 cda[CTA_NAT_DST]);
1332 if (ret < 0)
1333 return ret;
1334 }
1335 if (cda[CTA_NAT_SRC]) {
1336 ret = ctnetlink_parse_nat_setup(ct,
1337 NF_NAT_MANIP_SRC,
1338 cda[CTA_NAT_SRC]);
1339 if (ret < 0)
1340 return ret;
1341 }
1342 return 0;
1343 #else
1344 return -EOPNOTSUPP;
1345 #endif
1346 }
1347
1348 static inline int
1349 ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
1350 {
1351 struct nf_conntrack_helper *helper;
1352 struct nf_conn_help *help = nfct_help(ct);
1353 char *helpname = NULL;
1354 struct nlattr *helpinfo = NULL;
1355 int err;
1356
1357 /* don't change helper of sibling connections */
1358 if (ct->master)
1359 return -EBUSY;
1360
1361 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1362 if (err < 0)
1363 return err;
1364
1365 if (!strcmp(helpname, "")) {
1366 if (help && help->helper) {
1367 /* we had a helper before ... */
1368 nf_ct_remove_expectations(ct);
1369 RCU_INIT_POINTER(help->helper, NULL);
1370 }
1371
1372 return 0;
1373 }
1374
1375 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1376 nf_ct_protonum(ct));
1377 if (helper == NULL) {
1378 #ifdef CONFIG_MODULES
1379 spin_unlock_bh(&nf_conntrack_lock);
1380
1381 if (request_module("nfct-helper-%s", helpname) < 0) {
1382 spin_lock_bh(&nf_conntrack_lock);
1383 return -EOPNOTSUPP;
1384 }
1385
1386 spin_lock_bh(&nf_conntrack_lock);
1387 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1388 nf_ct_protonum(ct));
1389 if (helper)
1390 return -EAGAIN;
1391 #endif
1392 return -EOPNOTSUPP;
1393 }
1394
1395 if (help) {
1396 if (help->helper == helper) {
1397 /* update private helper data if allowed. */
1398 if (helper->from_nlattr)
1399 helper->from_nlattr(helpinfo, ct);
1400 return 0;
1401 } else
1402 return -EBUSY;
1403 }
1404
1405 /* we cannot set a helper for an existing conntrack */
1406 return -EOPNOTSUPP;
1407 }
1408
1409 static inline int
1410 ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
1411 {
1412 u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1413
1414 if (!del_timer(&ct->timeout))
1415 return -ETIME;
1416
1417 ct->timeout.expires = jiffies + timeout * HZ;
1418 add_timer(&ct->timeout);
1419
1420 return 0;
1421 }
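/* Note (editor's sketch): the new expiry mirrors the dump side, i.e.
 * jiffies + CTA_TIMEOUT * HZ, so a CTA_TIMEOUT of 120 arms the timer 120
 * seconds ahead.  -ETIME is returned when the timer has already fired,
 * meaning the entry is on its way out and cannot be refreshed.
 */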
1422
1423 static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
1424 [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
1425 [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
1426 [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
1427 };
1428
1429 static inline int
1430 ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
1431 {
1432 const struct nlattr *attr = cda[CTA_PROTOINFO];
1433 struct nlattr *tb[CTA_PROTOINFO_MAX+1];
1434 struct nf_conntrack_l4proto *l4proto;
1435 int err = 0;
1436
1437 err = nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
1438 if (err < 0)
1439 return err;
1440
1441 rcu_read_lock();
1442 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
1443 if (l4proto->from_nlattr)
1444 err = l4proto->from_nlattr(tb, ct);
1445 rcu_read_unlock();
1446
1447 return err;
1448 }
1449
1450 static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
1451 [CTA_SEQADJ_CORRECTION_POS] = { .type = NLA_U32 },
1452 [CTA_SEQADJ_OFFSET_BEFORE] = { .type = NLA_U32 },
1453 [CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 },
1454 };
1455
1456 static inline int
1457 change_seq_adj(struct nf_ct_seqadj *seq, const struct nlattr * const attr)
1458 {
1459 int err;
1460 struct nlattr *cda[CTA_SEQADJ_MAX+1];
1461
1462 err = nla_parse_nested(cda, CTA_SEQADJ_MAX, attr, seqadj_policy);
1463 if (err < 0)
1464 return err;
1465
1466 if (!cda[CTA_SEQADJ_CORRECTION_POS])
1467 return -EINVAL;
1468
1469 seq->correction_pos =
1470 ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
1471
1472 if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
1473 return -EINVAL;
1474
1475 seq->offset_before =
1476 ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
1477
1478 if (!cda[CTA_SEQADJ_OFFSET_AFTER])
1479 return -EINVAL;
1480
1481 seq->offset_after =
1482 ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
1483
1484 return 0;
1485 }
1486
1487 static int
1488 ctnetlink_change_seq_adj(struct nf_conn *ct,
1489 const struct nlattr * const cda[])
1490 {
1491 struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
1492 int ret = 0;
1493
1494 if (!seqadj)
1495 return 0;
1496
1497 if (cda[CTA_SEQ_ADJ_ORIG]) {
1498 ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
1499 cda[CTA_SEQ_ADJ_ORIG]);
1500 if (ret < 0)
1501 return ret;
1502
1503 ct->status |= IPS_SEQ_ADJUST;
1504 }
1505
1506 if (cda[CTA_SEQ_ADJ_REPLY]) {
1507 ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
1508 cda[CTA_SEQ_ADJ_REPLY]);
1509 if (ret < 0)
1510 return ret;
1511
1512 ct->status |= IPS_SEQ_ADJUST;
1513 }
1514
1515 return 0;
1516 }
1517
1518 static int
1519 ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
1520 {
1521 #ifdef CONFIG_NF_CONNTRACK_LABELS
1522 size_t len = nla_len(cda[CTA_LABELS]);
1523 const void *mask = cda[CTA_LABELS_MASK];
1524
1525 if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
1526 return -EINVAL;
1527
1528 if (mask) {
1529 if (nla_len(cda[CTA_LABELS_MASK]) == 0 ||
1530 nla_len(cda[CTA_LABELS_MASK]) != len)
1531 return -EINVAL;
1532 mask = nla_data(cda[CTA_LABELS_MASK]);
1533 }
1534
1535 len /= sizeof(u32);
1536
1537 return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len);
1538 #else
1539 return -EOPNOTSUPP;
1540 #endif
1541 }
1542
1543 static int
1544 ctnetlink_change_conntrack(struct nf_conn *ct,
1545 const struct nlattr * const cda[])
1546 {
1547 int err;
1548
1549 /* only allow NAT changes and master assignment for new conntracks */
1550 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
1551 return -EOPNOTSUPP;
1552
1553 if (cda[CTA_HELP]) {
1554 err = ctnetlink_change_helper(ct, cda);
1555 if (err < 0)
1556 return err;
1557 }
1558
1559 if (cda[CTA_TIMEOUT]) {
1560 err = ctnetlink_change_timeout(ct, cda);
1561 if (err < 0)
1562 return err;
1563 }
1564
1565 if (cda[CTA_STATUS]) {
1566 err = ctnetlink_change_status(ct, cda);
1567 if (err < 0)
1568 return err;
1569 }
1570
1571 if (cda[CTA_PROTOINFO]) {
1572 err = ctnetlink_change_protoinfo(ct, cda);
1573 if (err < 0)
1574 return err;
1575 }
1576
1577 #if defined(CONFIG_NF_CONNTRACK_MARK)
1578 if (cda[CTA_MARK])
1579 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1580 #endif
1581
1582 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
1583 err = ctnetlink_change_seq_adj(ct, cda);
1584 if (err < 0)
1585 return err;
1586 }
1587
1588 if (cda[CTA_LABELS]) {
1589 err = ctnetlink_attach_labels(ct, cda);
1590 if (err < 0)
1591 return err;
1592 }
1593
1594 return 0;
1595 }
1596
1597 static struct nf_conn *
1598 ctnetlink_create_conntrack(struct net *net, u16 zone,
1599 const struct nlattr * const cda[],
1600 struct nf_conntrack_tuple *otuple,
1601 struct nf_conntrack_tuple *rtuple,
1602 u8 u3)
1603 {
1604 struct nf_conn *ct;
1605 int err = -EINVAL;
1606 struct nf_conntrack_helper *helper;
1607 struct nf_conn_tstamp *tstamp;
1608
1609 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1610 if (IS_ERR(ct))
1611 return ERR_PTR(-ENOMEM);
1612
1613 if (!cda[CTA_TIMEOUT])
1614 goto err1;
1615 ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1616
1617 ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
1618
1619 rcu_read_lock();
1620 if (cda[CTA_HELP]) {
1621 char *helpname = NULL;
1622 struct nlattr *helpinfo = NULL;
1623
1624 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
1625 if (err < 0)
1626 goto err2;
1627
1628 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
1629 nf_ct_protonum(ct));
1630 if (helper == NULL) {
1631 rcu_read_unlock();
1632 #ifdef CONFIG_MODULES
1633 if (request_module("nfct-helper-%s", helpname) < 0) {
1634 err = -EOPNOTSUPP;
1635 goto err1;
1636 }
1637
1638 rcu_read_lock();
1639 helper = __nf_conntrack_helper_find(helpname,
1640 nf_ct_l3num(ct),
1641 nf_ct_protonum(ct));
1642 if (helper) {
1643 err = -EAGAIN;
1644 goto err2;
1645 }
1646 rcu_read_unlock();
1647 #endif
1648 err = -EOPNOTSUPP;
1649 goto err1;
1650 } else {
1651 struct nf_conn_help *help;
1652
1653 help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
1654 if (help == NULL) {
1655 err = -ENOMEM;
1656 goto err2;
1657 }
1658 /* set private helper data if allowed. */
1659 if (helper->from_nlattr)
1660 helper->from_nlattr(helpinfo, ct);
1661
1662 /* not in hash table yet so not strictly necessary */
1663 RCU_INIT_POINTER(help->helper, helper);
1664 }
1665 } else {
1666 /* try an implicit helper assignment */
1667 err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1668 if (err < 0)
1669 goto err2;
1670 }
1671
1672 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
1673 err = ctnetlink_change_nat(ct, cda);
1674 if (err < 0)
1675 goto err2;
1676 }
1677
1678 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1679 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1680 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1681 nf_ct_labels_ext_add(ct);
1682
1683 /* we must add conntrack extensions before confirmation. */
1684 ct->status |= IPS_CONFIRMED;
1685
1686 if (cda[CTA_STATUS]) {
1687 err = ctnetlink_change_status(ct, cda);
1688 if (err < 0)
1689 goto err2;
1690 }
1691
1692 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
1693 err = ctnetlink_change_seq_adj(ct, cda);
1694 if (err < 0)
1695 goto err2;
1696 }
1697
1698 memset(&ct->proto, 0, sizeof(ct->proto));
1699 if (cda[CTA_PROTOINFO]) {
1700 err = ctnetlink_change_protoinfo(ct, cda);
1701 if (err < 0)
1702 goto err2;
1703 }
1704
1705 #if defined(CONFIG_NF_CONNTRACK_MARK)
1706 if (cda[CTA_MARK])
1707 ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
1708 #endif
1709
1710 /* setup master conntrack: this is a confirmed expectation */
1711 if (cda[CTA_TUPLE_MASTER]) {
1712 struct nf_conntrack_tuple master;
1713 struct nf_conntrack_tuple_hash *master_h;
1714 struct nf_conn *master_ct;
1715
1716 err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
1717 if (err < 0)
1718 goto err2;
1719
1720 master_h = nf_conntrack_find_get(net, zone, &master);
1721 if (master_h == NULL) {
1722 err = -ENOENT;
1723 goto err2;
1724 }
1725 master_ct = nf_ct_tuplehash_to_ctrack(master_h);
1726 __set_bit(IPS_EXPECTED_BIT, &ct->status);
1727 ct->master = master_ct;
1728 }
1729 tstamp = nf_conn_tstamp_find(ct);
1730 if (tstamp)
1731 tstamp->start = ktime_to_ns(ktime_get_real());
1732
1733 err = nf_conntrack_hash_check_insert(ct);
1734 if (err < 0)
1735 goto err2;
1736
1737 rcu_read_unlock();
1738
1739 return ct;
1740
1741 err2:
1742 rcu_read_unlock();
1743 err1:
1744 nf_conntrack_free(ct);
1745 return ERR_PTR(err);
1746 }
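/* Descriptive summary of the creation path above (editor's note):
 * CTA_TIMEOUT is mandatory, the helper is resolved (optionally via
 * request_module), NAT setup is applied, and the acct, tstamp, ecache and
 * labels extensions are attached before IPS_CONFIRMED is set, matching the
 * in-code comment that extensions must be added before confirmation.  Only
 * then are status, seqadj, protoinfo, mark and an optional master applied,
 * and the entry is inserted with nf_conntrack_hash_check_insert().
 */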
1747
1748 static int
1749 ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1750 const struct nlmsghdr *nlh,
1751 const struct nlattr * const cda[])
1752 {
1753 struct net *net = sock_net(ctnl);
1754 struct nf_conntrack_tuple otuple, rtuple;
1755 struct nf_conntrack_tuple_hash *h = NULL;
1756 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
1757 struct nf_conn *ct;
1758 u_int8_t u3 = nfmsg->nfgen_family;
1759 u16 zone;
1760 int err;
1761
1762 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
1763 if (err < 0)
1764 return err;
1765
1766 if (cda[CTA_TUPLE_ORIG]) {
1767 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
1768 if (err < 0)
1769 return err;
1770 }
1771
1772 if (cda[CTA_TUPLE_REPLY]) {
1773 err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
1774 if (err < 0)
1775 return err;
1776 }
1777
1778 if (cda[CTA_TUPLE_ORIG])
1779 h = nf_conntrack_find_get(net, zone, &otuple);
1780 else if (cda[CTA_TUPLE_REPLY])
1781 h = nf_conntrack_find_get(net, zone, &rtuple);
1782
1783 if (h == NULL) {
1784 err = -ENOENT;
1785 if (nlh->nlmsg_flags & NLM_F_CREATE) {
1786 enum ip_conntrack_events events;
1787
1788 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
1789 return -EINVAL;
1790
1791 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
1792 &rtuple, u3);
1793 if (IS_ERR(ct))
1794 return PTR_ERR(ct);
1795
1796 err = 0;
1797 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
1798 events = IPCT_RELATED;
1799 else
1800 events = IPCT_NEW;
1801
1802 if (cda[CTA_LABELS] &&
1803 ctnetlink_attach_labels(ct, cda) == 0)
1804 events |= (1 << IPCT_LABEL);
1805
1806 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1807 (1 << IPCT_ASSURED) |
1808 (1 << IPCT_HELPER) |
1809 (1 << IPCT_PROTOINFO) |
1810 (1 << IPCT_SEQADJ) |
1811 (1 << IPCT_MARK) | events,
1812 ct, NETLINK_CB(skb).portid,
1813 nlmsg_report(nlh));
1814 nf_ct_put(ct);
1815 }
1816
1817 return err;
1818 }
1819 /* implicit 'else' */
1820
1821 err = -EEXIST;
1822 ct = nf_ct_tuplehash_to_ctrack(h);
1823 if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
1824 spin_lock_bh(&nf_conntrack_lock);
1825 err = ctnetlink_change_conntrack(ct, cda);
1826 spin_unlock_bh(&nf_conntrack_lock);
1827 if (err == 0) {
1828 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1829 (1 << IPCT_ASSURED) |
1830 (1 << IPCT_HELPER) |
1831 (1 << IPCT_LABEL) |
1832 (1 << IPCT_PROTOINFO) |
1833 (1 << IPCT_SEQADJ) |
1834 (1 << IPCT_MARK),
1835 ct, NETLINK_CB(skb).portid,
1836 nlmsg_report(nlh));
1837 }
1838 }
1839
1840 nf_ct_put(ct);
1841 return err;
1842 }
1843
1844 static int
1845 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
1846 __u16 cpu, const struct ip_conntrack_stat *st)
1847 {
1848 struct nlmsghdr *nlh;
1849 struct nfgenmsg *nfmsg;
1850 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
1851
1852 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
1853 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
1854 if (nlh == NULL)
1855 goto nlmsg_failure;
1856
1857 nfmsg = nlmsg_data(nlh);
1858 nfmsg->nfgen_family = AF_UNSPEC;
1859 nfmsg->version = NFNETLINK_V0;
1860 nfmsg->res_id = htons(cpu);
1861
1862 if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
1863 nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
1864 nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
1865 nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
1866 nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
1867 nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
1868 nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
1869 nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
1870 nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
1871 htonl(st->insert_failed)) ||
1872 nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
1873 nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
1874 nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
1875 nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
1876 htonl(st->search_restart)))
1877 goto nla_put_failure;
1878
1879 nlmsg_end(skb, nlh);
1880 return skb->len;
1881
1882 nla_put_failure:
1883 nlmsg_failure:
1884 nlmsg_cancel(skb, nlh);
1885 return -1;
1886 }
1887
1888 static int
1889 ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
1890 {
1891 int cpu;
1892 struct net *net = sock_net(skb->sk);
1893
1894 if (cb->args[0] == nr_cpu_ids)
1895 return 0;
1896
1897 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1898 const struct ip_conntrack_stat *st;
1899
1900 if (!cpu_possible(cpu))
1901 continue;
1902
1903 st = per_cpu_ptr(net->ct.stat, cpu);
1904 if (ctnetlink_ct_stat_cpu_fill_info(skb,
1905 NETLINK_CB(cb->skb).portid,
1906 cb->nlh->nlmsg_seq,
1907 cpu, st) < 0)
1908 break;
1909 }
1910 cb->args[0] = cpu;
1911
1912 return skb->len;
1913 }
1914
1915 static int
1916 ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
1917 const struct nlmsghdr *nlh,
1918 const struct nlattr * const cda[])
1919 {
1920 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1921 struct netlink_dump_control c = {
1922 .dump = ctnetlink_ct_stat_cpu_dump,
1923 };
1924 return netlink_dump_start(ctnl, skb, nlh, &c);
1925 }
1926
1927 return 0;
1928 }
1929
1930 static int
1931 ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
1932 struct net *net)
1933 {
1934 struct nlmsghdr *nlh;
1935 struct nfgenmsg *nfmsg;
1936 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
1937 unsigned int nr_conntracks = atomic_read(&net->ct.count);
1938
1939 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
1940 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
1941 if (nlh == NULL)
1942 goto nlmsg_failure;
1943
1944 nfmsg = nlmsg_data(nlh);
1945 nfmsg->nfgen_family = AF_UNSPEC;
1946 nfmsg->version = NFNETLINK_V0;
1947 nfmsg->res_id = 0;
1948
1949 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
1950 goto nla_put_failure;
1951
1952 nlmsg_end(skb, nlh);
1953 return skb->len;
1954
1955 nla_put_failure:
1956 nlmsg_failure:
1957 nlmsg_cancel(skb, nlh);
1958 return -1;
1959 }
1960
1961 static int
1962 ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
1963 const struct nlmsghdr *nlh,
1964 const struct nlattr * const cda[])
1965 {
1966 struct sk_buff *skb2;
1967 int err;
1968
1969 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1970 if (skb2 == NULL)
1971 return -ENOMEM;
1972
1973 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid,
1974 nlh->nlmsg_seq,
1975 NFNL_MSG_TYPE(nlh->nlmsg_type),
1976 sock_net(skb->sk));
1977 if (err <= 0)
1978 goto free;
1979
1980 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1981 if (err < 0)
1982 goto out;
1983
1984 return 0;
1985
1986 free:
1987 kfree_skb(skb2);
1988 out:
1989 /* this avoids a loop in nfnetlink. */
1990 return err == -EAGAIN ? -ENOBUFS : err;
1991 }
1992
1993 static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
1994 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
1995 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
1996 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
1997 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
1998 [CTA_EXPECT_ID] = { .type = NLA_U32 },
1999 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
2000 .len = NF_CT_HELPER_NAME_LEN - 1 },
2001 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
2002 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
2003 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
2004 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
2005 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
2006 };
2007
2008 static struct nf_conntrack_expect *
2009 ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
2010 struct nf_conntrack_helper *helper,
2011 struct nf_conntrack_tuple *tuple,
2012 struct nf_conntrack_tuple *mask);
2013
2014 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
2015 static size_t
2016 ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
2017 {
2018 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
2019 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
2020 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
2021 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
2022 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
2023 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
2024 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
2025 + nla_total_size(0) /* CTA_PROTOINFO */
2026 + nla_total_size(0) /* CTA_HELP */
2027 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
2028 + ctnetlink_secctx_size(ct)
2029 #ifdef CONFIG_NF_NAT_NEEDED
2030 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2031 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
2032 #endif
2033 #ifdef CONFIG_NF_CONNTRACK_MARK
2034 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2035 #endif
2036 + ctnetlink_proto_size(ct)
2037 ;
2038 }
2039
2040 static int
2041 ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
2042 {
2043 struct nlattr *nest_parms;
2044
2045 rcu_read_lock();
2046 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
2047 if (!nest_parms)
2048 goto nla_put_failure;
2049 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
2050 goto nla_put_failure;
2051 nla_nest_end(skb, nest_parms);
2052
2053 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
2054 if (!nest_parms)
2055 goto nla_put_failure;
2056 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
2057 goto nla_put_failure;
2058 nla_nest_end(skb, nest_parms);
2059
2060 if (nf_ct_zone(ct)) {
2061 if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
2062 goto nla_put_failure;
2063 }
2064
2065 if (ctnetlink_dump_id(skb, ct) < 0)
2066 goto nla_put_failure;
2067
2068 if (ctnetlink_dump_status(skb, ct) < 0)
2069 goto nla_put_failure;
2070
2071 if (ctnetlink_dump_timeout(skb, ct) < 0)
2072 goto nla_put_failure;
2073
2074 if (ctnetlink_dump_protoinfo(skb, ct) < 0)
2075 goto nla_put_failure;
2076
2077 if (ctnetlink_dump_helpinfo(skb, ct) < 0)
2078 goto nla_put_failure;
2079
2080 #ifdef CONFIG_NF_CONNTRACK_SECMARK
2081 if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
2082 goto nla_put_failure;
2083 #endif
2084 if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
2085 goto nla_put_failure;
2086
2087 if ((ct->status & IPS_SEQ_ADJUST) &&
2088 ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
2089 goto nla_put_failure;
2090
2091 #ifdef CONFIG_NF_CONNTRACK_MARK
2092 if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
2093 goto nla_put_failure;
2094 #endif
2095 if (ctnetlink_dump_labels(skb, ct) < 0)
2096 goto nla_put_failure;
2097 rcu_read_unlock();
2098 return 0;
2099
2100 nla_put_failure:
2101 rcu_read_unlock();
2102 return -ENOSPC;
2103 }
2104
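/* Apply the changes requested via nfqueue (timeout, status, helper,
 * labels and mark) to an existing conntrack entry. Called with
 * nf_conntrack_lock held by ctnetlink_nfqueue_parse().
 */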
2105 static int
2106 ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
2107 {
2108 int err;
2109
2110 if (cda[CTA_TIMEOUT]) {
2111 err = ctnetlink_change_timeout(ct, cda);
2112 if (err < 0)
2113 return err;
2114 }
2115 if (cda[CTA_STATUS]) {
2116 err = ctnetlink_change_status(ct, cda);
2117 if (err < 0)
2118 return err;
2119 }
2120 if (cda[CTA_HELP]) {
2121 err = ctnetlink_change_helper(ct, cda);
2122 if (err < 0)
2123 return err;
2124 }
2125 if (cda[CTA_LABELS]) {
2126 err = ctnetlink_attach_labels(ct, cda);
2127 if (err < 0)
2128 return err;
2129 }
2130 #ifdef CONFIG_NF_CONNTRACK_MARK
2131 if (cda[CTA_MARK]) {
2132 u32 mask = 0, mark, newmark;
2133 if (cda[CTA_MARK_MASK])
2134 mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
2135
2136 mark = ntohl(nla_get_be32(cda[CTA_MARK]));
2137 newmark = (ct->mark & mask) ^ mark;
2138 if (newmark != ct->mark)
2139 ct->mark = newmark;
2140 }
2141 #endif
2142 return 0;
2143 }
2144
2145 static int
2146 ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
2147 {
2148 struct nlattr *cda[CTA_MAX+1];
2149 int ret;
2150
2151 ret = nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
2152 if (ret < 0)
2153 return ret;
2154
2155 spin_lock_bh(&nf_conntrack_lock);
2156 ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
2157 spin_unlock_bh(&nf_conntrack_lock);
2158
2159 return ret;
2160 }
2161
2162 static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
2163 const struct nf_conn *ct,
2164 struct nf_conntrack_tuple *tuple,
2165 struct nf_conntrack_tuple *mask)
2166 {
2167 int err;
2168
2169 err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
2170 nf_ct_l3num(ct));
2171 if (err < 0)
2172 return err;
2173
2174 return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
2175 nf_ct_l3num(ct));
2176 }
2177
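/* Attach an expectation to a conntrack on behalf of an nfqueue verdict:
 * parse the nested CTA_EXPECT_* attributes, optionally resolve the
 * helper by name, then allocate and register the expectation.
 */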
2178 static int
2179 ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2180 u32 portid, u32 report)
2181 {
2182 struct nlattr *cda[CTA_EXPECT_MAX+1];
2183 struct nf_conntrack_tuple tuple, mask;
2184 struct nf_conntrack_helper *helper = NULL;
2185 struct nf_conntrack_expect *exp;
2186 int err;
2187
2188 err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy);
2189 if (err < 0)
2190 return err;
2191
2192 err = ctnetlink_nfqueue_exp_parse((const struct nlattr * const *)cda,
2193 ct, &tuple, &mask);
2194 if (err < 0)
2195 return err;
2196
2197 if (cda[CTA_EXPECT_HELP_NAME]) {
2198 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2199
2200 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2201 nf_ct_protonum(ct));
2202 if (helper == NULL)
2203 return -EOPNOTSUPP;
2204 }
2205
2206 exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
2207 helper, &tuple, &mask);
2208 if (IS_ERR(exp))
2209 return PTR_ERR(exp);
2210
2211 err = nf_ct_expect_related_report(exp, portid, report);
2212 if (err < 0) {
2213 nf_ct_expect_put(exp);
2214 return err;
2215 }
2216
2217 return 0;
2218 }
2219
2220 static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
2221 .build_size = ctnetlink_nfqueue_build_size,
2222 .build = ctnetlink_nfqueue_build,
2223 .parse = ctnetlink_nfqueue_parse,
2224 .attach_expect = ctnetlink_nfqueue_attach_expect,
2225 .seq_adjust = nf_ct_tcp_seqadj_set,
2226 };
2227 #endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
2228
2229 /***********************************************************************
2230 * EXPECT
2231 ***********************************************************************/
2232
2233 static inline int
2234 ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2235 const struct nf_conntrack_tuple *tuple,
2236 enum ctattr_expect type)
2237 {
2238 struct nlattr *nest_parms;
2239
2240 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
2241 if (!nest_parms)
2242 goto nla_put_failure;
2243 if (ctnetlink_dump_tuples(skb, tuple) < 0)
2244 goto nla_put_failure;
2245 nla_nest_end(skb, nest_parms);
2246
2247 return 0;
2248
2249 nla_put_failure:
2250 return -1;
2251 }
2252
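/* An expectation mask only carries the source half of a tuple, so build
 * a temporary all-ones tuple, copy in the masked source fields and the
 * tuple's protocol number, and dump it like a regular tuple nest.
 */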
2253 static inline int
2254 ctnetlink_exp_dump_mask(struct sk_buff *skb,
2255 const struct nf_conntrack_tuple *tuple,
2256 const struct nf_conntrack_tuple_mask *mask)
2257 {
2258 int ret;
2259 struct nf_conntrack_l3proto *l3proto;
2260 struct nf_conntrack_l4proto *l4proto;
2261 struct nf_conntrack_tuple m;
2262 struct nlattr *nest_parms;
2263
2264 memset(&m, 0xFF, sizeof(m));
2265 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
2266 m.src.u.all = mask->src.u.all;
2267 m.dst.protonum = tuple->dst.protonum;
2268
2269 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
2270 if (!nest_parms)
2271 goto nla_put_failure;
2272
2273 rcu_read_lock();
2274 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
2275 ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
2276 if (ret >= 0) {
2277 l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
2278 tuple->dst.protonum);
2279 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
2280 }
2281 rcu_read_unlock();
2282
2283 if (unlikely(ret < 0))
2284 goto nla_put_failure;
2285
2286 nla_nest_end(skb, nest_parms);
2287
2288 return 0;
2289
2290 nla_put_failure:
2291 return -1;
2292 }
2293
2294 static const union nf_inet_addr any_addr;
2295
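/* Dump one expectation: tuple, mask, master tuple, an optional NAT
 * section, timeout, id, flags, class, and the helper and expectfn
 * names when they are known.
 */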
2296 static int
2297 ctnetlink_exp_dump_expect(struct sk_buff *skb,
2298 const struct nf_conntrack_expect *exp)
2299 {
2300 struct nf_conn *master = exp->master;
2301 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
2302 struct nf_conn_help *help;
2303 #ifdef CONFIG_NF_NAT_NEEDED
2304 struct nlattr *nest_parms;
2305 struct nf_conntrack_tuple nat_tuple = {};
2306 #endif
2307 struct nf_ct_helper_expectfn *expfn;
2308
2309 if (timeout < 0)
2310 timeout = 0;
2311
2312 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
2313 goto nla_put_failure;
2314 if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
2315 goto nla_put_failure;
2316 if (ctnetlink_exp_dump_tuple(skb,
2317 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
2318 CTA_EXPECT_MASTER) < 0)
2319 goto nla_put_failure;
2320
2321 #ifdef CONFIG_NF_NAT_NEEDED
2322 if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) ||
2323 exp->saved_proto.all) {
2324 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
2325 if (!nest_parms)
2326 goto nla_put_failure;
2327
2328 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
2329 goto nla_put_failure;
2330
2331 nat_tuple.src.l3num = nf_ct_l3num(master);
2332 nat_tuple.src.u3 = exp->saved_addr;
2333 nat_tuple.dst.protonum = nf_ct_protonum(master);
2334 nat_tuple.src.u = exp->saved_proto;
2335
2336 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
2337 CTA_EXPECT_NAT_TUPLE) < 0)
2338 goto nla_put_failure;
2339 nla_nest_end(skb, nest_parms);
2340 }
2341 #endif
2342 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
2343 nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
2344 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
2345 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
2346 goto nla_put_failure;
2347 help = nfct_help(master);
2348 if (help) {
2349 struct nf_conntrack_helper *helper;
2350
2351 helper = rcu_dereference(help->helper);
2352 if (helper &&
2353 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
2354 goto nla_put_failure;
2355 }
2356 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
2357 if (expfn != NULL &&
2358 nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
2359 goto nla_put_failure;
2360
2361 return 0;
2362
2363 nla_put_failure:
2364 return -1;
2365 }
2366
2367 static int
2368 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2369 int event, const struct nf_conntrack_expect *exp)
2370 {
2371 struct nlmsghdr *nlh;
2372 struct nfgenmsg *nfmsg;
2373 unsigned int flags = portid ? NLM_F_MULTI : 0;
2374
2375 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2376 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
2377 if (nlh == NULL)
2378 goto nlmsg_failure;
2379
2380 nfmsg = nlmsg_data(nlh);
2381 nfmsg->nfgen_family = exp->tuple.src.l3num;
2382 nfmsg->version = NFNETLINK_V0;
2383 nfmsg->res_id = 0;
2384
2385 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2386 goto nla_put_failure;
2387
2388 nlmsg_end(skb, nlh);
2389 return skb->len;
2390
2391 nlmsg_failure:
2392 nla_put_failure:
2393 nlmsg_cancel(skb, nlh);
2394 return -1;
2395 }
2396
2397 #ifdef CONFIG_NF_CONNTRACK_EVENTS
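/* Expectation event notifier: broadcast NEW/DESTROY expectation events
 * to the NFNLGRP_CONNTRACK_EXP_* groups. If the message cannot be
 * built, listeners are told via nfnetlink_set_err(-ENOBUFS).
 */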
2398 static int
2399 ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
2400 {
2401 struct nf_conntrack_expect *exp = item->exp;
2402 struct net *net = nf_ct_exp_net(exp);
2403 struct nlmsghdr *nlh;
2404 struct nfgenmsg *nfmsg;
2405 struct sk_buff *skb;
2406 unsigned int type, group;
2407 int flags = 0;
2408
2409 if (events & (1 << IPEXP_DESTROY)) {
2410 type = IPCTNL_MSG_EXP_DELETE;
2411 group = NFNLGRP_CONNTRACK_EXP_DESTROY;
2412 } else if (events & (1 << IPEXP_NEW)) {
2413 type = IPCTNL_MSG_EXP_NEW;
2414 flags = NLM_F_CREATE|NLM_F_EXCL;
2415 group = NFNLGRP_CONNTRACK_EXP_NEW;
2416 } else
2417 return 0;
2418
2419 if (!item->report && !nfnetlink_has_listeners(net, group))
2420 return 0;
2421
2422 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2423 if (skb == NULL)
2424 goto errout;
2425
2426 type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
2427 nlh = nlmsg_put(skb, item->portid, 0, type, sizeof(*nfmsg), flags);
2428 if (nlh == NULL)
2429 goto nlmsg_failure;
2430
2431 nfmsg = nlmsg_data(nlh);
2432 nfmsg->nfgen_family = exp->tuple.src.l3num;
2433 nfmsg->version = NFNETLINK_V0;
2434 nfmsg->res_id = 0;
2435
2436 rcu_read_lock();
2437 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
2438 goto nla_put_failure;
2439 rcu_read_unlock();
2440
2441 nlmsg_end(skb, nlh);
2442 nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC);
2443 return 0;
2444
2445 nla_put_failure:
2446 rcu_read_unlock();
2447 nlmsg_cancel(skb, nlh);
2448 nlmsg_failure:
2449 kfree_skb(skb);
2450 errout:
2451 nfnetlink_set_err(net, 0, 0, -ENOBUFS);
2452 return 0;
2453 }
2454 #endif
2455 static int ctnetlink_exp_done(struct netlink_callback *cb)
2456 {
2457 if (cb->args[1])
2458 nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
2459 return 0;
2460 }
2461
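/* Dump all expectations in the global hash. cb->args[0] holds the
 * current bucket and cb->args[1] the entry to resume from on the next
 * netlink dump round.
 */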
2462 static int
2463 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2464 {
2465 struct net *net = sock_net(skb->sk);
2466 struct nf_conntrack_expect *exp, *last;
2467 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2468 u_int8_t l3proto = nfmsg->nfgen_family;
2469
2470 rcu_read_lock();
2471 last = (struct nf_conntrack_expect *)cb->args[1];
2472 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
2473 restart:
2474 hlist_for_each_entry(exp, &net->ct.expect_hash[cb->args[0]],
2475 hnode) {
2476 if (l3proto && exp->tuple.src.l3num != l3proto)
2477 continue;
2478 if (cb->args[1]) {
2479 if (exp != last)
2480 continue;
2481 cb->args[1] = 0;
2482 }
2483 if (ctnetlink_exp_fill_info(skb,
2484 NETLINK_CB(cb->skb).portid,
2485 cb->nlh->nlmsg_seq,
2486 IPCTNL_MSG_EXP_NEW,
2487 exp) < 0) {
2488 if (!atomic_inc_not_zero(&exp->use))
2489 continue;
2490 cb->args[1] = (unsigned long)exp;
2491 goto out;
2492 }
2493 }
2494 if (cb->args[1]) {
2495 cb->args[1] = 0;
2496 goto restart;
2497 }
2498 }
2499 out:
2500 rcu_read_unlock();
2501 if (last)
2502 nf_ct_expect_put(last);
2503
2504 return skb->len;
2505 }
2506
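/* Dump only the expectations of a single master conntrack (passed in
 * cb->data), walking its per-conntrack expectation list.
 */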
2507 static int
2508 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2509 {
2510 struct nf_conntrack_expect *exp, *last;
2511 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2512 struct nf_conn *ct = cb->data;
2513 struct nf_conn_help *help = nfct_help(ct);
2514 u_int8_t l3proto = nfmsg->nfgen_family;
2515
2516 if (cb->args[0])
2517 return 0;
2518
2519 rcu_read_lock();
2520 last = (struct nf_conntrack_expect *)cb->args[1];
2521 restart:
2522 hlist_for_each_entry(exp, &help->expectations, lnode) {
2523 if (l3proto && exp->tuple.src.l3num != l3proto)
2524 continue;
2525 if (cb->args[1]) {
2526 if (exp != last)
2527 continue;
2528 cb->args[1] = 0;
2529 }
2530 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
2531 cb->nlh->nlmsg_seq,
2532 IPCTNL_MSG_EXP_NEW,
2533 exp) < 0) {
2534 if (!atomic_inc_not_zero(&exp->use))
2535 continue;
2536 cb->args[1] = (unsigned long)exp;
2537 goto out;
2538 }
2539 }
2540 if (cb->args[1]) {
2541 cb->args[1] = 0;
2542 goto restart;
2543 }
2544 cb->args[0] = 1;
2545 out:
2546 rcu_read_unlock();
2547 if (last)
2548 nf_ct_expect_put(last);
2549
2550 return skb->len;
2551 }
2552
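/* GET + NLM_F_DUMP with CTA_EXPECT_MASTER: look up the master conntrack
 * by tuple (and optional zone) and start a dump restricted to its
 * expectations.
 */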
2553 static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
2554 const struct nlmsghdr *nlh,
2555 const struct nlattr * const cda[])
2556 {
2557 int err;
2558 struct net *net = sock_net(ctnl);
2559 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2560 u_int8_t u3 = nfmsg->nfgen_family;
2561 struct nf_conntrack_tuple tuple;
2562 struct nf_conntrack_tuple_hash *h;
2563 struct nf_conn *ct;
2564 u16 zone = 0;
2565 struct netlink_dump_control c = {
2566 .dump = ctnetlink_exp_ct_dump_table,
2567 .done = ctnetlink_exp_done,
2568 };
2569
2570 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
2571 if (err < 0)
2572 return err;
2573
2574 if (cda[CTA_EXPECT_ZONE]) {
2575 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2576 if (err < 0)
2577 return err;
2578 }
2579
2580 h = nf_conntrack_find_get(net, zone, &tuple);
2581 if (!h)
2582 return -ENOENT;
2583
2584 ct = nf_ct_tuplehash_to_ctrack(h);
2585 c.data = ct;
2586
2587 err = netlink_dump_start(ctnl, skb, nlh, &c);
2588 nf_ct_put(ct);
2589
2590 return err;
2591 }
2592
2593 static int
2594 ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
2595 const struct nlmsghdr *nlh,
2596 const struct nlattr * const cda[])
2597 {
2598 struct net *net = sock_net(ctnl);
2599 struct nf_conntrack_tuple tuple;
2600 struct nf_conntrack_expect *exp;
2601 struct sk_buff *skb2;
2602 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2603 u_int8_t u3 = nfmsg->nfgen_family;
2604 u16 zone;
2605 int err;
2606
2607 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2608 if (cda[CTA_EXPECT_MASTER])
2609 return ctnetlink_dump_exp_ct(ctnl, skb, nlh, cda);
2610 else {
2611 struct netlink_dump_control c = {
2612 .dump = ctnetlink_exp_dump_table,
2613 .done = ctnetlink_exp_done,
2614 };
2615 return netlink_dump_start(ctnl, skb, nlh, &c);
2616 }
2617 }
2618
2619 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2620 if (err < 0)
2621 return err;
2622
2623 if (cda[CTA_EXPECT_TUPLE])
2624 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2625 else if (cda[CTA_EXPECT_MASTER])
2626 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
2627 else
2628 return -EINVAL;
2629
2630 if (err < 0)
2631 return err;
2632
2633 exp = nf_ct_expect_find_get(net, zone, &tuple);
2634 if (!exp)
2635 return -ENOENT;
2636
2637 if (cda[CTA_EXPECT_ID]) {
2638 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2639 if (ntohl(id) != (u32)(unsigned long)exp) {
2640 nf_ct_expect_put(exp);
2641 return -ENOENT;
2642 }
2643 }
2644
2645 err = -ENOMEM;
2646 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2647 if (skb2 == NULL) {
2648 nf_ct_expect_put(exp);
2649 goto out;
2650 }
2651
2652 rcu_read_lock();
2653 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid,
2654 nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
2655 rcu_read_unlock();
2656 nf_ct_expect_put(exp);
2657 if (err <= 0)
2658 goto free;
2659
2660 err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2661 if (err < 0)
2662 goto out;
2663
2664 return 0;
2665
2666 free:
2667 kfree_skb(skb2);
2668 out:
2669 /* this avoids a loop in nfnetlink. */
2670 return err == -EAGAIN ? -ENOBUFS : err;
2671 }
2672
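/* Delete expectations: a single one matched by tuple (and optionally
 * CTA_EXPECT_ID), all expectations of a given helper, or everything if
 * neither attribute is present.
 */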
2673 static int
2674 ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
2675 const struct nlmsghdr *nlh,
2676 const struct nlattr * const cda[])
2677 {
2678 struct net *net = sock_net(ctnl);
2679 struct nf_conntrack_expect *exp;
2680 struct nf_conntrack_tuple tuple;
2681 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2682 struct hlist_node *next;
2683 u_int8_t u3 = nfmsg->nfgen_family;
2684 unsigned int i;
2685 u16 zone;
2686 int err;
2687
2688 if (cda[CTA_EXPECT_TUPLE]) {
2689 /* delete a single expect by tuple */
2690 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2691 if (err < 0)
2692 return err;
2693
2694 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2695 if (err < 0)
2696 return err;
2697
2698 /* bump usage count to 2 */
2699 exp = nf_ct_expect_find_get(net, zone, &tuple);
2700 if (!exp)
2701 return -ENOENT;
2702
2703 if (cda[CTA_EXPECT_ID]) {
2704 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
2705 if (ntohl(id) != (u32)(unsigned long)exp) {
2706 nf_ct_expect_put(exp);
2707 return -ENOENT;
2708 }
2709 }
2710
2711 /* after list removal, usage count == 1 */
2712 spin_lock_bh(&nf_conntrack_lock);
2713 if (del_timer(&exp->timeout)) {
2714 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
2715 nlmsg_report(nlh));
2716 nf_ct_expect_put(exp);
2717 }
2718 spin_unlock_bh(&nf_conntrack_lock);
2719 /* have to put what we 'get' above.
2720 * after this line usage count == 0 */
2721 nf_ct_expect_put(exp);
2722 } else if (cda[CTA_EXPECT_HELP_NAME]) {
2723 char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2724 struct nf_conn_help *m_help;
2725
2726 /* delete all expectations for this helper */
2727 spin_lock_bh(&nf_conntrack_lock);
2728 for (i = 0; i < nf_ct_expect_hsize; i++) {
2729 hlist_for_each_entry_safe(exp, next,
2730 &net->ct.expect_hash[i],
2731 hnode) {
2732 m_help = nfct_help(exp->master);
2733 if (!strcmp(m_help->helper->name, name) &&
2734 del_timer(&exp->timeout)) {
2735 nf_ct_unlink_expect_report(exp,
2736 NETLINK_CB(skb).portid,
2737 nlmsg_report(nlh));
2738 nf_ct_expect_put(exp);
2739 }
2740 }
2741 }
2742 spin_unlock_bh(&nf_conntrack_lock);
2743 } else {
2744 /* No tuple or helper name given: flush all expectations. */
2745 spin_lock_bh(&nf_conntrack_lock);
2746 for (i = 0; i < nf_ct_expect_hsize; i++) {
2747 hlist_for_each_entry_safe(exp, next,
2748 &net->ct.expect_hash[i],
2749 hnode) {
2750 if (del_timer(&exp->timeout)) {
2751 nf_ct_unlink_expect_report(exp,
2752 NETLINK_CB(skb).portid,
2753 nlmsg_report(nlh));
2754 nf_ct_expect_put(exp);
2755 }
2756 }
2757 }
2758 spin_unlock_bh(&nf_conntrack_lock);
2759 }
2760
2761 return 0;
2762 }
2763 static int
2764 ctnetlink_change_expect(struct nf_conntrack_expect *x,
2765 const struct nlattr * const cda[])
2766 {
2767 if (cda[CTA_EXPECT_TIMEOUT]) {
2768 if (!del_timer(&x->timeout))
2769 return -ETIME;
2770
2771 x->timeout.expires = jiffies +
2772 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2773 add_timer(&x->timeout);
2774 }
2775 return 0;
2776 }
2777
2778 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
2779 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
2780 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
2781 };
2782
2783 static int
2784 ctnetlink_parse_expect_nat(const struct nlattr *attr,
2785 struct nf_conntrack_expect *exp,
2786 u_int8_t u3)
2787 {
2788 #ifdef CONFIG_NF_NAT_NEEDED
2789 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
2790 struct nf_conntrack_tuple nat_tuple = {};
2791 int err;
2792
2793 err = nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
2794 if (err < 0)
2795 return err;
2796
2797 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
2798 return -EINVAL;
2799
2800 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
2801 &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
2802 if (err < 0)
2803 return err;
2804
2805 exp->saved_addr = nat_tuple.src.u3;
2806 exp->saved_proto = nat_tuple.src.u;
2807 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
2808
2809 return 0;
2810 #else
2811 return -EOPNOTSUPP;
2812 #endif
2813 }
2814
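/* Allocate and fill an expectation from netlink attributes. If the
 * master conntrack has no helper, a timeout is mandatory and the
 * expectation is flagged NF_CT_EXPECT_USERSPACE.
 */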
2815 static struct nf_conntrack_expect *
2816 ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
2817 struct nf_conntrack_helper *helper,
2818 struct nf_conntrack_tuple *tuple,
2819 struct nf_conntrack_tuple *mask)
2820 {
2821 u_int32_t class = 0;
2822 struct nf_conntrack_expect *exp;
2823 struct nf_conn_help *help;
2824 int err;
2825
2826 if (cda[CTA_EXPECT_CLASS] && helper) {
2827 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
2828 if (class > helper->expect_class_max)
2829 return ERR_PTR(-EINVAL);
2830 }
2831 exp = nf_ct_expect_alloc(ct);
2832 if (!exp)
2833 return ERR_PTR(-ENOMEM);
2834
2835 help = nfct_help(ct);
2836 if (!help) {
2837 if (!cda[CTA_EXPECT_TIMEOUT]) {
2838 err = -EINVAL;
2839 goto err_out;
2840 }
2841 exp->timeout.expires =
2842 jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2843
2844 exp->flags = NF_CT_EXPECT_USERSPACE;
2845 if (cda[CTA_EXPECT_FLAGS]) {
2846 exp->flags |=
2847 ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2848 }
2849 } else {
2850 if (cda[CTA_EXPECT_FLAGS]) {
2851 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2852 exp->flags &= ~NF_CT_EXPECT_USERSPACE;
2853 } else
2854 exp->flags = 0;
2855 }
2856 if (cda[CTA_EXPECT_FN]) {
2857 const char *name = nla_data(cda[CTA_EXPECT_FN]);
2858 struct nf_ct_helper_expectfn *expfn;
2859
2860 expfn = nf_ct_helper_expectfn_find_by_name(name);
2861 if (expfn == NULL) {
2862 err = -EINVAL;
2863 goto err_out;
2864 }
2865 exp->expectfn = expfn->expectfn;
2866 } else
2867 exp->expectfn = NULL;
2868
2869 exp->class = class;
2870 exp->master = ct;
2871 exp->helper = helper;
2872 exp->tuple = *tuple;
2873 exp->mask.src.u3 = mask->src.u3;
2874 exp->mask.src.u.all = mask->src.u.all;
2875
2876 if (cda[CTA_EXPECT_NAT]) {
2877 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
2878 exp, nf_ct_l3num(ct));
2879 if (err < 0)
2880 goto err_out;
2881 }
2882 return exp;
2883 err_out:
2884 nf_ct_expect_put(exp);
2885 return ERR_PTR(err);
2886 }
2887
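/* Create an expectation from an IPCTNL_MSG_EXP_NEW request: parse the
 * tuple, mask and master tuple, look up the master conntrack, resolve
 * the helper (auto-loading its module and returning -EAGAIN so that
 * nfnetlink replays the request), then register the expectation.
 */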
2888 static int
2889 ctnetlink_create_expect(struct net *net, u16 zone,
2890 const struct nlattr * const cda[],
2891 u_int8_t u3, u32 portid, int report)
2892 {
2893 struct nf_conntrack_tuple tuple, mask, master_tuple;
2894 struct nf_conntrack_tuple_hash *h = NULL;
2895 struct nf_conntrack_helper *helper = NULL;
2896 struct nf_conntrack_expect *exp;
2897 struct nf_conn *ct;
2898 int err;
2899
2900 /* caller guarantees that those three CTA_EXPECT_* exist */
2901 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2902 if (err < 0)
2903 return err;
2904 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
2905 if (err < 0)
2906 return err;
2907 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
2908 if (err < 0)
2909 return err;
2910
2911 /* Look for master conntrack of this expectation */
2912 h = nf_conntrack_find_get(net, zone, &master_tuple);
2913 if (!h)
2914 return -ENOENT;
2915 ct = nf_ct_tuplehash_to_ctrack(h);
2916
2917 if (cda[CTA_EXPECT_HELP_NAME]) {
2918 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2919
2920 helper = __nf_conntrack_helper_find(helpname, u3,
2921 nf_ct_protonum(ct));
2922 if (helper == NULL) {
2923 #ifdef CONFIG_MODULES
2924 if (request_module("nfct-helper-%s", helpname) < 0) {
2925 err = -EOPNOTSUPP;
2926 goto err_ct;
2927 }
2928 helper = __nf_conntrack_helper_find(helpname, u3,
2929 nf_ct_protonum(ct));
2930 if (helper) {
2931 err = -EAGAIN;
2932 goto err_ct;
2933 }
2934 #endif
2935 err = -EOPNOTSUPP;
2936 goto err_ct;
2937 }
2938 }
2939
2940 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
2941 if (IS_ERR(exp)) {
2942 err = PTR_ERR(exp);
2943 goto err_ct;
2944 }
2945
2946 err = nf_ct_expect_related_report(exp, portid, report);
2947 if (err < 0)
2948 goto err_exp;
2949
2950 return 0;
2951 err_exp:
2952 nf_ct_expect_put(exp);
2953 err_ct:
2954 nf_ct_put(ct);
2955 return err;
2956 }
2957
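/* IPCTNL_MSG_EXP_NEW handler: create the expectation if it does not
 * exist yet and NLM_F_CREATE is set; otherwise update its timeout
 * unless NLM_F_EXCL was requested.
 */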
2958 static int
2959 ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
2960 const struct nlmsghdr *nlh,
2961 const struct nlattr * const cda[])
2962 {
2963 struct net *net = sock_net(ctnl);
2964 struct nf_conntrack_tuple tuple;
2965 struct nf_conntrack_expect *exp;
2966 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2967 u_int8_t u3 = nfmsg->nfgen_family;
2968 u16 zone;
2969 int err;
2970
2971 if (!cda[CTA_EXPECT_TUPLE] ||
2972 !cda[CTA_EXPECT_MASK] ||
2973 !cda[CTA_EXPECT_MASTER])
2974 return -EINVAL;
2975
2976 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2977 if (err < 0)
2978 return err;
2979
2980 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2981 if (err < 0)
2982 return err;
2983
2984 spin_lock_bh(&nf_conntrack_lock);
2985 exp = __nf_ct_expect_find(net, zone, &tuple);
2986
2987 if (!exp) {
2988 spin_unlock_bh(&nf_conntrack_lock);
2989 err = -ENOENT;
2990 if (nlh->nlmsg_flags & NLM_F_CREATE) {
2991 err = ctnetlink_create_expect(net, zone, cda,
2992 u3,
2993 NETLINK_CB(skb).portid,
2994 nlmsg_report(nlh));
2995 }
2996 return err;
2997 }
2998
2999 err = -EEXIST;
3000 if (!(nlh->nlmsg_flags & NLM_F_EXCL))
3001 err = ctnetlink_change_expect(exp, cda);
3002 spin_unlock_bh(&nf_conntrack_lock);
3003
3004 return err;
3005 }
3006
3007 static int
3008 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
3009 const struct ip_conntrack_stat *st)
3010 {
3011 struct nlmsghdr *nlh;
3012 struct nfgenmsg *nfmsg;
3013 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
3014
3015 event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
3016 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
3017 if (nlh == NULL)
3018 goto nlmsg_failure;
3019
3020 nfmsg = nlmsg_data(nlh);
3021 nfmsg->nfgen_family = AF_UNSPEC;
3022 nfmsg->version = NFNETLINK_V0;
3023 nfmsg->res_id = htons(cpu);
3024
3025 if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
3026 nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
3027 nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
3028 goto nla_put_failure;
3029
3030 nlmsg_end(skb, nlh);
3031 return skb->len;
3032
3033 nla_put_failure:
3034 nlmsg_failure:
3035 nlmsg_cancel(skb, nlh);
3036 return -1;
3037 }
3038
3039 static int
3040 ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
3041 {
3042 int cpu;
3043 struct net *net = sock_net(skb->sk);
3044
3045 if (cb->args[0] == nr_cpu_ids)
3046 return 0;
3047
3048 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
3049 const struct ip_conntrack_stat *st;
3050
3051 if (!cpu_possible(cpu))
3052 continue;
3053
3054 st = per_cpu_ptr(net->ct.stat, cpu);
3055 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
3056 cb->nlh->nlmsg_seq,
3057 cpu, st) < 0)
3058 break;
3059 }
3060 cb->args[0] = cpu;
3061
3062 return skb->len;
3063 }
3064
3065 static int
3066 ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
3067 const struct nlmsghdr *nlh,
3068 const struct nlattr * const cda[])
3069 {
3070 if (nlh->nlmsg_flags & NLM_F_DUMP) {
3071 struct netlink_dump_control c = {
3072 .dump = ctnetlink_exp_stat_cpu_dump,
3073 };
3074 return netlink_dump_start(ctnl, skb, nlh, &c);
3075 }
3076
3077 return 0;
3078 }
3079
3080 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3081 static struct nf_ct_event_notifier ctnl_notifier = {
3082 .fcn = ctnetlink_conntrack_event,
3083 };
3084
3085 static struct nf_exp_event_notifier ctnl_notifier_exp = {
3086 .fcn = ctnetlink_expect_event,
3087 };
3088 #endif
3089
3090 static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
3091 [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack,
3092 .attr_count = CTA_MAX,
3093 .policy = ct_nla_policy },
3094 [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack,
3095 .attr_count = CTA_MAX,
3096 .policy = ct_nla_policy },
3097 [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack,
3098 .attr_count = CTA_MAX,
3099 .policy = ct_nla_policy },
3100 [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack,
3101 .attr_count = CTA_MAX,
3102 .policy = ct_nla_policy },
3103 [IPCTNL_MSG_CT_GET_STATS_CPU] = { .call = ctnetlink_stat_ct_cpu },
3104 [IPCTNL_MSG_CT_GET_STATS] = { .call = ctnetlink_stat_ct },
3105 [IPCTNL_MSG_CT_GET_DYING] = { .call = ctnetlink_get_ct_dying },
3106 [IPCTNL_MSG_CT_GET_UNCONFIRMED] = { .call = ctnetlink_get_ct_unconfirmed },
3107 };
3108
3109 static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
3110 [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect,
3111 .attr_count = CTA_EXPECT_MAX,
3112 .policy = exp_nla_policy },
3113 [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect,
3114 .attr_count = CTA_EXPECT_MAX,
3115 .policy = exp_nla_policy },
3116 [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect,
3117 .attr_count = CTA_EXPECT_MAX,
3118 .policy = exp_nla_policy },
3119 [IPCTNL_MSG_EXP_GET_STATS_CPU] = { .call = ctnetlink_stat_exp_cpu },
3120 };
3121
3122 static const struct nfnetlink_subsystem ctnl_subsys = {
3123 .name = "conntrack",
3124 .subsys_id = NFNL_SUBSYS_CTNETLINK,
3125 .cb_count = IPCTNL_MSG_MAX,
3126 .cb = ctnl_cb,
3127 };
3128
3129 static const struct nfnetlink_subsystem ctnl_exp_subsys = {
3130 .name = "conntrack_expect",
3131 .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP,
3132 .cb_count = IPCTNL_MSG_EXP_MAX,
3133 .cb = ctnl_exp_cb,
3134 };
3135
3136 MODULE_ALIAS("ip_conntrack_netlink");
3137 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
3138 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
3139
3140 static int __net_init ctnetlink_net_init(struct net *net)
3141 {
3142 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3143 int ret;
3144
3145 ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
3146 if (ret < 0) {
3147 pr_err("ctnetlink_init: cannot register notifier.\n");
3148 goto err_out;
3149 }
3150
3151 ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
3152 if (ret < 0) {
3153 pr_err("ctnetlink_init: cannot register expect notifier.\n");
3154 goto err_unreg_notifier;
3155 }
3156 #endif
3157 return 0;
3158
3159 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3160 err_unreg_notifier:
3161 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3162 err_out:
3163 return ret;
3164 #endif
3165 }
3166
3167 static void ctnetlink_net_exit(struct net *net)
3168 {
3169 #ifdef CONFIG_NF_CONNTRACK_EVENTS
3170 nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
3171 nf_conntrack_unregister_notifier(net, &ctnl_notifier);
3172 #endif
3173 }
3174
3175 static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
3176 {
3177 struct net *net;
3178
3179 list_for_each_entry(net, net_exit_list, exit_list)
3180 ctnetlink_net_exit(net);
3181 }
3182
3183 static struct pernet_operations ctnetlink_net_ops = {
3184 .init = ctnetlink_net_init,
3185 .exit_batch = ctnetlink_net_exit_batch,
3186 };
3187
3188 static int __init ctnetlink_init(void)
3189 {
3190 int ret;
3191
3192 pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
3193 ret = nfnetlink_subsys_register(&ctnl_subsys);
3194 if (ret < 0) {
3195 pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
3196 goto err_out;
3197 }
3198
3199 ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
3200 if (ret < 0) {
3201 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
3202 goto err_unreg_subsys;
3203 }
3204
3205 ret = register_pernet_subsys(&ctnetlink_net_ops);
3206 if (ret < 0) {
3207 pr_err("ctnetlink_init: cannot register pernet operations.\n");
3208 goto err_unreg_exp_subsys;
3209 }
3210 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3211 /* set up interaction between nf_queue and nf_conntrack_netlink. */
3212 RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
3213 #endif
3214 return 0;
3215
3216 err_unreg_exp_subsys:
3217 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3218 err_unreg_subsys:
3219 nfnetlink_subsys_unregister(&ctnl_subsys);
3220 err_out:
3221 return ret;
3222 }
3223
3224 static void __exit ctnetlink_exit(void)
3225 {
3226 pr_info("ctnetlink: unregistering from nfnetlink.\n");
3227
3228 unregister_pernet_subsys(&ctnetlink_net_ops);
3229 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
3230 nfnetlink_subsys_unregister(&ctnl_subsys);
3231 #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
3232 RCU_INIT_POINTER(nfq_ct_hook, NULL);
3233 #endif
3234 }
3235
3236 module_init(ctnetlink_init);
3237 module_exit(ctnetlink_exit);