Merge tag 'mac80211-for-davem-2016-06-09' of git://git.kernel.org/pub/scm/linux/kerne...
[deliverable/linux.git] / net / ipv4 / netfilter / nf_conntrack_l3proto_ipv4_compat.c
1 /* ip_conntrack proc compat - based on ip_conntrack_standalone.c
2 *
3 * (C) 1999-2001 Paul `Rusty' Russell
4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
5 * (C) 2006-2010 Patrick McHardy <kaber@trash.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11 #include <linux/types.h>
12 #include <linux/proc_fs.h>
13 #include <linux/seq_file.h>
14 #include <linux/percpu.h>
15 #include <linux/security.h>
16 #include <net/net_namespace.h>
17
18 #include <linux/netfilter.h>
19 #include <net/netfilter/nf_conntrack_core.h>
20 #include <net/netfilter/nf_conntrack_l3proto.h>
21 #include <net/netfilter/nf_conntrack_l4proto.h>
22 #include <net/netfilter/nf_conntrack_expect.h>
23 #include <net/netfilter/nf_conntrack_acct.h>
24 #include <linux/rculist_nulls.h>
25 #include <linux/export.h>
26
/* Iterator state for the /proc/net/ip_conntrack seq_file walk. */
struct ct_iter_state {
	struct seq_net_private p;	/* must be first: seq_file_net() relies on it */
	unsigned int bucket;		/* conntrack hash bucket being walked */
};
31
32 static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
33 {
34 struct ct_iter_state *st = seq->private;
35 struct hlist_nulls_node *n;
36
37 for (st->bucket = 0;
38 st->bucket < nf_conntrack_htable_size;
39 st->bucket++) {
40 n = rcu_dereference(
41 hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
42 if (!is_a_nulls(n))
43 return n;
44 }
45 return NULL;
46 }
47
/* Advance past @head to the next conntrack entry, crossing hash
 * buckets as needed.  Chains are nulls-terminated and the nulls value
 * encodes the owning bucket: if it does not match st->bucket we
 * presumably raced with an entry being moved under RCU, so the current
 * bucket is re-read instead of advancing (NOTE(review): standard
 * hlist_nulls restart pattern — confirm against rculist_nulls docs).
 */
static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
					    struct hlist_nulls_node *head)
{
	struct ct_iter_state *st = seq->private;

	head = rcu_dereference(hlist_nulls_next_rcu(head));
	while (is_a_nulls(head)) {
		if (likely(get_nulls_value(head) == st->bucket)) {
			/* genuine end of chain: move to the next bucket */
			if (++st->bucket >= nf_conntrack_htable_size)
				return NULL;
		}
		head = rcu_dereference(
			hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
	}
	return head;
}
64
65 static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
66 {
67 struct hlist_nulls_node *head = ct_get_first(seq);
68
69 if (head)
70 while (pos && (head = ct_get_next(seq, head)))
71 pos--;
72 return pos ? NULL : head;
73 }
74
/* seq_file ->start: take the RCU read lock (released in ct_seq_stop)
 * and locate the entry at *pos.
 */
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_get_idx(seq, *pos);
}
81
/* seq_file ->next: step to the following conntrack entry. */
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_get_next(s, v);
}
87
/* seq_file ->stop: drop the RCU read lock taken in ct_seq_start(). */
static void ct_seq_stop(struct seq_file *s, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
93
#ifdef CONFIG_NF_CONNTRACK_SECMARK
/* Print "secctx=<context> " for @ct's secmark.  A failure from
 * security_secid_to_secctx() is silently ignored so the rest of the
 * conntrack line is still emitted.
 */
static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
	int ret;
	u32 len;
	char *secctx;

	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
	if (ret)
		return;

	seq_printf(s, "secctx=%s ", secctx);

	security_release_secctx(secctx, len);
}
#else
/* No-op when secmark support is compiled out. */
static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
}
#endif
114
115 static bool ct_seq_should_skip(const struct nf_conn *ct,
116 const struct net *net,
117 const struct nf_conntrack_tuple_hash *hash)
118 {
119 /* we only want to print DIR_ORIGINAL */
120 if (NF_CT_DIRECTION(hash))
121 return true;
122
123 if (nf_ct_l3num(ct) != AF_INET)
124 return true;
125
126 if (!net_eq(nf_ct_net(ct), net))
127 return true;
128
129 return false;
130 }
131
132 static int ct_seq_show(struct seq_file *s, void *v)
133 {
134 struct nf_conntrack_tuple_hash *hash = v;
135 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
136 const struct nf_conntrack_l3proto *l3proto;
137 const struct nf_conntrack_l4proto *l4proto;
138 int ret = 0;
139
140 NF_CT_ASSERT(ct);
141 if (ct_seq_should_skip(ct, seq_file_net(s), hash))
142 return 0;
143
144 if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
145 return 0;
146
147 /* check if we raced w. object reuse */
148 if (!nf_ct_is_confirmed(ct) ||
149 ct_seq_should_skip(ct, seq_file_net(s), hash))
150 goto release;
151
152 l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
153 NF_CT_ASSERT(l3proto);
154 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
155 NF_CT_ASSERT(l4proto);
156
157 ret = -ENOSPC;
158 seq_printf(s, "%-8s %u %ld ",
159 l4proto->name, nf_ct_protonum(ct),
160 timer_pending(&ct->timeout)
161 ? (long)(ct->timeout.expires - jiffies)/HZ : 0);
162
163 if (l4proto->print_conntrack)
164 l4proto->print_conntrack(s, ct);
165
166 if (seq_has_overflowed(s))
167 goto release;
168
169 print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
170 l3proto, l4proto);
171
172 if (seq_has_overflowed(s))
173 goto release;
174
175 if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
176 goto release;
177
178 if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
179 seq_printf(s, "[UNREPLIED] ");
180
181 print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
182 l3proto, l4proto);
183
184 if (seq_has_overflowed(s))
185 goto release;
186
187 if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
188 goto release;
189
190 if (test_bit(IPS_ASSURED_BIT, &ct->status))
191 seq_printf(s, "[ASSURED] ");
192
193 #ifdef CONFIG_NF_CONNTRACK_MARK
194 seq_printf(s, "mark=%u ", ct->mark);
195 #endif
196
197 ct_show_secctx(s, ct);
198
199 seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
200
201 if (seq_has_overflowed(s))
202 goto release;
203
204 ret = 0;
205 release:
206 nf_ct_put(ct);
207 return ret;
208 }
209
/* seq_file callbacks for /proc/net/ip_conntrack. */
static const struct seq_operations ct_seq_ops = {
	.start = ct_seq_start,
	.next  = ct_seq_next,
	.stop  = ct_seq_stop,
	.show  = ct_seq_show
};
216
/* ->open for /proc/net/ip_conntrack: per-netns seq_file with
 * ct_iter_state as iterator storage.
 */
static int ct_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_seq_ops,
			    sizeof(struct ct_iter_state));
}
222
/* file_operations for /proc/net/ip_conntrack. */
static const struct file_operations ct_file_ops = {
	.owner   = THIS_MODULE,
	.open    = ct_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
230
/* expects */

/* Iterator state for the /proc/net/ip_conntrack_expect walk. */
struct ct_expect_iter_state {
	struct seq_net_private p;	/* must be first: seq_file_net() relies on it */
	unsigned int bucket;		/* expectation hash bucket being walked */
};
236
237 static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
238 {
239 struct ct_expect_iter_state *st = seq->private;
240 struct hlist_node *n;
241
242 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
243 n = rcu_dereference(
244 hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
245 if (n)
246 return n;
247 }
248 return NULL;
249 }
250
251 static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
252 struct hlist_node *head)
253 {
254 struct ct_expect_iter_state *st = seq->private;
255
256 head = rcu_dereference(hlist_next_rcu(head));
257 while (head == NULL) {
258 if (++st->bucket >= nf_ct_expect_hsize)
259 return NULL;
260 head = rcu_dereference(
261 hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
262 }
263 return head;
264 }
265
266 static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
267 {
268 struct hlist_node *head = ct_expect_get_first(seq);
269
270 if (head)
271 while (pos && (head = ct_expect_get_next(seq, head)))
272 pos--;
273 return pos ? NULL : head;
274 }
275
/* seq_file ->start: take the RCU read lock (released in exp_seq_stop)
 * and locate the expectation at *pos.
 */
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}
282
/* seq_file ->next: step to the following expectation. */
static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}
288
/* seq_file ->stop: drop the RCU read lock taken in exp_seq_start(). */
static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
294
295 static int exp_seq_show(struct seq_file *s, void *v)
296 {
297 struct nf_conntrack_expect *exp;
298 const struct hlist_node *n = v;
299
300 exp = hlist_entry(n, struct nf_conntrack_expect, hnode);
301
302 if (!net_eq(nf_ct_net(exp->master), seq_file_net(s)))
303 return 0;
304
305 if (exp->tuple.src.l3num != AF_INET)
306 return 0;
307
308 if (exp->timeout.function)
309 seq_printf(s, "%ld ", timer_pending(&exp->timeout)
310 ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
311 else
312 seq_printf(s, "- ");
313
314 seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);
315
316 print_tuple(s, &exp->tuple,
317 __nf_ct_l3proto_find(exp->tuple.src.l3num),
318 __nf_ct_l4proto_find(exp->tuple.src.l3num,
319 exp->tuple.dst.protonum));
320 seq_putc(s, '\n');
321
322 return 0;
323 }
324
/* seq_file callbacks for /proc/net/ip_conntrack_expect. */
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next  = exp_seq_next,
	.stop  = exp_seq_stop,
	.show  = exp_seq_show
};
331
/* ->open for /proc/net/ip_conntrack_expect: per-netns seq_file with
 * ct_expect_iter_state as iterator storage.
 */
static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}
337
/* file_operations for /proc/net/ip_conntrack_expect. */
static const struct file_operations ip_exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
345
/* seq_file ->start for /proc/net/stat/ip_conntrack.
 * Position encoding: *pos == 0 yields the header token, *pos == n + 1
 * yields the stats of possible cpu n; *pos is advanced past skipped
 * (not possible) cpu numbers.
 */
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;	/* resume after this cpu next time */
		return per_cpu_ptr(net->ct.stat, cpu);
	}

	return NULL;
}
363
/* seq_file ->next: advance to the next possible cpu's stats, updating
 * *pos to cpu + 1 (see ct_cpu_seq_start() for the encoding).
 */
static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(net->ct.stat, cpu);
	}

	return NULL;
}
378
/* seq_file ->stop: nothing to release — per-cpu stats need no lock. */
static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
382
383 static int ct_cpu_seq_show(struct seq_file *seq, void *v)
384 {
385 struct net *net = seq_file_net(seq);
386 unsigned int nr_conntracks = atomic_read(&net->ct.count);
387 const struct ip_conntrack_stat *st = v;
388
389 if (v == SEQ_START_TOKEN) {
390 seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
391 return 0;
392 }
393
394 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
395 "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
396 nr_conntracks,
397 st->searched,
398 st->found,
399 st->new,
400 st->invalid,
401 st->ignore,
402 st->delete,
403 st->delete_list,
404 st->insert,
405 st->insert_failed,
406 st->drop,
407 st->early_drop,
408 st->error,
409
410 st->expect_new,
411 st->expect_create,
412 st->expect_delete,
413 st->search_restart
414 );
415 return 0;
416 }
417
/* seq_file callbacks for /proc/net/stat/ip_conntrack. */
static const struct seq_operations ct_cpu_seq_ops = {
	.start = ct_cpu_seq_start,
	.next  = ct_cpu_seq_next,
	.stop  = ct_cpu_seq_stop,
	.show  = ct_cpu_seq_show,
};
424
/* ->open for /proc/net/stat/ip_conntrack: per-netns seq_file; no
 * extra iterator state beyond seq_net_private is needed.
 */
static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_cpu_seq_ops,
			    sizeof(struct seq_net_private));
}
430
/* file_operations for /proc/net/stat/ip_conntrack. */
static const struct file_operations ct_cpu_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = ct_cpu_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
438
/* Per-netns init: create the compat proc entries
 * /proc/net/ip_conntrack, /proc/net/ip_conntrack_expect and
 * /proc/net/stat/ip_conntrack.  On failure, already-created entries
 * are unwound via the goto chain and -ENOMEM is returned.
 */
static int __net_init ip_conntrack_net_init(struct net *net)
{
	struct proc_dir_entry *proc, *proc_exp, *proc_stat;

	proc = proc_create("ip_conntrack", 0440, net->proc_net, &ct_file_ops);
	if (!proc)
		goto err1;

	proc_exp = proc_create("ip_conntrack_expect", 0440, net->proc_net,
			       &ip_exp_file_ops);
	if (!proc_exp)
		goto err2;

	proc_stat = proc_create("ip_conntrack", S_IRUGO,
				net->proc_net_stat, &ct_cpu_seq_fops);
	if (!proc_stat)
		goto err3;
	return 0;

err3:
	remove_proc_entry("ip_conntrack_expect", net->proc_net);
err2:
	remove_proc_entry("ip_conntrack", net->proc_net);
err1:
	return -ENOMEM;
}
465
/* Per-netns exit: remove the three proc entries created in
 * ip_conntrack_net_init().
 */
static void __net_exit ip_conntrack_net_exit(struct net *net)
{
	remove_proc_entry("ip_conntrack", net->proc_net_stat);
	remove_proc_entry("ip_conntrack_expect", net->proc_net);
	remove_proc_entry("ip_conntrack", net->proc_net);
}
472
/* pernet hooks so each network namespace gets its own proc entries. */
static struct pernet_operations ip_conntrack_net_ops = {
	.init = ip_conntrack_net_init,
	.exit = ip_conntrack_net_exit,
};
477
/* Register the compat proc interface for all current and future
 * network namespaces.  Returns 0 or a negative errno.
 */
int __init nf_conntrack_ipv4_compat_init(void)
{
	return register_pernet_subsys(&ip_conntrack_net_ops);
}
482
/* Unregister the compat proc interface registered in
 * nf_conntrack_ipv4_compat_init().
 */
void __exit nf_conntrack_ipv4_compat_fini(void)
{
	unregister_pernet_subsys(&ip_conntrack_net_ops);
}
This page took 0.049769 seconds and 5 git commands to generate.