netfilter: nf_tables: allow set names up to 32 bytes
[deliverable/linux.git] / net / netfilter / nf_conntrack_expect.c
CommitLineData
77ab9cff
MJ
1/* Expectation handling for nf_conntrack. */
2
3/* (C) 1999-2001 Paul `Rusty' Russell
4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
5 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
f229f6ce 6 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
77ab9cff
MJ
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/types.h>
14#include <linux/netfilter.h>
15#include <linux/skbuff.h>
16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
18#include <linux/stddef.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/percpu.h>
22#include <linux/kernel.h>
a71c0855 23#include <linux/jhash.h>
d9b93842 24#include <linux/moduleparam.h>
bc3b2d7f 25#include <linux/export.h>
457c4cbc 26#include <net/net_namespace.h>
77ab9cff
MJ
27
28#include <net/netfilter/nf_conntrack.h>
29#include <net/netfilter/nf_conntrack_core.h>
30#include <net/netfilter/nf_conntrack_expect.h>
31#include <net/netfilter/nf_conntrack_helper.h>
32#include <net/netfilter/nf_conntrack_tuple.h>
5d0aa2cc 33#include <net/netfilter/nf_conntrack_zones.h>
77ab9cff 34
/* Number of buckets in the expectation hash table; settable only at
 * module load time via the "expect_hashsize" parameter below. */
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

/* Global cap on the number of expectations per net namespace; derived
 * from nf_ct_expect_hsize in nf_conntrack_expect_init(). */
unsigned int nf_ct_expect_max __read_mostly;

/* Slab cache for struct nf_conntrack_expect objects. */
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
/* Hash seed, lazily initialized on first use via get_random_once(). */
static unsigned int nf_ct_expect_hashrnd __read_mostly;
/* nf_conntrack_expect helper functions */

/* Unlink @exp from the global hash and from its master conntrack's
 * expectation list, emit an IPEXP_DESTROY event, and drop the hash
 * table's reference.  Caller must hold nf_conntrack_expect_lock and
 * must already have stopped the expectation timer (asserted below). */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	/* RCU removal: concurrent readers may still see the entry
	 * until a grace period elapses (freeing is RCU-deferred). */
	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
6823645d 66static void nf_ct_expectation_timed_out(unsigned long ul_expect)
77ab9cff
MJ
67{
68 struct nf_conntrack_expect *exp = (void *)ul_expect;
69
ca7433df 70 spin_lock_bh(&nf_conntrack_expect_lock);
77ab9cff 71 nf_ct_unlink_expect(exp);
ca7433df 72 spin_unlock_bh(&nf_conntrack_expect_lock);
6823645d 73 nf_ct_expect_put(exp);
77ab9cff
MJ
74}
75
/* Hash an expectation tuple (destination address plus L3/L4 protocol
 * numbers and destination port) into a bucket index. */
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	/* Seed lazily so we pick up good entropy after the RNG is
	 * ready; every caller then shares the same seed. */
	get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hashrnd);

	/* Map the 32-bit hash onto [0, nf_ct_expect_hsize). */
	return reciprocal_scale(hash, nf_ct_expect_hsize);
}
/* Look up an expectation matching @tuple in @zone WITHOUT taking a
 * reference on the result.  Caller must be inside an RCU read-side
 * critical section; returns NULL when nothing matches. */
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
		    const struct nf_conntrack_zone *zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	/* Fast path: empty table, skip hashing entirely. */
	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone_equal_any(i->master, zone))
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
/* Just find a expectation corresponding to a tuple. */

/* Like __nf_ct_expect_find() but returns with a reference held;
 * returns NULL when not found or when the entry is already on its way
 * to being freed. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
		      const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	/* use == 0 means the entry is being destroyed; treat it as not
	 * found rather than resurrecting it. */
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
/* If an expectation for this connection is found, it gets delete from
 * global list then returned. */
/* NOTE(review): walks the hash with the non-RCU iterator — appears to
 * rely on the caller holding nf_conntrack_expect_lock; confirm at the
 * call sites.  On success the caller receives a reference on the
 * expectation AND on exp->master. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
		       const struct nf_conntrack_zone *zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone_equal_any(i->master, zone)) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid race with other CPUs, that for exp->master ct, is
	 * about to invoke ->destroy(), or nf_ct_delete() via timeout
	 * or early_drop().
	 *
	 * The atomic_inc_not_zero() check tells: If that fails, we
	 * know that the ct is being destroyed. If it succeeds, we
	 * can be sure the ct cannot disappear underneath.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !atomic_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		/* Permanent expectations survive a match: just take an
		 * extra reference for the caller, leave it linked. */
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		/* We beat the timer: unlink and hand its reference
		 * over to the caller. */
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo exp->master refcnt increase, if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}
/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connection never expect any others. */
	if (!help)
		return;

	spin_lock_bh(&nf_conntrack_expect_lock);
	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		/* Unlink only when we beat the timer to it; otherwise
		 * the timer callback performs the teardown itself. */
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
207
208/* Would two expected things clash? */
209static inline int expect_clash(const struct nf_conntrack_expect *a,
210 const struct nf_conntrack_expect *b)
211{
212 /* Part covered by intersection of masks must be unequal,
213 otherwise they clash */
d4156e8c 214 struct nf_conntrack_tuple_mask intersect_mask;
77ab9cff
MJ
215 int count;
216
77ab9cff 217 intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
77ab9cff
MJ
218
219 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
220 intersect_mask.src.u3.all[count] =
221 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
222 }
223
4b31814d 224 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
deedb590 225 nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
77ab9cff
MJ
226}
227
228static inline int expect_matches(const struct nf_conntrack_expect *a,
229 const struct nf_conntrack_expect *b)
230{
f64f9e71 231 return a->master == b->master && a->class == b->class &&
308ac914
DB
232 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
233 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
deedb590 234 nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
77ab9cff
MJ
235}
236
237/* Generally a bad idea to call this: could have matched already. */
6823645d 238void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
77ab9cff 239{
ca7433df 240 spin_lock_bh(&nf_conntrack_expect_lock);
4e1d4e6c
PM
241 if (del_timer(&exp->timeout)) {
242 nf_ct_unlink_expect(exp);
243 nf_ct_expect_put(exp);
77ab9cff 244 }
ca7433df 245 spin_unlock_bh(&nf_conntrack_expect_lock);
77ab9cff 246}
6823645d 247EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
77ab9cff
MJ
248
249/* We don't increase the master conntrack refcount for non-fulfilled
250 * conntracks. During the conntrack destruction, the expectations are
251 * always killed before the conntrack itself */
6823645d 252struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
77ab9cff
MJ
253{
254 struct nf_conntrack_expect *new;
255
6823645d 256 new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
77ab9cff
MJ
257 if (!new)
258 return NULL;
259
260 new->master = me;
261 atomic_set(&new->use, 1);
262 return new;
263}
6823645d 264EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
/* Fill in @exp's tuple and mask from the given address family, L4
 * protocol, addresses and ports.  A NULL @saddr or @src wildcards the
 * corresponding source field (mask zeroed); @daddr and @dst are always
 * matched exactly. */
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	/* Address length in bytes: IPv4 uses 4, otherwise 16 (IPv6). */
	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		/* No source address given: match any (mask all-zero). */
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		/* No source port given: match any. */
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
d6a9b650 323
7d0742da
PM
324static void nf_ct_expect_free_rcu(struct rcu_head *head)
325{
326 struct nf_conntrack_expect *exp;
327
328 exp = container_of(head, struct nf_conntrack_expect, rcu);
329 kmem_cache_free(nf_ct_expect_cachep, exp);
330}
331
6823645d 332void nf_ct_expect_put(struct nf_conntrack_expect *exp)
77ab9cff
MJ
333{
334 if (atomic_dec_and_test(&exp->use))
7d0742da 335 call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
77ab9cff 336}
6823645d 337EXPORT_SYMBOL_GPL(nf_ct_expect_put);
/* Link @exp into its master's expectation list and the global hash,
 * then arm the timeout timer.  Caller holds nf_conntrack_expect_lock. */
static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	/* two references : one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		/* The helper supplies a per-class timeout policy. */
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	/* NOTE(review): when no helper is attached, the timer is armed
	 * with whatever 'expires' already holds — presumably callers
	 * guarantee a helper exists here; confirm. */
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
	return 0;
}
368
369/* Race with expectations being used means we could have none to find; OK. */
6002f266
PM
370static void evict_oldest_expect(struct nf_conn *master,
371 struct nf_conntrack_expect *new)
77ab9cff 372{
b560580a 373 struct nf_conn_help *master_help = nfct_help(master);
6002f266 374 struct nf_conntrack_expect *exp, *last = NULL;
77ab9cff 375
b67bfe0d 376 hlist_for_each_entry(exp, &master_help->expectations, lnode) {
6002f266
PM
377 if (exp->class == new->class)
378 last = exp;
379 }
b560580a 380
6002f266
PM
381 if (last && del_timer(&last->timeout)) {
382 nf_ct_unlink_expect(last);
383 nf_ct_expect_put(last);
77ab9cff
MJ
384 }
385}
/* Validate @expect before insertion: retire an identical pre-existing
 * entry, reject clashing ones, enforce the helper's per-class limit
 * (evicting the oldest of the class if possible) and the global table
 * limit.  Returns a positive value when insertion may proceed, or a
 * negative errno.  Caller holds nf_conntrack_expect_lock. */
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 1;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			/* Identical expectation already present: remove
			 * it (if we beat its timer) so the new one can
			 * take its place. */
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_ct_expect_put(i);
				break;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			/* Try evicting the oldest of this class first. */
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}
/* Check and insert @expect under nf_conntrack_expect_lock, then emit
 * an IPEXP_NEW event (outside the lock) on success.  Returns 0 on
 * success or a negative errno from the check/insert stage. */
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_expect_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret <= 0)
		goto out;

	ret = nf_ct_expect_insert(expect);
	if (ret < 0)
		goto out;
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_expect_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* seq_file iterator state for /proc/net/nf_conntrack_expect: per-net
 * seq private data plus the hash bucket currently being walked. */
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};
467
468static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
77ab9cff 469{
dc5129f8 470 struct net *net = seq_file_net(seq);
5d08ad44 471 struct ct_expect_iter_state *st = seq->private;
7d0742da 472 struct hlist_node *n;
77ab9cff 473
5d08ad44 474 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
0e60ebe0 475 n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
7d0742da
PM
476 if (n)
477 return n;
5d08ad44
PM
478 }
479 return NULL;
480}
77ab9cff 481
5d08ad44
PM
482static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
483 struct hlist_node *head)
484{
dc5129f8 485 struct net *net = seq_file_net(seq);
5d08ad44 486 struct ct_expect_iter_state *st = seq->private;
77ab9cff 487
0e60ebe0 488 head = rcu_dereference(hlist_next_rcu(head));
5d08ad44
PM
489 while (head == NULL) {
490 if (++st->bucket >= nf_ct_expect_hsize)
77ab9cff 491 return NULL;
0e60ebe0 492 head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
77ab9cff 493 }
5d08ad44 494 return head;
77ab9cff
MJ
495}
496
5d08ad44 497static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
77ab9cff 498{
5d08ad44 499 struct hlist_node *head = ct_expect_get_first(seq);
77ab9cff 500
5d08ad44
PM
501 if (head)
502 while (pos && (head = ct_expect_get_next(seq, head)))
503 pos--;
504 return pos ? NULL : head;
505}
77ab9cff 506
5d08ad44 507static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
7d0742da 508 __acquires(RCU)
5d08ad44 509{
7d0742da 510 rcu_read_lock();
5d08ad44
PM
511 return ct_expect_get_idx(seq, *pos);
512}
77ab9cff 513
5d08ad44
PM
514static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
515{
516 (*pos)++;
517 return ct_expect_get_next(seq, v);
77ab9cff
MJ
518}
519
5d08ad44 520static void exp_seq_stop(struct seq_file *seq, void *v)
7d0742da 521 __releases(RCU)
77ab9cff 522{
7d0742da 523 rcu_read_unlock();
77ab9cff
MJ
524}
525
526static int exp_seq_show(struct seq_file *s, void *v)
527{
5d08ad44 528 struct nf_conntrack_expect *expect;
b87921bd 529 struct nf_conntrack_helper *helper;
5d08ad44 530 struct hlist_node *n = v;
359b9ab6 531 char *delim = "";
5d08ad44
PM
532
533 expect = hlist_entry(n, struct nf_conntrack_expect, hnode);
77ab9cff
MJ
534
535 if (expect->timeout.function)
536 seq_printf(s, "%ld ", timer_pending(&expect->timeout)
537 ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
538 else
539 seq_printf(s, "- ");
540 seq_printf(s, "l3proto = %u proto=%u ",
541 expect->tuple.src.l3num,
542 expect->tuple.dst.protonum);
543 print_tuple(s, &expect->tuple,
544 __nf_ct_l3proto_find(expect->tuple.src.l3num),
605dcad6 545 __nf_ct_l4proto_find(expect->tuple.src.l3num,
77ab9cff 546 expect->tuple.dst.protonum));
4bb119ea 547
359b9ab6
PM
548 if (expect->flags & NF_CT_EXPECT_PERMANENT) {
549 seq_printf(s, "PERMANENT");
550 delim = ",";
551 }
bc01befd 552 if (expect->flags & NF_CT_EXPECT_INACTIVE) {
359b9ab6 553 seq_printf(s, "%sINACTIVE", delim);
bc01befd
PNA
554 delim = ",";
555 }
556 if (expect->flags & NF_CT_EXPECT_USERSPACE)
557 seq_printf(s, "%sUSERSPACE", delim);
4bb119ea 558
b87921bd
PM
559 helper = rcu_dereference(nfct_help(expect->master)->helper);
560 if (helper) {
561 seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
562 if (helper->expect_policy[expect->class].name)
563 seq_printf(s, "/%s",
564 helper->expect_policy[expect->class].name);
565 }
566
1ca9e417
JP
567 seq_putc(s, '\n');
568
569 return 0;
77ab9cff
MJ
570}
/* seq_file operations for /proc/net/nf_conntrack_expect. */
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};
/* proc open handler: attach the per-net seq_file iterator. */
static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}
/* File operations backing /proc/net/nf_conntrack_expect. */
static const struct file_operations exp_file_ops = {
	.owner = THIS_MODULE,
	.open = exp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
/* Create /proc/net/nf_conntrack_expect for @net, chowned to the
 * namespace's root when it maps to a valid kuid/kgid.  No-op (but
 * still returns 0) when procfs support is compiled out. */
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *pde;
	kuid_t uid;
	kgid_t gid;

	pde = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			  &exp_file_ops);
	if (pde == NULL)
		return -ENOMEM;

	uid = make_kuid(net->user_ns, 0);
	gid = make_kgid(net->user_ns, 0);
	if (uid_valid(uid) && gid_valid(gid))
		proc_set_user(pde, uid, gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}
/* Tear down /proc/net/nf_conntrack_expect for @net. */
static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}
/* Read-only (0400) parameter: hash size is fixed at module load. */
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
a71c0855 622
83b4dbe1 623int nf_conntrack_expect_pernet_init(struct net *net)
e9c1b084 624{
a71c0855
PM
625 int err = -ENOMEM;
626
9b03f38d 627 net->ct.expect_count = 0;
d862a662 628 net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
9b03f38d 629 if (net->ct.expect_hash == NULL)
a71c0855 630 goto err1;
e9c1b084 631
dc5129f8 632 err = exp_proc_init(net);
e9c1b084 633 if (err < 0)
83b4dbe1 634 goto err2;
e9c1b084
PM
635
636 return 0;
12293bf9 637err2:
d862a662 638 nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
a71c0855 639err1:
e9c1b084
PM
640 return err;
641}
/* Per-netns teardown: remove the proc file, free the hash table. */
void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}
83b4dbe1
G
648
649int nf_conntrack_expect_init(void)
650{
651 if (!nf_ct_expect_hsize) {
652 nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
653 if (!nf_ct_expect_hsize)
654 nf_ct_expect_hsize = 1;
655 }
656 nf_ct_expect_max = nf_ct_expect_hsize * 4;
657 nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
658 sizeof(struct nf_conntrack_expect),
659 0, 0, NULL);
660 if (!nf_ct_expect_cachep)
661 return -ENOMEM;
662 return 0;
663}
/* Global teardown: flush pending RCU frees, then destroy the cache. */
void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
}
This page took 0.738575 seconds and 5 git commands to generate.