net/netfilter/nf_conntrack_ecache.c
/* Event cache for netfilter. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

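/* Serializes registration and unregistration of the conntrack and
 * expectation event notifiers below. */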
static DEFINE_MUTEX(nf_ct_ecache_mutex);

/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        unsigned long events, missed;
        struct nf_ct_event_notifier *notify;
        struct nf_conntrack_ecache *e;
        struct nf_ct_event item;
        int ret;

        rcu_read_lock();
        notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
        if (notify == NULL)
                goto out_unlock;

        e = nf_ct_ecache_find(ct);
        if (e == NULL)
                goto out_unlock;

        events = xchg(&e->cache, 0);

        if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
                goto out_unlock;

        /* We make a copy of the missed event cache without taking
         * the lock, thus we may send missed events twice. However,
         * this does not harm and it happens very rarely. */
        missed = e->missed;

        if (!((events | missed) & e->ctmask))
                goto out_unlock;

        item.ct = ct;
        item.pid = 0;
        item.report = 0;

        ret = notify->fcn(events | missed, &item);

        if (likely(ret >= 0 && !missed))
                goto out_unlock;

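        /* Delivery failed, or there were previously missed events: if the
         * notifier returned an error, fold the undelivered events into the
         * missed mask for a later retry; otherwise clear the missed bits
         * that were just resent. ct->lock serializes updates to e->missed. */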
        spin_lock_bh(&ct->lock);
        if (ret < 0)
                e->missed |= events;
        else
                e->missed &= ~missed;
        spin_unlock_bh(&ct->lock);

out_unlock:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);

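/* Only one conntrack event notifier may be registered per network
 * namespace at a time; a second registration fails with -EBUSY. */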
int nf_conntrack_register_notifier(struct net *net,
                                   struct nf_ct_event_notifier *new)
{
        int ret = 0;
        struct nf_ct_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
        rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;

out_unlock:
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

void nf_conntrack_unregister_notifier(struct net *net,
                                      struct nf_ct_event_notifier *new)
{
        struct nf_ct_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        BUG_ON(notify != new);
        RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);

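/* Illustrative sketch only, not part of the original file: a minimal
 * consumer of the two hooks above might look like the code below. The
 * names my_ct_event and my_notifier are hypothetical; the real in-tree
 * consumer of this interface is ctnetlink. Guarded by #if 0 so it is
 * never compiled.
 */
#if 0
static int my_ct_event(unsigned int events, struct nf_ct_event *item)
{
        /* item->ct is the conntrack entry the event bits refer to;
         * returning a negative value asks the event cache to keep the
         * events around and retry delivery later. */
        return 0;
}

static struct nf_ct_event_notifier my_notifier = {
        .fcn    = my_ct_event,
};

/* typically called from module init/exit:
 *      nf_conntrack_register_notifier(&init_net, &my_notifier);
 *      nf_conntrack_unregister_notifier(&init_net, &my_notifier);
 */
#endif

/* The expectation event notifier hooks below mirror the conntrack ones
 * above, but operate on net->ct.nf_expect_event_cb. */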
int nf_ct_expect_register_notifier(struct net *net,
                                   struct nf_exp_event_notifier *new)
{
        int ret = 0;
        struct nf_exp_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
        rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;

out_unlock:
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);

void nf_ct_expect_unregister_notifier(struct net *net,
                                      struct nf_exp_event_notifier *new)
{
        struct nf_exp_event_notifier *notify;

        mutex_lock(&nf_ct_ecache_mutex);
        notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        BUG_ON(notify != new);
        RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);

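/* Module-wide defaults; copied into each network namespace by
 * nf_conntrack_ecache_init() and then tunable per namespace through the
 * nf_conntrack_events and nf_conntrack_events_retry_timeout entries
 * under /proc/sys/net/netfilter/ when CONFIG_SYSCTL is enabled. */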
#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;

#ifdef CONFIG_SYSCTL
static struct ctl_table event_sysctl_table[] = {
        {
                .procname       = "nf_conntrack_events",
                .data           = &init_net.ct.sysctl_events,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "nf_conntrack_events_retry_timeout",
                .data           = &init_net.ct.sysctl_events_retry_timeout,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {}
};
#endif /* CONFIG_SYSCTL */

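/* Per-conntrack event cache storage, attached to each conntrack entry
 * through the extension infrastructure (NF_CT_EXT_ECACHE). */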
static struct nf_ct_ext_type event_extend __read_mostly = {
        .len    = sizeof(struct nf_conntrack_ecache),
        .align  = __alignof__(struct nf_conntrack_ecache),
        .id     = NF_CT_EXT_ECACHE,
};

#ifdef CONFIG_SYSCTL
static int nf_conntrack_event_init_sysctl(struct net *net)
{
        struct ctl_table *table;

        table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
                        GFP_KERNEL);
        if (!table)
                goto out;

        table[0].data = &net->ct.sysctl_events;
        table[1].data = &net->ct.sysctl_events_retry_timeout;

        net->ct.event_sysctl_header =
                register_net_sysctl(net, "net/netfilter", table);
        if (!net->ct.event_sysctl_header) {
                printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
                goto out_register;
        }
        return 0;

out_register:
        kfree(table);
out:
        return -ENOMEM;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
        struct ctl_table *table;

        table = net->ct.event_sysctl_header->ctl_table_arg;
        unregister_net_sysctl_table(net->ct.event_sysctl_header);
        kfree(table);
}
#else
static int nf_conntrack_event_init_sysctl(struct net *net)
{
        return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */

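/* Per-netns setup: seed the sysctl defaults, register the ecache conntrack
 * extension once (for init_net only), and hook up the sysctl table. */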
int nf_conntrack_ecache_init(struct net *net)
{
        int ret;

        net->ct.sysctl_events = nf_ct_events;
        net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;

        if (net_eq(net, &init_net)) {
                ret = nf_ct_extend_register(&event_extend);
                if (ret < 0) {
                        printk(KERN_ERR "nf_ct_event: Unable to register "
                               "event extension.\n");
                        goto out_extend_register;
                }
        }

        ret = nf_conntrack_event_init_sysctl(net);
        if (ret < 0)
                goto out_sysctl;

        return 0;

out_sysctl:
        if (net_eq(net, &init_net))
                nf_ct_extend_unregister(&event_extend);
out_extend_register:
        return ret;
}

void nf_conntrack_ecache_fini(struct net *net)
{
        nf_conntrack_event_fini_sysctl(net);
        if (net_eq(net, &init_net))
                nf_ct_extend_unregister(&event_extend);
}