/* Event cache for netfilter. */

/*
 * (C) 2005 Harald Welte <laforge@gnumonks.org>
 * (C) 2005 Patrick McHardy <kaber@trash.net>
 * (C) 2005-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2005 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
static DEFINE_MUTEX(nf_ct_ecache_mutex);

#define ECACHE_RETRY_WAIT (HZ/10)

enum retry_state {
	STATE_CONGESTED,
	STATE_RESTART,
	STATE_DONE,
};
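/* Walk one per-cpu dying list and try to deliver the pending IPCT_DESTROY
 * event for each entry.  Stop early when event delivery reports congestion
 * or the local reference buffer fills up, so the caller can retry later.
 */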
static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
{
	struct nf_conn *refs[16];
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int evicted = 0;
	enum retry_state ret = STATE_DONE;

	spin_lock(&pcpu->lock);

	hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		if (nf_ct_is_dying(ct))
			continue;

		if (nf_conntrack_event(IPCT_DESTROY, ct)) {
			ret = STATE_CONGESTED;
			break;
		}

		/* we've got the event delivered, now it's dying */
		set_bit(IPS_DYING_BIT, &ct->status);
		refs[evicted] = ct;

		if (++evicted >= ARRAY_SIZE(refs)) {
			ret = STATE_RESTART;
			break;
		}
	}

	spin_unlock(&pcpu->lock);

	/* can't _put while holding lock */
	while (evicted)
		nf_ct_put(refs[--evicted]);

	return ret;
}
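/* Delayed work that flushes destroy events for every CPU's dying list and
 * reschedules itself as long as there is more work to do or the event
 * delivery path is congested.
 */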
static void ecache_work(struct work_struct *work)
{
	struct netns_ct *ctnet =
		container_of(work, struct netns_ct, ecache_dwork.work);
	int cpu, delay = -1;
	struct ct_pcpu *pcpu;

	local_bh_disable();

	for_each_possible_cpu(cpu) {
		enum retry_state ret;

		pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);

		ret = ecache_work_evict_list(pcpu);

		switch (ret) {
		case STATE_CONGESTED:
			delay = ECACHE_RETRY_WAIT;
			goto out;
		case STATE_RESTART:
			delay = 0;
			break;
		case STATE_DONE:
			break;
		}
	}

 out:
	local_bh_enable();

	ctnet->ecache_dwork_pending = delay > 0;
	if (delay >= 0)
		schedule_delayed_work(&ctnet->ecache_dwork, delay);
}
/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned long events, missed;
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;
	struct nf_ct_event item;
	int ret;

	rcu_read_lock();
	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
	if (notify == NULL)
		goto out_unlock;

	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		goto out_unlock;

	events = xchg(&e->cache, 0);

	if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
		goto out_unlock;

	/* We make a copy of the missed event cache without taking
	 * the lock, thus we may send missed events twice. However,
	 * this does not harm and it happens very rarely. */
	missed = e->missed;

	if (!((events | missed) & e->ctmask))
		goto out_unlock;

	item.ct = ct;
	item.portid = 0;
	item.report = 0;

	ret = notify->fcn(events | missed, &item);

	if (likely(ret >= 0 && !missed))
		goto out_unlock;

	spin_lock_bh(&ct->lock);
	if (ret < 0)
		e->missed |= events;
	else
		e->missed &= ~missed;
	spin_unlock_bh(&ct->lock);

out_unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
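/* Only one conntrack event notifier can be registered per network
 * namespace; a second registration fails with -EBUSY until the current
 * one has been unregistered.
 */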
int nf_conntrack_register_notifier(struct net *net,
				   struct nf_ct_event_notifier *new)
{
	int ret;
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
	ret = 0;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
void nf_conntrack_unregister_notifier(struct net *net,
				      struct nf_ct_event_notifier *new)
{
	struct nf_ct_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
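/* Expectation event notifiers follow the same single-notifier scheme as
 * the conntrack event notifier above.
 */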
int nf_ct_expect_register_notifier(struct net *net,
				   struct nf_exp_event_notifier *new)
{
	int ret;
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
		ret = -EBUSY;
		goto out_unlock;
	}
	rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
	ret = 0;

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
void nf_ct_expect_unregister_notifier(struct net *net,
				      struct nf_exp_event_notifier *new)
{
	struct nf_exp_event_notifier *notify;

	mutex_lock(&nf_ct_ecache_mutex);
	notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
	RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
	mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
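/* Event delivery is enabled by default and can be tuned per network
 * namespace through the nf_conntrack_events sysctl.
 */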
#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;

#ifdef CONFIG_SYSCTL
static struct ctl_table event_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_events",
		.data		= &init_net.ct.sysctl_events,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{}
};
#endif /* CONFIG_SYSCTL */
static struct nf_ct_ext_type event_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_ecache),
	.align	= __alignof__(struct nf_conntrack_ecache),
	.id	= NF_CT_EXT_ECACHE,
};
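/* Each namespace registers its own copy of the sysctl table so that
 * .data can point at that namespace's sysctl_events value.
 */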
#ifdef CONFIG_SYSCTL
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
			GFP_KERNEL);
	if (!table)
		goto out;

	table[0].data = &net->ct.sysctl_events;

	/* Don't export sysctls to unprivileged users */
	if (net->user_ns != &init_user_ns)
		table[0].procname = NULL;

	net->ct.event_sysctl_header =
		register_net_sysctl(net, "net/netfilter", table);
	if (!net->ct.event_sysctl_header) {
		printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
		goto out_register;
	}
	return 0;

out_register:
	kfree(table);
out:
	return -ENOMEM;
}
static void nf_conntrack_event_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = net->ct.event_sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.event_sysctl_header);
	kfree(table);
}
#else /* CONFIG_SYSCTL */
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */
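/* Per-namespace setup: seed the sysctl default and prepare the delayed
 * eviction work used when event delivery is congested.
 */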
int nf_conntrack_ecache_pernet_init(struct net *net)
{
	net->ct.sysctl_events = nf_ct_events;
	INIT_DELAYED_WORK(&net->ct.ecache_dwork, ecache_work);
	return nf_conntrack_event_init_sysctl(net);
}
void nf_conntrack_ecache_pernet_fini(struct net *net)
{
	cancel_delayed_work_sync(&net->ct.ecache_dwork);
	nf_conntrack_event_fini_sysctl(net);
}
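/* Register the ecache extension type so conntrack entries can carry a
 * per-conntrack event cache.
 */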
int nf_conntrack_ecache_init(void)
{
	int ret = nf_ct_extend_register(&event_extend);

	if (ret < 0)
		pr_err("nf_ct_event: Unable to register event extension.\n");

	return ret;
}
void nf_conntrack_ecache_fini(void)
{
	nf_ct_extend_unregister(&event_extend);
}