netfilter: Fix memory leak in nf_register_net_hook
net/netfilter/core.c
/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"

static DEFINE_MUTEX(afinfo_mutex);

const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
        mutex_lock(&afinfo_mutex);
        RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
        mutex_unlock(&afinfo_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);

void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
        mutex_lock(&afinfo_mutex);
        RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
        mutex_unlock(&afinfo_mutex);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);

#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

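/*
 * Resolve the list a hook should live on: the per-netns table for the
 * classic protocol families, or the device's ingress list for
 * NFPROTO_NETDEV/NF_NETDEV_INGRESS.  Returns NULL when the combination
 * is not supported, which callers report as -ENOENT.
 */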
static struct list_head *find_nf_hook_list(struct net *net,
                                           const struct nf_hook_ops *reg)
{
        struct list_head *nf_hook_list = NULL;

        if (reg->pf != NFPROTO_NETDEV)
                nf_hook_list = &net->nf.hooks[reg->pf][reg->hooknum];
        else if (reg->hooknum == NF_NETDEV_INGRESS) {
#ifdef CONFIG_NETFILTER_INGRESS
                if (reg->dev && dev_net(reg->dev) == net)
                        nf_hook_list = &reg->dev->nf_hooks_ingress;
#endif
        }
        return nf_hook_list;
}

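/*
 * nf_register_net_hook() registers a private copy of @reg on the list
 * resolved by find_nf_hook_list(), keeping the list sorted in ascending
 * priority order.  If no list can be resolved, the copy is freed before
 * returning -ENOENT; presumably this kfree() is the leak fix named in
 * the commit subject.
 *
 * Typical caller (editorial sketch only; my_hook_fn and my_ops are
 * hypothetical names, not part of this file):
 *
 *        static const struct nf_hook_ops my_ops = {
 *                .hook     = my_hook_fn,
 *                .owner    = THIS_MODULE,
 *                .pf       = NFPROTO_IPV4,
 *                .hooknum  = NF_INET_LOCAL_IN,
 *                .priority = NF_IP_PRI_FILTER,
 *        };
 *        err = nf_register_net_hook(net, &my_ops);
 */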
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
        struct list_head *nf_hook_list;
        struct nf_hook_ops *elem, *new;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->hook = reg->hook;
        new->dev = reg->dev;
        new->owner = reg->owner;
        new->priv = reg->priv;
        new->pf = reg->pf;
        new->hooknum = reg->hooknum;
        new->priority = reg->priority;

        nf_hook_list = find_nf_hook_list(net, reg);
        if (!nf_hook_list) {
                kfree(new);
                return -ENOENT;
        }

        mutex_lock(&nf_hook_mutex);
        list_for_each_entry(elem, nf_hook_list, list) {
                if (reg->priority < elem->priority)
                        break;
        }
        list_add_rcu(&new->list, elem->list.prev);
        mutex_unlock(&nf_hook_mutex);
#ifdef CONFIG_NETFILTER_INGRESS
        if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
                net_inc_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
        static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
        return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);

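/*
 * Unregister the per-netns copy that matches @reg field for field.  The
 * entry is unlinked under nf_hook_mutex, then synchronize_net() ensures
 * no packet is still traversing it before queued packets referencing it
 * are dropped and the copy is freed.
 */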
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
        struct list_head *nf_hook_list;
        struct nf_hook_ops *elem;

        nf_hook_list = find_nf_hook_list(net, reg);
        if (!nf_hook_list)
                return;

        mutex_lock(&nf_hook_mutex);
        list_for_each_entry(elem, nf_hook_list, list) {
                if ((reg->hook == elem->hook) &&
                    (reg->dev == elem->dev) &&
                    (reg->owner == elem->owner) &&
                    (reg->priv == elem->priv) &&
                    (reg->pf == elem->pf) &&
                    (reg->hooknum == elem->hooknum) &&
                    (reg->priority == elem->priority)) {
                        list_del_rcu(&elem->list);
                        break;
                }
        }
        mutex_unlock(&nf_hook_mutex);
        if (&elem->list == nf_hook_list) {
                WARN(1, "nf_unregister_net_hook: hook not found!\n");
                return;
        }
#ifdef CONFIG_NETFILTER_INGRESS
        if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
                net_dec_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
        static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
        synchronize_net();
        nf_queue_nf_hook_drop(elem);
        kfree(elem);
}
EXPORT_SYMBOL(nf_unregister_net_hook);

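/*
 * Register an array of hooks in one namespace.  On failure the hooks
 * registered so far are unwound, so the call either fully succeeds or
 * leaves the namespace unchanged.  Typical use (sketch; my_net_ops is a
 * hypothetical array):
 *
 *        err = nf_register_net_hooks(net, my_net_ops, ARRAY_SIZE(my_net_ops));
 */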
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                          unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = nf_register_net_hook(net, &reg[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                nf_unregister_net_hooks(net, reg, i);
        return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                             unsigned int n)
{
        while (n-- > 0)
                nf_unregister_net_hook(net, &reg[n]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

static LIST_HEAD(nf_hook_list);

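/*
 * Legacy, namespace-unaware registration: the hook is applied to every
 * existing network namespace under rtnl_lock and recorded on the global
 * nf_hook_list so that namespaces created later pick it up as well (see
 * nf_register_hook_list() below).  -ENOENT from an individual namespace
 * is tolerated, e.g. for ingress hooks whose device lives elsewhere.
 */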
int nf_register_hook(struct nf_hook_ops *reg)
{
        struct net *net, *last;
        int ret;

        rtnl_lock();
        for_each_net(net) {
                ret = nf_register_net_hook(net, reg);
                if (ret && ret != -ENOENT)
                        goto rollback;
        }
        list_add_tail(&reg->list, &nf_hook_list);
        rtnl_unlock();

        return 0;
rollback:
        last = net;
        for_each_net(net) {
                if (net == last)
                        break;
                nf_unregister_net_hook(net, reg);
        }
        rtnl_unlock();
        return ret;
}
EXPORT_SYMBOL(nf_register_hook);

void nf_unregister_hook(struct nf_hook_ops *reg)
{
        struct net *net;

        rtnl_lock();
        list_del(&reg->list);
        for_each_net(net)
                nf_unregister_net_hook(net, reg);
        rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);

int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = nf_register_hook(&reg[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                nf_unregister_hooks(reg, i);
        return err;
}
EXPORT_SYMBOL(nf_register_hooks);

void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
        while (n-- > 0)
                nf_unregister_hook(&reg[n]);
}
EXPORT_SYMBOL(nf_unregister_hooks);

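/*
 * Walk the hook list starting after *elemp, skipping entries whose
 * priority is below the caller's threshold.  NF_ACCEPT continues to the
 * next hook, NF_REPEAT re-runs the same hook, and any other verdict is
 * returned to the caller along with the issuing hook via *elemp.
 */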
unsigned int nf_iterate(struct list_head *head,
                        struct sk_buff *skb,
                        struct nf_hook_state *state,
                        struct nf_hook_ops **elemp)
{
        unsigned int verdict;

        /*
         * The caller must not block between calls to this
         * function because of risk of continuing from deleted element.
         */
        list_for_each_entry_continue_rcu((*elemp), head, list) {
                if (state->thresh > (*elemp)->priority)
                        continue;

                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
repeat:
                verdict = (*elemp)->hook(*elemp, skb, state);
                if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely((verdict & NF_VERDICT_MASK)
                                                        > NF_MAX_VERDICT)) {
                                NFDEBUG("Evil return from %p(%u).\n",
                                        (*elemp)->hook, state->hook);
                                continue;
                        }
#endif
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
        }
        return NF_ACCEPT;
}


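/* Verdicts carry extra data in their upper bits: NF_DROP may encode an
 * errno (extracted below with NF_DROP_GETERR) and NF_QUEUE carries the
 * target queue number in the bits above NF_VERDICT_QBITS. */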
/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
{
        struct nf_hook_ops *elem;
        unsigned int verdict;
        int ret = 0;

        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();

        elem = list_entry_rcu(state->hook_list, struct nf_hook_ops, list);
next_hook:
        verdict = nf_iterate(state->hook_list, skb, state, &elem);
        if (verdict == NF_ACCEPT || verdict == NF_STOP) {
                ret = 1;
        } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
                kfree_skb(skb);
                ret = NF_DROP_GETERR(verdict);
                if (ret == 0)
                        ret = -EPERM;
        } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
                int err = nf_queue(skb, elem, state,
                                   verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
                        if (err == -ECANCELED)
                                goto next_hook;
                        if (err == -ESRCH &&
                           (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                                goto next_hook;
                        kfree_skb(skb);
                }
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(nf_hook_slow);


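/*
 * skb_make_writable() ensures the first @writable_len bytes of @skb are
 * linear and not shared with a clone, so they can be modified in place.
 * Returns 1 on success, 0 if @writable_len exceeds the packet length or
 * the pull fails.  A hook that rewrites the IPv4 header would typically
 * do something like (sketch):
 *
 *        if (!skb_make_writable(skb, sizeof(struct iphdr)))
 *                return NF_DROP;
 */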
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
        if (writable_len > skb->len)
                return 0;

        /* Not exclusive use of packet? Must copy. */
        if (!skb_cloned(skb)) {
                if (writable_len <= skb_headlen(skb))
                        return 1;
        } else if (skb_clone_writable(skb, writable_len))
                return 1;

        if (writable_len <= skb_headlen(skb))
                writable_len = 0;
        else
                writable_len -= skb_headlen(skb);

        return !!__pskb_pull_tail(skb, writable_len);
}
EXPORT_SYMBOL(skb_make_writable);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if connection
   tracking in use: without this, connection may not be in hash table, and hence
   manufactured ICMP or RST packets will not be associated with it. */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
                __rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

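/*
 * Attach the conntrack entry of @skb to @new via the ip_ct_attach
 * function pointer.  The indirection keeps this file free of a hard
 * dependency on the conntrack module; if conntrack is not loaded the
 * pointer is NULL and the call is a no-op.
 */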
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
        void (*attach)(struct sk_buff *, const struct sk_buff *);

        if (skb->nfct) {
                rcu_read_lock();
                attach = rcu_dereference(ip_ct_attach);
                if (attach)
                        attach(new, skb);
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL(nf_ct_attach);

void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
        void (*destroy)(struct nf_conntrack *);

        rcu_read_lock();
        destroy = rcu_dereference(nf_ct_destroy);
        BUG_ON(destroy == NULL);
        destroy(nfct);
        rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);

struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfq_ct_hook);

struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);

#endif /* CONFIG_NF_CONNTRACK */

#ifdef CONFIG_NF_NAT_NEEDED
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif

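/*
 * Replay every hook on the global nf_hook_list into a newly created
 * namespace.  Called from the pernet init path below; failures other
 * than -ENOENT unwind the hooks already added to this namespace.
 */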
static int nf_register_hook_list(struct net *net)
{
        struct nf_hook_ops *elem;
        int ret;

        rtnl_lock();
        list_for_each_entry(elem, &nf_hook_list, list) {
                ret = nf_register_net_hook(net, elem);
                if (ret && ret != -ENOENT)
                        goto out_undo;
        }
        rtnl_unlock();
        return 0;

out_undo:
        list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
                nf_unregister_net_hook(net, elem);
        rtnl_unlock();
        return ret;
}

static void nf_unregister_hook_list(struct net *net)
{
        struct nf_hook_ops *elem;

        rtnl_lock();
        list_for_each_entry(elem, &nf_hook_list, list)
                nf_unregister_net_hook(net, elem);
        rtnl_unlock();
}

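/*
 * Per-namespace setup: initialise the hook list heads, create the
 * /proc/net/netfilter directory and replay the globally registered
 * hooks.  If the replay fails the proc entry is removed again, so init
 * either fully succeeds or leaves nothing behind.
 */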
static int __net_init netfilter_net_init(struct net *net)
{
        int i, h, ret;

        for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
                for (h = 0; h < NF_MAX_HOOKS; h++)
                        INIT_LIST_HEAD(&net->nf.hooks[i][h]);
        }

#ifdef CONFIG_PROC_FS
        net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
                                                net->proc_net);
        if (!net->nf.proc_netfilter) {
                if (!net_eq(net, &init_net))
                        pr_err("cannot create netfilter proc entry");

                return -ENOMEM;
        }
#endif
        ret = nf_register_hook_list(net);
        if (ret)
                remove_proc_entry("netfilter", net->proc_net);

        return ret;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
        nf_unregister_hook_list(net);
        remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
        .init = netfilter_net_init,
        .exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
        int ret;

        ret = register_pernet_subsys(&netfilter_net_ops);
        if (ret < 0)
                goto err;

        ret = netfilter_log_init();
        if (ret < 0)
                goto err_pernet;

        return 0;
err_pernet:
        unregister_pernet_subsys(&netfilter_net_ops);
err:
        return ret;
}