/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"
30 static DEFINE_MUTEX(afinfo_mutex
);
32 const struct nf_afinfo __rcu
*nf_afinfo
[NFPROTO_NUMPROTO
] __read_mostly
;
33 EXPORT_SYMBOL(nf_afinfo
);
34 const struct nf_ipv6_ops __rcu
*nf_ipv6_ops __read_mostly
;
35 EXPORT_SYMBOL_GPL(nf_ipv6_ops
);
37 DEFINE_PER_CPU(bool, nf_skb_duplicated
);
38 EXPORT_SYMBOL_GPL(nf_skb_duplicated
);
40 int nf_register_afinfo(const struct nf_afinfo
*afinfo
)
42 mutex_lock(&afinfo_mutex
);
43 RCU_INIT_POINTER(nf_afinfo
[afinfo
->family
], afinfo
);
44 mutex_unlock(&afinfo_mutex
);
47 EXPORT_SYMBOL_GPL(nf_register_afinfo
);
49 void nf_unregister_afinfo(const struct nf_afinfo
*afinfo
)
51 mutex_lock(&afinfo_mutex
);
52 RCU_INIT_POINTER(nf_afinfo
[afinfo
->family
], NULL
);
53 mutex_unlock(&afinfo_mutex
);
56 EXPORT_SYMBOL_GPL(nf_unregister_afinfo
);
#ifdef HAVE_JUMP_LABEL
/* Static-branch keys letting the fast path skip empty hook lists. */
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

/* Serializes all hook list mutation; traversal is RCU-protected. */
static DEFINE_MUTEX(nf_hook_mutex);
65 static struct list_head
*find_nf_hook_list(struct net
*net
,
66 const struct nf_hook_ops
*reg
)
68 struct list_head
*nf_hook_list
= NULL
;
70 if (reg
->pf
!= NFPROTO_NETDEV
)
71 nf_hook_list
= &net
->nf
.hooks
[reg
->pf
][reg
->hooknum
];
72 else if (reg
->hooknum
== NF_NETDEV_INGRESS
) {
73 #ifdef CONFIG_NETFILTER_INGRESS
74 if (reg
->dev
&& dev_net(reg
->dev
) == net
)
75 nf_hook_list
= ®
->dev
->nf_hooks_ingress
;
81 int nf_register_net_hook(struct net
*net
, const struct nf_hook_ops
*reg
)
83 struct list_head
*nf_hook_list
;
84 struct nf_hook_ops
*elem
, *new;
86 new = kzalloc(sizeof(*new), GFP_KERNEL
);
90 new->hook
= reg
->hook
;
92 new->owner
= reg
->owner
;
93 new->priv
= reg
->priv
;
95 new->hooknum
= reg
->hooknum
;
96 new->priority
= reg
->priority
;
98 nf_hook_list
= find_nf_hook_list(net
, reg
);
104 mutex_lock(&nf_hook_mutex
);
105 list_for_each_entry(elem
, nf_hook_list
, list
) {
106 if (reg
->priority
< elem
->priority
)
109 list_add_rcu(&new->list
, elem
->list
.prev
);
110 mutex_unlock(&nf_hook_mutex
);
111 #ifdef CONFIG_NETFILTER_INGRESS
112 if (reg
->pf
== NFPROTO_NETDEV
&& reg
->hooknum
== NF_NETDEV_INGRESS
)
113 net_inc_ingress_queue();
115 #ifdef HAVE_JUMP_LABEL
116 static_key_slow_inc(&nf_hooks_needed
[reg
->pf
][reg
->hooknum
]);
120 EXPORT_SYMBOL(nf_register_net_hook
);
122 void nf_unregister_net_hook(struct net
*net
, const struct nf_hook_ops
*reg
)
124 struct list_head
*nf_hook_list
;
125 struct nf_hook_ops
*elem
;
127 nf_hook_list
= find_nf_hook_list(net
, reg
);
131 mutex_lock(&nf_hook_mutex
);
132 list_for_each_entry(elem
, nf_hook_list
, list
) {
133 if ((reg
->hook
== elem
->hook
) &&
134 (reg
->dev
== elem
->dev
) &&
135 (reg
->owner
== elem
->owner
) &&
136 (reg
->priv
== elem
->priv
) &&
137 (reg
->pf
== elem
->pf
) &&
138 (reg
->hooknum
== elem
->hooknum
) &&
139 (reg
->priority
== elem
->priority
)) {
140 list_del_rcu(&elem
->list
);
144 mutex_unlock(&nf_hook_mutex
);
145 if (&elem
->list
== nf_hook_list
) {
146 WARN(1, "nf_unregister_net_hook: hook not found!\n");
149 #ifdef CONFIG_NETFILTER_INGRESS
150 if (reg
->pf
== NFPROTO_NETDEV
&& reg
->hooknum
== NF_NETDEV_INGRESS
)
151 net_dec_ingress_queue();
153 #ifdef HAVE_JUMP_LABEL
154 static_key_slow_dec(&nf_hooks_needed
[reg
->pf
][reg
->hooknum
]);
157 nf_queue_nf_hook_drop(elem
);
160 EXPORT_SYMBOL(nf_unregister_net_hook
);
162 int nf_register_net_hooks(struct net
*net
, const struct nf_hook_ops
*reg
,
168 for (i
= 0; i
< n
; i
++) {
169 err
= nf_register_net_hook(net
, ®
[i
]);
177 nf_unregister_net_hooks(net
, reg
, i
);
180 EXPORT_SYMBOL(nf_register_net_hooks
);
182 void nf_unregister_net_hooks(struct net
*net
, const struct nf_hook_ops
*reg
,
186 nf_unregister_net_hook(net
, ®
[n
]);
188 EXPORT_SYMBOL(nf_unregister_net_hooks
);
/* Hooks registered globally (for every netns) via nf_register_hook(). */
static LIST_HEAD(nf_hook_list);
192 int nf_register_hook(struct nf_hook_ops
*reg
)
194 struct net
*net
, *last
;
199 ret
= nf_register_net_hook(net
, reg
);
200 if (ret
&& ret
!= -ENOENT
)
203 list_add_tail(®
->list
, &nf_hook_list
);
212 nf_unregister_net_hook(net
, reg
);
217 EXPORT_SYMBOL(nf_register_hook
);
219 void nf_unregister_hook(struct nf_hook_ops
*reg
)
224 list_del(®
->list
);
226 nf_unregister_net_hook(net
, reg
);
229 EXPORT_SYMBOL(nf_unregister_hook
);
231 int nf_register_hooks(struct nf_hook_ops
*reg
, unsigned int n
)
236 for (i
= 0; i
< n
; i
++) {
237 err
= nf_register_hook(®
[i
]);
245 nf_unregister_hooks(reg
, i
);
248 EXPORT_SYMBOL(nf_register_hooks
);
250 void nf_unregister_hooks(struct nf_hook_ops
*reg
, unsigned int n
)
253 nf_unregister_hook(®
[n
]);
255 EXPORT_SYMBOL(nf_unregister_hooks
);
257 unsigned int nf_iterate(struct list_head
*head
,
259 struct nf_hook_state
*state
,
260 struct nf_hook_ops
**elemp
)
262 unsigned int verdict
;
265 * The caller must not block between calls to this
266 * function because of risk of continuing from deleted element.
268 list_for_each_entry_continue_rcu((*elemp
), head
, list
) {
269 if (state
->thresh
> (*elemp
)->priority
)
272 /* Optimization: we don't need to hold module
273 reference here, since function can't sleep. --RR */
275 verdict
= (*elemp
)->hook(*elemp
, skb
, state
);
276 if (verdict
!= NF_ACCEPT
) {
277 #ifdef CONFIG_NETFILTER_DEBUG
278 if (unlikely((verdict
& NF_VERDICT_MASK
)
280 NFDEBUG("Evil return from %p(%u).\n",
281 (*elemp
)->hook
, state
->hook
);
285 if (verdict
!= NF_REPEAT
)
294 /* Returns 1 if okfn() needs to be executed by the caller,
295 * -EPERM for NF_DROP, 0 otherwise. */
296 int nf_hook_slow(struct sk_buff
*skb
, struct nf_hook_state
*state
)
298 struct nf_hook_ops
*elem
;
299 unsigned int verdict
;
302 /* We may already have this, but read-locks nest anyway */
305 elem
= list_entry_rcu(state
->hook_list
, struct nf_hook_ops
, list
);
307 verdict
= nf_iterate(state
->hook_list
, skb
, state
, &elem
);
308 if (verdict
== NF_ACCEPT
|| verdict
== NF_STOP
) {
310 } else if ((verdict
& NF_VERDICT_MASK
) == NF_DROP
) {
312 ret
= NF_DROP_GETERR(verdict
);
315 } else if ((verdict
& NF_VERDICT_MASK
) == NF_QUEUE
) {
316 int err
= nf_queue(skb
, elem
, state
,
317 verdict
>> NF_VERDICT_QBITS
);
319 if (err
== -ECANCELED
)
322 (verdict
& NF_VERDICT_FLAG_QUEUE_BYPASS
))
330 EXPORT_SYMBOL(nf_hook_slow
);
333 int skb_make_writable(struct sk_buff
*skb
, unsigned int writable_len
)
335 if (writable_len
> skb
->len
)
338 /* Not exclusive use of packet? Must copy. */
339 if (!skb_cloned(skb
)) {
340 if (writable_len
<= skb_headlen(skb
))
342 } else if (skb_clone_writable(skb
, writable_len
))
345 if (writable_len
<= skb_headlen(skb
))
348 writable_len
-= skb_headlen(skb
);
350 return !!__pskb_pull_tail(skb
, writable_len
);
352 EXPORT_SYMBOL(skb_make_writable
);
354 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
355 /* This does not belong here, but locally generated errors need it if connection
356 tracking in use: without this, connection may not be in hash table, and hence
357 manufactured ICMP or RST packets will not be associated with it. */
358 void (*ip_ct_attach
)(struct sk_buff
*, const struct sk_buff
*)
360 EXPORT_SYMBOL(ip_ct_attach
);
362 void nf_ct_attach(struct sk_buff
*new, const struct sk_buff
*skb
)
364 void (*attach
)(struct sk_buff
*, const struct sk_buff
*);
368 attach
= rcu_dereference(ip_ct_attach
);
374 EXPORT_SYMBOL(nf_ct_attach
);
376 void (*nf_ct_destroy
)(struct nf_conntrack
*) __rcu __read_mostly
;
377 EXPORT_SYMBOL(nf_ct_destroy
);
379 void nf_conntrack_destroy(struct nf_conntrack
*nfct
)
381 void (*destroy
)(struct nf_conntrack
*);
384 destroy
= rcu_dereference(nf_ct_destroy
);
385 BUG_ON(destroy
== NULL
);
389 EXPORT_SYMBOL(nf_conntrack_destroy
);
391 struct nfq_ct_hook __rcu
*nfq_ct_hook __read_mostly
;
392 EXPORT_SYMBOL_GPL(nfq_ct_hook
);
394 struct nfq_ct_nat_hook __rcu
*nfq_ct_nat_hook __read_mostly
;
395 EXPORT_SYMBOL_GPL(nfq_ct_nat_hook
);
397 #endif /* CONFIG_NF_CONNTRACK */
#ifdef CONFIG_NF_NAT_NEEDED
/* Hook installed by NAT to reverse-translate flow keys for policy lookup. */
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif
404 static int nf_register_hook_list(struct net
*net
)
406 struct nf_hook_ops
*elem
;
410 list_for_each_entry(elem
, &nf_hook_list
, list
) {
411 ret
= nf_register_net_hook(net
, elem
);
412 if (ret
&& ret
!= -ENOENT
)
419 list_for_each_entry_continue_reverse(elem
, &nf_hook_list
, list
)
420 nf_unregister_net_hook(net
, elem
);
425 static void nf_unregister_hook_list(struct net
*net
)
427 struct nf_hook_ops
*elem
;
430 list_for_each_entry(elem
, &nf_hook_list
, list
)
431 nf_unregister_net_hook(net
, elem
);
435 static int __net_init
netfilter_net_init(struct net
*net
)
439 for (i
= 0; i
< ARRAY_SIZE(net
->nf
.hooks
); i
++) {
440 for (h
= 0; h
< NF_MAX_HOOKS
; h
++)
441 INIT_LIST_HEAD(&net
->nf
.hooks
[i
][h
]);
444 #ifdef CONFIG_PROC_FS
445 net
->nf
.proc_netfilter
= proc_net_mkdir(net
, "netfilter",
447 if (!net
->nf
.proc_netfilter
) {
448 if (!net_eq(net
, &init_net
))
449 pr_err("cannot create netfilter proc entry");
454 ret
= nf_register_hook_list(net
);
456 remove_proc_entry("netfilter", net
->proc_net
);
461 static void __net_exit
netfilter_net_exit(struct net
*net
)
463 nf_unregister_hook_list(net
);
464 remove_proc_entry("netfilter", net
->proc_net
);
467 static struct pernet_operations netfilter_net_ops
= {
468 .init
= netfilter_net_init
,
469 .exit
= netfilter_net_exit
,
472 int __init
netfilter_init(void)
476 ret
= register_pernet_subsys(&netfilter_net_ops
);
480 ret
= netfilter_log_init();
486 unregister_pernet_subsys(&netfilter_net_ops
);