/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 *
 * February 2000: Modified by James Morris to have 1 queue per protocol.
 * 15-Mar-2000: Added NF_REPEAT --RR.
 * 08-May-2003: Internal logging interface added by Jozsef Kadlecsik.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <net/sock.h>

#include "nf_internals.h"
static DEFINE_SPINLOCK(afinfo_lock);

struct nf_afinfo *nf_afinfo[NPROTO];
EXPORT_SYMBOL(nf_afinfo);

int nf_register_afinfo(struct nf_afinfo *afinfo)
{
	spin_lock(&afinfo_lock);
	rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo);
	spin_unlock(&afinfo_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);

void nf_unregister_afinfo(struct nf_afinfo *afinfo)
{
	spin_lock(&afinfo_lock);
	rcu_assign_pointer(nf_afinfo[afinfo->family], NULL);
	spin_unlock(&afinfo_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
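
/*
 * Usage sketch: how a hypothetical caller would look up the per-family
 * helpers published above.  The rcu_dereference() pairs with the
 * rcu_assign_pointer() in nf_register_afinfo(); "example_get_afinfo"
 * is an illustrative name, not an in-tree function.
 */
#if 0
static struct nf_afinfo *example_get_afinfo(int family)
{
	/* Caller must hold rcu_read_lock(); the returned pointer is
	 * only guaranteed valid until rcu_read_unlock(). */
	return rcu_dereference(nf_afinfo[family]);
}
#endif
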
/* In this code, we can be waiting indefinitely for userspace to
 * service a packet if a hook returns NF_QUEUE.  We could keep a count
 * of skbuffs queued for userspace, and not deregister a hook unless
 * this is zero, but that sucks.  Now, we simply check when the
 * packets come back: if the hook is gone, the packet is discarded. */
struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks);
static DEFINE_SPINLOCK(nf_hook_lock);

int nf_register_hook(struct nf_hook_ops *reg)
{
	struct list_head *i;

	spin_lock_bh(&nf_hook_lock);
	list_for_each(i, &nf_hooks[reg->pf][reg->hooknum]) {
		/* Keep each chain sorted by ascending priority. */
		if (reg->priority < ((struct nf_hook_ops *)i)->priority)
			break;
	}
	list_add_rcu(&reg->list, i->prev);
	spin_unlock_bh(&nf_hook_lock);

	synchronize_net();
	return 0;
}
EXPORT_SYMBOL(nf_register_hook);

void nf_unregister_hook(struct nf_hook_ops *reg)
{
	spin_lock_bh(&nf_hook_lock);
	list_del_rcu(&reg->list);
	spin_unlock_bh(&nf_hook_lock);

	synchronize_net();
}
EXPORT_SYMBOL(nf_unregister_hook);
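
/*
 * Usage sketch: registering a hook with the functions above.  The hook
 * body, its name and the choice of IPv4 constants are illustrative
 * assumptions; real callers live in modules such as iptable_filter.
 */
#if 0
#include <linux/netfilter_ipv4.h>

static unsigned int example_hook(unsigned int hooknum,
				 struct sk_buff **pskb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	return NF_ACCEPT;	/* let every packet pass */
}

static struct nf_hook_ops example_ops = {
	.hook		= example_hook,
	.owner		= THIS_MODULE,
	.pf		= PF_INET,
	.hooknum	= NF_IP_PRE_ROUTING,
	.priority	= NF_IP_PRI_FILTER,
};

/* nf_register_hook(&example_ops) inserts example_ops in priority
 * order; nf_unregister_hook(&example_ops) removes it again. */
#endif
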
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_hook(&reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_hooks(reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_hooks);

void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		nf_unregister_hook(&reg[i]);
}
EXPORT_SYMBOL(nf_unregister_hooks);
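
/*
 * Usage sketch: batch registration with rollback, as a module with
 * several hooks would use it.  "example_ops" and the init/exit names
 * are hypothetical.
 */
#if 0
static struct nf_hook_ops example_ops[2];	/* filled in elsewhere */

static int __init example_init(void)
{
	/* Registers every entry, or unwinds the ones already added. */
	return nf_register_hooks(example_ops, ARRAY_SIZE(example_ops));
}

static void __exit example_exit(void)
{
	nf_unregister_hooks(example_ops, ARRAY_SIZE(example_ops));
}
#endif
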
unsigned int nf_iterate(struct list_head *head,
			struct sk_buff **skb,
			int hook,
			const struct net_device *indev,
			const struct net_device *outdev,
			struct list_head **i,
			int (*okfn)(struct sk_buff *),
			int hook_thresh)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this
	 * function because of risk of continuing from deleted element.
	 */
	list_for_each_continue_rcu(*i, head) {
		struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;

		if (hook_thresh > elem->priority)
			continue;

		/* Optimization: we don't need to hold module
		   reference here, since function can't sleep. --RR */
		verdict = elem->hook(hook, skb, indev, outdev, okfn);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			if (unlikely((verdict & NF_VERDICT_MASK)
						> NF_MAX_VERDICT)) {
				NFDEBUG("Evil return from %p(%u).\n",
					elem->hook, hook);
				continue;
			}
#endif
			if (verdict != NF_REPEAT)
				return verdict;
			/* NF_REPEAT: step back so this hook runs again. */
			*i = (*i)->prev;
		}
	}
	return NF_ACCEPT;
}


/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. */
int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
		 struct net_device *indev,
		 struct net_device *outdev,
		 int (*okfn)(struct sk_buff *),
		 int hook_thresh)
{
	struct list_head *elem;
	unsigned int verdict;
	int ret = 0;

	/* We may already have this, but read-locks nest anyway */
	rcu_read_lock();

	elem = &nf_hooks[pf][hook];
next_hook:
	verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev,
			     outdev, &elem, okfn, hook_thresh);
	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
		ret = 1;
		goto unlock;
	} else if (verdict == NF_DROP) {
		kfree_skb(*pskb);
		ret = -EPERM;
	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
		NFDEBUG("nf_hook: Verdict = QUEUE.\n");
		if (!nf_queue(*pskb, elem, pf, hook, indev, outdev, okfn,
			      verdict >> NF_VERDICT_BITS))
			goto next_hook;
	}
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_hook_slow);
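
/*
 * Usage sketch: the pattern the NF_HOOK() macro wraps around
 * nf_hook_slow().  A return of 1 means no hook stole the packet and
 * the caller runs okfn() itself; the "example_*" names are
 * illustrative, and INT_MIN as the threshold means "run all hooks".
 */
#if 0
static int example_deliver(struct sk_buff *skb,
			   struct net_device *indev,
			   int (*okfn)(struct sk_buff *))
{
	int ret;

	ret = nf_hook_slow(PF_INET, NF_IP_PRE_ROUTING, &skb,
			   indev, NULL, okfn, INT_MIN);
	if (ret == 1)
		ret = okfn(skb);	/* all hooks said NF_ACCEPT */
	return ret;			/* -EPERM: skb already freed */
}
#endif
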

int skb_make_writable(struct sk_buff **pskb, unsigned int writable_len)
{
	struct sk_buff *nskb;

	if (writable_len > (*pskb)->len)
		return 0;

	/* Not exclusive use of packet?  Must copy. */
	if (skb_shared(*pskb) || skb_cloned(*pskb))
		goto copy_skb;

	return pskb_may_pull(*pskb, writable_len);

copy_skb:
	nskb = skb_copy(*pskb, GFP_ATOMIC);
	if (!nskb)
		return 0;
	BUG_ON(skb_is_nonlinear(nskb));

	/* Rest of kernel will get very unhappy if we pass it a
	   suddenly-orphaned skbuff */
	if ((*pskb)->sk)
		skb_set_owner_w(nskb, (*pskb)->sk);
	kfree_skb(*pskb);
	*pskb = nskb;
	return 1;
}
EXPORT_SYMBOL(skb_make_writable);
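
/*
 * Usage sketch: a target that wants to rewrite the IP header calls
 * skb_make_writable() first; on success *pskb may point at a new,
 * private copy.  The function and the mangling step are illustrative
 * placeholders.
 */
#if 0
#include <linux/ip.h>

static int example_mangle(struct sk_buff **pskb)
{
	if (!skb_make_writable(pskb, sizeof(struct iphdr)))
		return 0;	/* copy failed or packet too short */

	/* (*pskb)->nh.iph may now be modified safely. */
	return 1;
}
#endif
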
u_int16_t nf_csum_update(u_int32_t oldval, u_int32_t newval, u_int32_t csum)
{
	u_int32_t diff[] = { oldval, newval };

	return csum_fold(csum_partial((char *)diff, sizeof(diff), ~csum));
}
EXPORT_SYMBOL(nf_csum_update);

u_int16_t nf_proto_csum_update(struct sk_buff *skb,
			       u_int32_t oldval, u_int32_t newval,
			       u_int16_t csum, int pseudohdr)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		csum = nf_csum_update(oldval, newval, csum);
		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
			skb->csum = nf_csum_update(oldval, newval, skb->csum);
	} else if (pseudohdr)
		csum = ~nf_csum_update(oldval, newval, ~csum);

	return csum;
}
EXPORT_SYMBOL(nf_proto_csum_update);
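
/*
 * Usage sketch: incremental checksum fixup as a NAT-style rewrite
 * would use it, recomputing only the delta between old and new values
 * rather than the whole sum.  Note the complemented old value, which
 * is how the in-tree callers pass it (one's-complement subtraction);
 * the function name is hypothetical.
 */
#if 0
#include <linux/ip.h>

static void example_rewrite_saddr(struct iphdr *iph, u_int32_t newaddr)
{
	u_int32_t oldaddr = iph->saddr;

	iph->saddr = newaddr;
	/* The IP header checksum covers no pseudo-header, so
	 * nf_csum_update() alone is enough here. */
	iph->check = nf_csum_update(~oldaddr, newaddr, iph->check);
}
#endif
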

/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without this, the connection may not
   be in the hash table, and hence manufactured ICMP or RST packets
   will not be associated with it. */
void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
EXPORT_SYMBOL(ip_ct_attach);

void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, struct sk_buff *);

	if (skb->nfct && (attach = ip_ct_attach) != NULL) {
		mb(); /* Just to be sure: must be read before executing this */
		attach(new, skb);
	}
}
EXPORT_SYMBOL(nf_ct_attach);
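
/*
 * Usage sketch: how a connection-tracking module would publish its
 * attach routine through the ip_ct_attach pointer above.  All names
 * here are hypothetical; in-tree this is done by the conntrack core.
 */
#if 0
static void example_attach(struct sk_buff *new, struct sk_buff *skb)
{
	/* Copy the conntrack reference from skb to new here. */
}

static int __init example_ct_init(void)
{
	ip_ct_attach = example_attach;
	return 0;
}

static void __exit example_ct_exit(void)
{
	ip_ct_attach = NULL;
	synchronize_net();	/* make sure no caller still sees us */
}
#endif
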

#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_net_netfilter;
EXPORT_SYMBOL(proc_net_netfilter);
#endif

void __init netfilter_init(void)
{
	int i, h;

	for (i = 0; i < NPROTO; i++) {
		for (h = 0; h < NF_MAX_HOOKS; h++)
			INIT_LIST_HEAD(&nf_hooks[i][h]);
	}

#ifdef CONFIG_PROC_FS
	proc_net_netfilter = proc_mkdir("netfilter", proc_net);
	if (!proc_net_netfilter)
		panic("cannot create netfilter proc entry");
#endif

	if (netfilter_queue_init() < 0)
		panic("cannot initialize nf_queue");
	if (netfilter_log_init() < 0)
		panic("cannot initialize nf_log");
}