net/netfilter/nf_queue.c

/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
static const struct nf_queue_handler __rcu *queue_handler __read_mostly;

/* Register the queue handler.  Only one queueing backend
 * (nfnetlink_queue) exists, so registering while another handler is
 * already set is a bug and only triggers the WARN_ON below. */
void nf_register_queue_handler(const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(queue_handler));
	rcu_assign_pointer(queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
{
	RCU_INIT_POINTER(queue_handler, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
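
/*
 * Illustrative sketch (not part of this file): how a queue backend such
 * as nfnetlink_queue would use the two helpers above.  The handler and
 * callback names are made up for the example; only the ->outfn member
 * and the register/unregister calls reflect the real API.
 *
 *	static int example_enqueue(struct nf_queue_entry *entry,
 *				   unsigned int queuenum)
 *	{
 *		// hand the packet to userspace; return 0 on success,
 *		// a negative errno to make __nf_queue() fail
 *		return 0;
 *	}
 *
 *	static const struct nf_queue_handler example_qh = {
 *		.outfn	= example_enqueue,
 *	};
 *
 *	// module init:	nf_register_queue_handler(&example_qh);
 *	// module exit:	flush all queued packets, then
 *	//		nf_unregister_queue_handler();
 */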

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	/* Release those devices we held, or Alexey will kill me. */
	if (entry->indev)
		dev_put(entry->indev);
	if (entry->outdev)
		dev_put(entry->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;

		if (nf_bridge->physindev)
			dev_put(nf_bridge->physindev);
		if (nf_bridge->physoutdev)
			dev_put(nf_bridge->physoutdev);
	}
#endif
	/* Drop reference to owner of hook which queued us. */
	module_put(entry->elem->owner);
}

/* Bump dev refs so they don't vanish while packet is out */
static bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	if (!try_module_get(entry->elem->owner))
		return false;

	if (entry->indev)
		dev_hold(entry->indev);
	if (entry->outdev)
		dev_hold(entry->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
		struct net_device *physdev;

		physdev = nf_bridge->physindev;
		if (physdev)
			dev_hold(physdev);
		physdev = nf_bridge->physoutdev;
		if (physdev)
			dev_hold(physdev);
	}
#endif

	return true;
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
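/* Error returns (summary of the paths below): -ESRCH if no queue
 * handler is registered, -ENOENT if there is no afinfo for the protocol
 * family, -ENOMEM if the queue entry cannot be allocated, -ECANCELED if
 * the hook's owner module is going away, or whatever negative value the
 * handler's ->outfn returns.
 */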
static int __nf_queue(struct sk_buff *skb,
		      struct nf_hook_ops *elem,
		      u_int8_t pf, unsigned int hook,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct sk_buff *),
		      unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	rcu_read_lock();

	qh = rcu_dereference(queue_handler);
	if (!qh) {
		status = -ESRCH;
		goto err_unlock;
	}

	afinfo = nf_get_afinfo(pf);
	if (!afinfo)
		goto err_unlock;

	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err_unlock;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.elem	= elem,
		.pf	= pf,
		.hook	= hook,
		.indev	= indev,
		.outdev	= outdev,
		.okfn	= okfn,
	};

	if (!nf_queue_entry_get_refs(entry)) {
		status = -ECANCELED;
		goto err_unlock;
	}
	skb_dst_force(skb);
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	rcu_read_unlock();

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err_unlock:
	rcu_read_unlock();
err:
	kfree(entry);
	return status;
}

#ifdef CONFIG_BRIDGE_NETFILTER
/* When called from bridge netfilter, skb->data must point to MAC header
 * before calling skb_gso_segment(). Else, original MAC header is lost
 * and segmented skbs will be sent to wrong destination.
 */
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
	if (skb->nf_bridge)
		__skb_push(skb, skb->network_header - skb->mac_header);
}

static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
	if (skb->nf_bridge)
		__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s) do {} while (0)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif

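/* Queue a packet for the registered handler, segmenting GSO skbs first
 * since each segment must be queued (and later reinjected) on its own.
 * If at least one segment was queued, the original skb is freed here and
 * 0 is returned; otherwise the last error goes back to the caller
 * (typically nf_hook_slow(), which treats -ECANCELED as "skip this hook"
 * and may continue on -ESRCH when the queue-bypass flag is set).
 */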
int nf_queue(struct sk_buff *skb,
	     struct nf_hook_ops *elem,
	     u_int8_t pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	struct sk_buff *segs;
	int err = -EINVAL;
	unsigned int queued;

	if (!skb_is_gso(skb))
		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
				  queuenum);

	switch (pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	nf_bridge_adjust_skb_data(skb);
	segs = skb_gso_segment(skb, 0);
	/* Does not use PTR_ERR to limit the number of error codes that can be
	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to
	 * mean 'ignore this hook'.
	 */
	if (IS_ERR(segs))
		goto out_err;
	queued = 0;
	err = 0;
	do {
		struct sk_buff *nskb = segs->next;

		segs->next = NULL;
		if (err == 0) {
			nf_bridge_adjust_segmented_data(segs);
			err = __nf_queue(segs, elem, pf, hook, indev,
					 outdev, okfn, queuenum);
		}
		if (err == 0)
			queued++;
		else
			kfree_skb(segs);
		segs = nskb;
	} while (segs);

	if (queued) {
		kfree_skb(skb);
		return 0;
	}
out_err:
	nf_bridge_adjust_segmented_data(skb);
	return err;
}

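/* Called by the queue handler to hand a queued packet back, together
 * with the verdict it received from userspace.  Releases the references
 * taken at queue time, re-routes the packet on NF_ACCEPT, and then
 * resumes hook traversal (or drops/re-queues the packet) according to
 * the verdict.
 */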
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct sk_buff *skb = entry->skb;
	struct nf_hook_ops *elem = entry->elem;
	const struct nf_afinfo *afinfo;
	int err;

	rcu_read_lock();

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
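	/* NF_REPEAT means "run the queueing hook itself again": step back
	 * one element so that nf_iterate(), which continues after *elem,
	 * starts at the hook that originally queued the packet.
	 */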
	if (verdict == NF_REPEAT) {
		elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->pf);
		if (!afinfo || afinfo->reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
				     skb, entry->hook,
				     entry->indev, entry->outdev, &elem,
				     entry->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->okfn(skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
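		/* The upper bits of the verdict carry the target queue
		 * number.  On -ECANCELED (hook asked to be skipped) or on
		 * -ESRCH with the queue-bypass flag set (no handler, but
		 * bypass allowed) traversal continues at the next hook;
		 * any other error drops the packet.
		 */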
		err = __nf_queue(skb, elem, entry->pf, entry->hook,
				 entry->indev, entry->outdev, entry->okfn,
				 verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			if (err == -ECANCELED)
				goto next_hook;
			if (err == -ESRCH &&
			    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}
	rcu_read_unlock();
	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);