/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif

static DEFINE_RWLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto = NULL;

/* Calculated at init based on memory size */
static unsigned int nf_nat_htable_size;
static int nf_nat_vmalloced;

static struct hlist_head *bysource;

#define MAX_IP_NAT_PROTO 256
static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO];

static inline struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
	return rcu_dereference(nf_nat_protos[protonum]);
}

struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
	struct nf_nat_protocol *p;

	rcu_read_lock();
	p = __nf_nat_proto_find(protonum);
	if (!try_module_get(p->me))
		p = &nf_nat_unknown_protocol;
	rcu_read_unlock();

	return p;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);

void
nf_nat_proto_put(struct nf_nat_protocol *p)
{
	module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_put);

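/* Minimal usage sketch (not part of this file; the function name is
 * hypothetical): callers that need to hold a protocol reference across a
 * sleepable section pair nf_nat_proto_find_get() with nf_nat_proto_put().
 */
#if 0
static void example_proto_usage(u_int8_t protonum)
{
	struct nf_nat_protocol *p;

	p = nf_nat_proto_find_get(protonum);	/* takes a module reference */
	/* ... use p->in_range(), p->unique_tuple(), etc. ... */
	nf_nat_proto_put(p);			/* drops the reference */
}
#endif
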
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct nf_conntrack_tuple *tuple)
{
	/* Original src, to ensure we map it consistently if poss. */
	return jhash_3words((__force u32)tuple->src.u3.ip, tuple->src.u.all,
			    tuple->dst.protonum, 0) % nf_nat_htable_size;
}

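/* Illustrative sketch (hypothetical values, not part of this file): the
 * bucket depends only on the original source address, source port and
 * protocol, so every connection from one client/port/proto pair lands in
 * the same chain regardless of destination.
 */
#if 0
static void example_hash_by_src(void)
{
	struct nf_conntrack_tuple t;

	memset(&t, 0, sizeof(t));
	t.src.u3.ip = htonl(0xc0a80105);	/* 192.168.1.5 */
	t.src.u.all = htons(1024);
	t.dst.protonum = IPPROTO_TCP;
	/* hash_by_src(&t) is a fixed bucket in [0, nf_nat_htable_size) */
	printk("bucket=%u\n", hash_by_src(&t));
}
#endif
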
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	   incoming ones.  NAT means they don't have a fixed mapping,
	   so we invert the tuple and look for the incoming reply.

	   We could keep a separate hash if this proves too slow. */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

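/* Concrete example (hypothetical addresses): for the candidate tuple
 * 10.0.0.1:4000 -> 8.8.8.8:53, the inverted "reply" tuple looked up in
 * conntrack is 8.8.8.8:53 -> 10.0.0.1:4000; if some other connection
 * already expects that reply, the candidate mapping counts as taken.
 */
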
/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range. */
static int
in_range(const struct nf_conntrack_tuple *tuple,
	 const struct nf_nat_range *range)
{
	struct nf_nat_protocol *proto;
	int ret = 0;

	/* If we are supposed to map IPs, then we must be in the
	   range specified, otherwise let this drag us onto a new src IP. */
	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
			return 0;
	}

	rcu_read_lock();
	proto = __nf_nat_proto_find(tuple->dst.protonum);
	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
			    &range->min, &range->max))
		ret = 1;
	rcu_read_unlock();

	return ret;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		t->src.u3.ip == tuple->src.u3.ip &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(tuple);
	struct nf_conn_nat *nat;
	struct nf_conn *ct;
	struct hlist_node *n;

	read_lock_bh(&nf_nat_lock);
	hlist_for_each_entry(nat, n, &bysource[h], bysource) {
		ct = nat->ct;
		if (same_src(ct, tuple)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
					     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range)) {
				read_unlock_bh(&nf_nat_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&nf_nat_lock);
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	__be32 *var_ipp;
	/* Host order */
	u_int32_t minip, maxip, j;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == IP_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3.ip;
	else
		var_ipp = &tuple->dst.u3.ip;

	/* Fast path: only one choice. */
	if (range->min_ip == range->max_ip) {
		*var_ipp = range->min_ip;
		return;
	}

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots. */
	minip = ntohl(range->min_ip);
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 (__force u32)tuple->dst.u3.ip, 0);
	*var_ipp = htonl(minip + j % (maxip - minip + 1));
}

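/* Worked example (hypothetical numbers): with a range of 10.0.0.1-10.0.0.4
 * there are maxip - minip + 1 = 4 candidates.  If jhash_2words() were to
 * return j = 0x9e3779b9 for a given src/dst pair, then j % 4 = 1 and the
 * chosen address is 10.0.0.1 + 1 = 10.0.0.2 -- the same one every time
 * that pair recurs, which is what makes the mapping consistent.
 */
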
/* Manipulate the tuple into the range given.  For NF_IP_POST_ROUTING,
 * we change the source to map into the range.  For NF_IP_PRE_ROUTING
 * and NF_IP_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	struct nf_nat_protocol *proto;

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	   and that same mapping gives a unique tuple within the given
	   range, use that.

	   This is only required for source (ie. NAT/masq) mappings.
	   So far, we don't do local source mappings, so multiple
	   manips are not an issue. */
	if (maniptype == IP_NAT_MANIP_SRC) {
		if (find_appropriate_src(orig_tuple, tuple, range)) {
			DEBUGP("get_unique_tuple: Found current src map\n");
			if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
				if (!nf_nat_used_tuple(tuple, ct))
					return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given
	   range. */
	*tuple = *orig_tuple;
	find_best_ips_proto(tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	   the range to make a unique tuple. */

	rcu_read_lock();
	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

	/* Change protocol info to have some randomization */
	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
		proto->unique_tuple(tuple, range, maniptype, ct);
		goto out;
	}

	/* Only bother mapping if it's not already in range and unique */
	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
	    !nf_nat_used_tuple(tuple, ct))
		goto out;

	/* Last chance: get protocol to try to obtain unique tuple. */
	proto->unique_tuple(tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  unsigned int hooknum)
{
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat;
	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);

	/* nat helper or nfctnetlink also sets up the binding */
	nat = nfct_nat(ct);
	if (!nat) {
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			DEBUGP("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING ||
		     hooknum == NF_IP_POST_ROUTING ||
		     hooknum == NF_IP_LOCAL_IN ||
		     hooknum == NF_IP_LOCAL_OUT);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply.  Normally
	   this is what is in the conntrack, except for prior
	   manipulations (future optimization: if num_manips == 0,
	   orig_tp = conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == IP_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;
	}

	/* Place in source hash if this is the first time. */
	if (have_to_hash) {
		unsigned int srchash;

		srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		write_lock_bh(&nf_nat_lock);
		/* nf_conntrack_alter_reply might re-allocate the extension area */
		nat = nfct_nat(ct);
		nat->ct = ct;
		hlist_add_head(&nat->bysource, &bysource[srchash]);
		write_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == IP_NAT_MANIP_DST)
		set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
	else
		set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

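/* A minimal caller sketch, loosely modelled on what a SNAT/MASQUERADE
 * target of this era does (the helper name is hypothetical, not part of
 * this file): build an nf_nat_range mapping onto a single address and
 * hand it to nf_nat_setup_info() from the POSTROUTING hook, where
 * HOOK2MANIP() selects the SRC manip.
 */
#if 0
static unsigned int example_snat_target(struct nf_conn *ct, __be32 newsrc)
{
	struct nf_nat_range newrange = {
		.flags	= IP_NAT_RANGE_MAP_IPS,	/* map IPs, any proto part */
		.min_ip	= newsrc,		/* single-address range */
		.max_ip	= newsrc,
	};

	return nf_nat_setup_info(ct, &newrange, NF_IP_POST_ROUTING);
}
#endif
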
/* Returns true if succeeded. */
static int
manip_pkt(u_int16_t proto,
	  struct sk_buff **pskb,
	  unsigned int iphdroff,
	  const struct nf_conntrack_tuple *target,
	  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	struct nf_nat_protocol *p;

	if (!skb_make_writable(pskb, iphdroff + sizeof(*iph)))
		return 0;

	iph = (void *)(*pskb)->data + iphdroff;

	/* Manipulate protocol part. */

	/* rcu_read_lock()ed by nf_hook_slow */
	p = __nf_nat_proto_find(proto);
	if (!p->manip_pkt(pskb, iphdroff, target, maniptype))
		return 0;

	iph = (void *)(*pskb)->data + iphdroff;

	if (maniptype == IP_NAT_MANIP_SRC) {
		nf_csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		nf_csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return 1;
}

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff **pskb)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

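/* The XOR above relies on IPS_NAT_MASK == (IPS_SRC_NAT | IPS_DST_NAT), so
 * flipping with it swaps the two bits.  Worked example: an SNAT connection
 * has IPS_SRC_NAT set.  For an ORIGINAL-direction packet at POSTROUTING,
 * mtype is SRC and statusbit stays IPS_SRC_NAT, so the source is rewritten.
 * For the REPLY-direction packet at PREROUTING, mtype is DST; the XOR turns
 * statusbit back into IPS_SRC_NAT, the test still matches, and the
 * destination is rewritten back to the original client address.
 */
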
/* Dir is direction ICMP is coming from (opposite to packet it contains) */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum,
				  struct sk_buff **pskb)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple inner, target;
	int hdrlen = ip_hdrlen(*pskb);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

	if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
		return 0;

	inside = (void *)(*pskb)->data + ip_hdrlen(*pskb);

	/* We're actually going to mangle it beyond trivial checksum
	   adjustment, so make sure the current checksum is correct. */
	if (nf_ip_checksum(*pskb, hooknum, hdrlen, 0))
		return 0;

	/* Must be RELATED */
	NF_CT_ASSERT((*pskb)->nfctinfo == IP_CT_RELATED ||
		     (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

	/* Redirects on non-null nats must be dropped, else they'll
	   start talking to each other without our translation, and be
	   confused... --RR */
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* If NAT isn't finished, assume it and drop. */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	DEBUGP("icmp_reply_translation: translating error %p manip %u dir %s\n",
	       *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

	/* rcu_read_lock()ed by nf_hook_slow */
	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);

	if (!nf_ct_get_tuple(*pskb,
			     ip_hdrlen(*pskb) + sizeof(struct icmphdr),
			     (ip_hdrlen(*pskb) +
			      sizeof(struct icmphdr) + inside->ip.ihl * 4),
			     (u_int16_t)AF_INET,
			     inside->ip.protocol,
			     &inner, l3proto, l4proto))
		return 0;

	/* Change inner back to look like incoming packet.  We do the
	   opposite manip on this hook to normal, because it might not
	   pass all hooks (locally-generated ICMP).  Consider incoming
	   packet: PREROUTING (DST manip), routing produces ICMP, goes
	   through POSTROUTING (which must correct the DST manip). */
	if (!manip_pkt(inside->ip.protocol, pskb,
		       ip_hdrlen(*pskb) + sizeof(inside->icmp),
		       &ct->tuplehash[!dir].tuple,
		       !manip))
		return 0;

	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
		/* Reload "inside": manip_pkt mangled the inner packet and
		   may have reallocated the skb data. */
		inside = (void *)(*pskb)->data + ip_hdrlen(*pskb);
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(*pskb, hdrlen,
					       (*pskb)->len - hdrlen, 0));
	}

	/* Change outer to look like the reply to an incoming packet
	 * (proto 0 means don't invert per-proto part). */
	if (manip == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
		if (!manip_pkt(0, pskb, 0, &target, manip))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);

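/* Scenario sketch (hypothetical addresses) for the double manip above:
 * client 10.0.0.1 is SNATed to 1.2.3.4 and sends to 8.8.8.8; a router
 * returns an ICMP error whose payload quotes the translated packet
 * (1.2.3.4 -> 8.8.8.8).  On the way back in we must (a) rewrite the
 * quoted inner packet to 10.0.0.1 -> 8.8.8.8, using the *opposite*
 * manip, so the client recognizes its own packet, and (b) rewrite the
 * outer ICMP destination from 1.2.3.4 back to 10.0.0.1 like any other
 * reply.
 */
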
/* Protocol registration. */
int nf_nat_protocol_register(struct nf_nat_protocol *proto)
{
	int ret = 0;

	write_lock_bh(&nf_nat_lock);
	if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
		ret = -EBUSY;
		goto out;
	}
	rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
 out:
	write_unlock_bh(&nf_nat_lock);
	return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(struct nf_nat_protocol *proto)
{
	write_lock_bh(&nf_nat_lock);
	rcu_assign_pointer(nf_nat_protos[proto->protonum],
			   &nf_nat_unknown_protocol);
	write_unlock_bh(&nf_nat_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);

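/* Minimal registration sketch for an out-of-tree protocol module (the
 * callback names and protocol number are hypothetical; the callbacks
 * would implement manip_pkt/in_range/unique_tuple for the new protocol):
 */
#if 0
static struct nf_nat_protocol nf_nat_protocol_example = {
	.protonum	= 142,			/* hypothetical */
	.me		= THIS_MODULE,
	.manip_pkt	= example_manip_pkt,
	.in_range	= example_in_range,
	.unique_tuple	= example_unique_tuple,
};

static int __init example_init(void)
{
	return nf_nat_protocol_register(&nf_nat_protocol_example);
}

static void __exit example_exit(void)
{
	nf_nat_protocol_unregister(&nf_nat_protocol_example);
}
#endif
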
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
int
nf_nat_port_range_to_nfattr(struct sk_buff *skb,
			    const struct nf_nat_range *range)
{
	NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16),
		&range->min.tcp.port);
	NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16),
		&range->max.tcp.port);

	return 0;

nfattr_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr);

int
nf_nat_port_nfattr_to_range(struct nfattr *tb[], struct nf_nat_range *range)
{
	int ret = 0;

	/* we have to return whether we actually parsed something or not */

	if (tb[CTA_PROTONAT_PORT_MIN-1]) {
		ret = 1;
		range->min.tcp.port =
			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
	}

	if (!tb[CTA_PROTONAT_PORT_MAX-1]) {
		if (ret)
			range->max.tcp.port = range->min.tcp.port;
	} else {
		ret = 1;
		range->max.tcp.port =
			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_port_nfattr_to_range);
#endif

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

	if (nat == NULL || nat->ct == NULL)
		return;

	NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);

	write_lock_bh(&nf_nat_lock);
	hlist_del(&nat->bysource);
	nat->ct = NULL;
	write_unlock_bh(&nf_nat_lock);
}

static void nf_nat_move_storage(struct nf_conn *conntrack, void *old)
{
	struct nf_conn_nat *new_nat = nf_ct_ext_find(conntrack, NF_CT_EXT_NAT);
	struct nf_conn_nat *old_nat = (struct nf_conn_nat *)old;
	struct nf_conn *ct = old_nat->ct;
	unsigned int srchash;

	if (!(ct->status & IPS_NAT_DONE_MASK))
		return;

	srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);

	write_lock_bh(&nf_nat_lock);
	hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
	new_nat->ct = ct;
	write_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.move		= nf_nat_move_storage,
	.id		= NF_CT_EXT_NAT,
	.flags		= NF_CT_EXT_F_PREALLOC,
};

static int __init nf_nat_init(void)
{
	size_t i;
	int ret;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;

	bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
					 &nf_nat_vmalloced);
	if (!bysource) {
		ret = -ENOMEM;
		goto cleanup_extend;
	}

	/* Sew in builtin protocols. */
	write_lock_bh(&nf_nat_lock);
	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
		rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
	write_unlock_bh(&nf_nat_lock);

	for (i = 0; i < nf_nat_htable_size; i++)
		INIT_HLIST_HEAD(&bysource[i]);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
	return 0;

 cleanup_extend:
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}

/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	memset(nat, 0, sizeof(*nat));
	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	nf_ct_iterate_cleanup(&clean_nat, NULL);
	synchronize_rcu();
	nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size);
	nf_ct_l3proto_put(l3proto);
	nf_ct_extend_unregister(&nat_extend);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);