netfilter: conntrack: make netns address part of nat bysrc hash
net/netfilter/nf_nat_core.c

/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

static DEFINE_SPINLOCK(nf_nat_lock);

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
                                                __read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
                                                __read_mostly;
static unsigned int nf_nat_hash_rnd __read_mostly;

inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
        return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
        return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);

#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
        const struct nf_nat_l3proto *l3proto;
        const struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        enum ip_conntrack_dir dir;
        unsigned long statusbit;
        u8 family;

        ct = nf_ct_get(skb, &ctinfo);
        if (ct == NULL)
                return;

        family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
        rcu_read_lock();
        l3proto = __nf_nat_l3proto_find(family);
        if (l3proto == NULL)
                goto out;

        dir = CTINFO2DIR(ctinfo);
        if (dir == IP_CT_DIR_ORIGINAL)
                statusbit = IPS_DST_NAT;
        else
                statusbit = IPS_SRC_NAT;

        l3proto->decode_session(skb, ct, dir, statusbit, fl);
out:
        rcu_read_unlock();
}

int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
        struct flowi fl;
        unsigned int hh_len;
        struct dst_entry *dst;
        int err;

        err = xfrm_decode_session(skb, &fl, family);
        if (err < 0)
                return err;

        dst = skb_dst(skb);
        if (dst->xfrm)
                dst = ((struct xfrm_dst *)dst)->route;
        dst_hold(dst);

        dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
        if (IS_ERR(dst))
                return PTR_ERR(dst);

        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

        /* Change in oif may mean change in hh_len. */
        hh_len = skb_dst(skb)->dev->hard_header_len;
        if (skb_headroom(skb) < hh_len &&
            pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

        /* Original src, to ensure we map it consistently if poss. */
        hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
                      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));

        return reciprocal_scale(hash, n->ct.nat_htable_size);
}

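/* Sketch (editor's illustration, not part of the original file):
 * reciprocal_scale() maps a 32-bit hash onto [0, size) with a
 * multiply-and-shift instead of a modulo, roughly:
 *
 *        static inline u32 bucket_of(u32 hash, u32 size)
 *        {
 *                return (u32)(((u64)hash * size) >> 32);
 *        }
 *
 * Mixing net_hash_mix(n) into the jhash2() seed is what the commit in
 * the page title adds: identical tuples that live in different network
 * namespaces now hash to different buckets, so one netns can no longer
 * lengthen another's bysource chains.
 */
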
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
                  const struct nf_conn *ignored_conntrack)
{
        /* Conntrack tracking doesn't keep track of outgoing tuples; only
         * incoming ones.  NAT means they don't have a fixed mapping,
         * so we invert the tuple and look for the incoming reply.
         *
         * We could keep a separate hash if this proves too slow.
         */
        struct nf_conntrack_tuple reply;

        nf_ct_invert_tuplepr(&reply, tuple);
        return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

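/* Example (editor's sketch; the addresses are made up): for an SNAT
 * mapping 10.0.0.2:1025 -> 198.51.100.1:1025 toward 203.0.113.7:80,
 * the candidate tuple is the outgoing one, and inverting it yields the
 * reply tuple the stack would have to accept:
 *
 *        candidate: src=198.51.100.1:1025  dst=203.0.113.7:80
 *        reply:     src=203.0.113.7:80     dst=198.51.100.1:1025
 *
 * If nf_conntrack_tuple_taken() finds that reply tuple owned by another
 * conntrack, the mapping would collide and must not be used.
 */
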
/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
                    const struct nf_nat_l4proto *l4proto,
                    const struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range)
{
        /* If we are supposed to map IPs, then we must be in the
         * range specified, otherwise let this drag us onto a new src IP.
         */
        if (range->flags & NF_NAT_RANGE_MAP_IPS &&
            !l3proto->in_range(tuple, range))
                return 0;

        if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
            l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
                              &range->min_proto, &range->max_proto))
                return 1;

        return 0;
}

static inline int
same_src(const struct nf_conn *ct,
         const struct nf_conntrack_tuple *tuple)
{
        const struct nf_conntrack_tuple *t;

        t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
        return (t->dst.protonum == tuple->dst.protonum &&
                nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
                t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
                     const struct nf_conntrack_zone *zone,
                     const struct nf_nat_l3proto *l3proto,
                     const struct nf_nat_l4proto *l4proto,
                     const struct nf_conntrack_tuple *tuple,
                     struct nf_conntrack_tuple *result,
                     const struct nf_nat_range *range)
{
        unsigned int h = hash_by_src(net, tuple);
        const struct nf_conn_nat *nat;
        const struct nf_conn *ct;

        hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
                ct = nat->ct;
                if (same_src(ct, tuple) &&
                    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
                        /* Copy source part from reply tuple. */
                        nf_ct_invert_tuplepr(result,
                                       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
                        result->dst = tuple->dst;

                        if (in_range(l3proto, l4proto, result, range))
                                return 1;
                }
        }
        return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
                    struct nf_conntrack_tuple *tuple,
                    const struct nf_nat_range *range,
                    const struct nf_conn *ct,
                    enum nf_nat_manip_type maniptype)
{
        union nf_inet_addr *var_ipp;
        unsigned int i, max;
        /* Host order */
        u32 minip, maxip, j, dist;
        bool full_range;

        /* No IP mapping? Do nothing. */
        if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
                return;

        if (maniptype == NF_NAT_MANIP_SRC)
                var_ipp = &tuple->src.u3;
        else
                var_ipp = &tuple->dst.u3;

        /* Fast path: only one choice. */
        if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
                *var_ipp = range->min_addr;
                return;
        }

        if (nf_ct_l3num(ct) == NFPROTO_IPV4)
                max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
        else
                max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

        /* Hashing source and destination IPs gives a fairly even
         * spread in practice (if there are a small number of IPs
         * involved, there usually aren't that many connections
         * anyway).  The consistency means that servers see the same
         * client coming from the same IP (some Internet Banking sites
         * like this), even across reboots.
         */
        j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
                   range->flags & NF_NAT_RANGE_PERSISTENT ?
                        0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

        full_range = false;
        for (i = 0; i <= max; i++) {
                /* If first bytes of the address are at the maximum, use the
                 * distance. Otherwise use the full range.
                 */
                if (!full_range) {
                        minip = ntohl((__force __be32)range->min_addr.all[i]);
                        maxip = ntohl((__force __be32)range->max_addr.all[i]);
                        dist  = maxip - minip + 1;
                } else {
                        minip = 0;
                        dist  = ~0;
                }

                var_ipp->all[i] = (__force __u32)
                        htonl(minip + reciprocal_scale(j, dist));
                if (var_ipp->all[i] != range->max_addr.all[i])
                        full_range = true;

                if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
                        j ^= (__force u32)tuple->dst.u3.all[i];
        }
}

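/* Worked example (editor's sketch; the range is made up): for an IPv4
 * range 192.0.2.10 - 192.0.2.19, max == 0 and the loop runs once with
 *
 *        minip = 0xc000020a;        (ntohl of 192.0.2.10)
 *        maxip = 0xc0000213;        (ntohl of 192.0.2.19)
 *        dist  = 10;
 *        addr  = htonl(minip + reciprocal_scale(j, dist));
 *
 * so every source IP lands deterministically on one of the ten
 * addresses.  With NF_NAT_RANGE_PERSISTENT the seed ignores the
 * destination, and a client keeps its mapped IP across destinations.
 */
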
/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
                 const struct nf_conntrack_tuple *orig_tuple,
                 const struct nf_nat_range *range,
                 struct nf_conn *ct,
                 enum nf_nat_manip_type maniptype)
{
        const struct nf_conntrack_zone *zone;
        const struct nf_nat_l3proto *l3proto;
        const struct nf_nat_l4proto *l4proto;
        struct net *net = nf_ct_net(ct);

        zone = nf_ct_zone(ct);

        rcu_read_lock();
        l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
        l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
                                        orig_tuple->dst.protonum);

        /* 1) If this srcip/proto/src-proto-part is currently mapped,
         * and that same mapping gives a unique tuple within the given
         * range, use that.
         *
         * This is only required for source (ie. NAT/masq) mappings.
         * So far, we don't do local source mappings, so multiple
         * manips are not an issue.
         */
        if (maniptype == NF_NAT_MANIP_SRC &&
            !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
                /* try the original tuple first */
                if (in_range(l3proto, l4proto, orig_tuple, range)) {
                        if (!nf_nat_used_tuple(orig_tuple, ct)) {
                                *tuple = *orig_tuple;
                                goto out;
                        }
                } else if (find_appropriate_src(net, zone, l3proto, l4proto,
                                                orig_tuple, tuple, range)) {
                        pr_debug("get_unique_tuple: Found current src map\n");
                        if (!nf_nat_used_tuple(tuple, ct))
                                goto out;
                }
        }

        /* 2) Select the least-used IP/proto combination in the given range */
        *tuple = *orig_tuple;
        find_best_ips_proto(zone, tuple, range, ct, maniptype);

        /* 3) The per-protocol part of the manip is made to map into
         * the range to make a unique tuple.
         */

        /* Only bother mapping if it's not already in range and unique */
        if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
                if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
                        if (l4proto->in_range(tuple, maniptype,
                                              &range->min_proto,
                                              &range->max_proto) &&
                            (range->min_proto.all == range->max_proto.all ||
                             !nf_nat_used_tuple(tuple, ct)))
                                goto out;
                } else if (!nf_nat_used_tuple(tuple, ct)) {
                        goto out;
                }
        }

        /* Last chance: get the protocol to try to obtain a unique tuple. */
        l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
        rcu_read_unlock();
}

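/* Control-flow summary (editor's note): for a typical SNAT without
 * NF_NAT_RANGE_PROTO_RANDOM_ALL the function tries, in order:
 *
 *        1) reuse orig_tuple verbatim if it is in range and unused;
 *        2) reuse an existing source mapping via find_appropriate_src();
 *        3) pick an address with find_best_ips_proto() and keep the
 *           original proto part if it is allowed and free;
 *        4) ask the l4proto to search its range via ->unique_tuple().
 *
 * With PROTO_RANDOM_ALL, steps 1, 2 and the step-3 reuse check are
 * skipped, so ->unique_tuple() always runs and randomizes the
 * layer-4 part.
 */
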
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
        struct nf_conn_nat *nat = nfct_nat(ct);
        if (nat)
                return nat;

        if (!nf_ct_is_confirmed(ct))
                nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

        return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
                  const struct nf_nat_range *range,
                  enum nf_nat_manip_type maniptype)
{
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_tuple curr_tuple, new_tuple;
        struct nf_conn_nat *nat;

        /* nat helper or nfctnetlink also sets up the binding */
        nat = nf_ct_nat_ext_add(ct);
        if (nat == NULL)
                return NF_ACCEPT;

        NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
                     maniptype == NF_NAT_MANIP_DST);
        BUG_ON(nf_nat_initialized(ct, maniptype));

        /* What we've got will look like inverse of reply. Normally
         * this is what is in the conntrack, except for prior
         * manipulations (future optimization: if num_manips == 0,
         * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
         */
        nf_ct_invert_tuplepr(&curr_tuple,
                             &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

        if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
                struct nf_conntrack_tuple reply;

                /* Alter conntrack table so it will recognize replies. */
                nf_ct_invert_tuplepr(&reply, &new_tuple);
                nf_conntrack_alter_reply(ct, &reply);

                /* Non-atomic: we own this at the moment. */
                if (maniptype == NF_NAT_MANIP_SRC)
                        ct->status |= IPS_SRC_NAT;
                else
                        ct->status |= IPS_DST_NAT;

                if (nfct_help(ct))
                        nfct_seqadj_ext_add(ct);
        }

        if (maniptype == NF_NAT_MANIP_SRC) {
                unsigned int srchash;

                srchash = hash_by_src(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                spin_lock_bh(&nf_nat_lock);
                /* nf_conntrack_alter_reply might re-allocate extension area */
                nat = nfct_nat(ct);
                nat->ct = ct;
                hlist_add_head_rcu(&nat->bysource,
                                   &net->ct.nat_bysource[srchash]);
                spin_unlock_bh(&nf_nat_lock);
        }

        /* It's done. */
        if (maniptype == NF_NAT_MANIP_DST)
                ct->status |= IPS_DST_NAT_DONE;
        else
                ct->status |= IPS_SRC_NAT_DONE;

        return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

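/* Usage sketch (editor's illustration; the address is made up): this is
 * roughly what an iptables SNAT target does for a new connection on
 * POSTROUTING:
 *
 *        static unsigned int snat_to_single_ip(struct nf_conn *ct)
 *        {
 *                struct nf_nat_range range = {
 *                        .flags    = NF_NAT_RANGE_MAP_IPS,
 *                        .min_addr = { .ip = htonl(0xc6336401) },
 *                        .max_addr = { .ip = htonl(0xc6336401) },
 *                };        (0xc6336401 == 198.51.100.1)
 *
 *                return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 *        }
 *
 * The call both rewrites the reply tuple (nf_conntrack_alter_reply)
 * and links the conntrack into nat_bysource for later reuse.
 */
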
static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
        /* Force range to this IP; let proto decide mapping for
         * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
         * Use reply in case it's already been mangled (eg local packet).
         */
        union nf_inet_addr ip =
                (manip == NF_NAT_MANIP_SRC ?
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
                ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
        struct nf_nat_range range = {
                .flags          = NF_NAT_RANGE_MAP_IPS,
                .min_addr       = ip,
                .max_addr       = ip,
        };
        return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
        return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
                           unsigned int hooknum,
                           struct sk_buff *skb)
{
        const struct nf_nat_l3proto *l3proto;
        const struct nf_nat_l4proto *l4proto;
        enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
        unsigned long statusbit;
        enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

        if (mtype == NF_NAT_MANIP_SRC)
                statusbit = IPS_SRC_NAT;
        else
                statusbit = IPS_DST_NAT;

        /* Invert if this is reply dir. */
        if (dir == IP_CT_DIR_REPLY)
                statusbit ^= IPS_NAT_MASK;

        /* Non-atomic: these bits don't change. */
        if (ct->status & statusbit) {
                struct nf_conntrack_tuple target;

                /* We are aiming to look like inverse of other direction. */
                nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

                l3proto = __nf_nat_l3proto_find(target.src.l3num);
                l4proto = __nf_nat_l4proto_find(target.src.l3num,
                                                target.dst.protonum);
                if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
                        return NF_DROP;
        }
        return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

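/* Example of the statusbit inversion above (editor's note): IPS_SRC_NAT
 * and IPS_DST_NAT are the two bits of IPS_NAT_MASK, so XOR-ing with the
 * mask swaps one for the other:
 *
 *        mtype = SRC, dir = ORIGINAL  ->  test IPS_SRC_NAT
 *        mtype = SRC, dir = REPLY     ->  test IPS_DST_NAT
 *
 * i.e. a reply traversing a source-manip hook must be mangled when the
 * connection was destination-NATed, and vice versa.
 */
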
struct nf_nat_proto_clean {
        u8      l3proto;
        u8      l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
        const struct nf_nat_proto_clean *clean = data;
        struct nf_conn_nat *nat = nfct_nat(i);

        if (!nat)
                return 0;

        if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
            (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
                return 0;

        return i->status & IPS_NAT_MASK ? 1 : 0;
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
        struct nf_conn_nat *nat = nfct_nat(ct);

        if (nf_nat_proto_remove(ct, data))
                return 1;

        if (!nat || !nat->ct)
                return 0;

        /* This netns is being destroyed, and the conntrack has a NAT null
         * binding.  Remove it from the bysource hash, as the table will be
         * freed soon.
         *
         * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
         * would delete the entry from the already-freed table.
         */
        if (!del_timer(&ct->timeout))
                return 1;

        spin_lock_bh(&nf_nat_lock);
        hlist_del_rcu(&nat->bysource);
        ct->status &= ~IPS_NAT_DONE_MASK;
        nat->ct = NULL;
        spin_unlock_bh(&nf_nat_lock);

        add_timer(&ct->timeout);

        /* don't delete conntrack.  Although that would make things a lot
         * simpler, we'd end up flushing all conntracks on nat rmmod.
         */
        return 0;
}

static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
        struct nf_nat_proto_clean clean = {
                .l3proto = l3proto,
                .l4proto = l4proto,
        };
        struct net *net;

        rtnl_lock();
        for_each_net(net)
                nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
        rtnl_unlock();
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
        struct nf_nat_proto_clean clean = {
                .l3proto = l3proto,
        };
        struct net *net;

        rtnl_lock();

        for_each_net(net)
                nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
        rtnl_unlock();
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
        const struct nf_nat_l4proto **l4protos;
        unsigned int i;
        int ret = 0;

        mutex_lock(&nf_nat_proto_mutex);
        if (nf_nat_l4protos[l3proto] == NULL) {
                l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
                                   GFP_KERNEL);
                if (l4protos == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                for (i = 0; i < IPPROTO_MAX; i++)
                        RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

                /* Before making proto_array visible to lockless readers,
                 * we must make sure its content is committed to memory.
                 */
                smp_wmb();

                nf_nat_l4protos[l3proto] = l4protos;
        }

        if (rcu_dereference_protected(
                        nf_nat_l4protos[l3proto][l4proto->l4proto],
                        lockdep_is_held(&nf_nat_proto_mutex)
                        ) != &nf_nat_l4proto_unknown) {
                ret = -EBUSY;
                goto out;
        }
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
 out:
        mutex_unlock(&nf_nat_proto_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);

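/* Registration sketch (editor's illustration; the protocol and helper
 * names are hypothetical, and only the ops this file actually
 * dereferences are shown):
 *
 *        static const struct nf_nat_l4proto nf_nat_l4proto_foo = {
 *                .l4proto      = IPPROTO_UDPLITE,
 *                .in_range     = foo_in_range,
 *                .unique_tuple = foo_unique_tuple,
 *        };
 *
 *        err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_foo);
 *
 * Unregistering restores the &nf_nat_l4proto_unknown placeholder, so a
 * lookup never returns NULL for an unhandled protocol number.
 */
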
/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
                         &nf_nat_l4proto_unknown);
        mutex_unlock(&nf_nat_proto_mutex);
        synchronize_rcu();

        nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);

int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
        int err;

        err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
        if (err < 0)
                return err;

        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
                         &nf_nat_l4proto_tcp);
        RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
                         &nf_nat_l4proto_udp);
        mutex_unlock(&nf_nat_proto_mutex);

        RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
        return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
        mutex_lock(&nf_nat_proto_mutex);
        RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
        mutex_unlock(&nf_nat_proto_mutex);
        synchronize_rcu();

        nf_nat_l3proto_clean(l3proto->l3proto);
        nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
        struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

        if (nat == NULL || nat->ct == NULL)
                return;

        NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);

        spin_lock_bh(&nf_nat_lock);
        hlist_del_rcu(&nat->bysource);
        spin_unlock_bh(&nf_nat_lock);
}

static void nf_nat_move_storage(void *new, void *old)
{
        struct nf_conn_nat *new_nat = new;
        struct nf_conn_nat *old_nat = old;
        struct nf_conn *ct = old_nat->ct;

        if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
                return;

        spin_lock_bh(&nf_nat_lock);
        hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
        spin_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
        .len            = sizeof(struct nf_conn_nat),
        .align          = __alignof__(struct nf_conn_nat),
        .destroy        = nf_nat_cleanup_conntrack,
        .move           = nf_nat_move_storage,
        .id             = NF_CT_EXT_NAT,
        .flags          = NF_CT_EXT_F_PREALLOC,
};

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
        [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
        [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
                                     const struct nf_conn *ct,
                                     struct nf_nat_range *range)
{
        struct nlattr *tb[CTA_PROTONAT_MAX+1];
        const struct nf_nat_l4proto *l4proto;
        int err;

        err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
        if (err < 0)
                return err;

        l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->nlattr_to_range)
                err = l4proto->nlattr_to_range(tb, range);

        return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
        [CTA_NAT_V4_MINIP]      = { .type = NLA_U32 },
        [CTA_NAT_V4_MAXIP]      = { .type = NLA_U32 },
        [CTA_NAT_V6_MINIP]      = { .len = sizeof(struct in6_addr) },
        [CTA_NAT_V6_MAXIP]      = { .len = sizeof(struct in6_addr) },
        [CTA_NAT_PROTO]         = { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
                    const struct nf_conn *ct, struct nf_nat_range *range,
                    const struct nf_nat_l3proto *l3proto)
{
        struct nlattr *tb[CTA_NAT_MAX+1];
        int err;

        memset(range, 0, sizeof(*range));

        err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
        if (err < 0)
                return err;

        err = l3proto->nlattr_to_range(tb, range);
        if (err < 0)
                return err;

        if (!tb[CTA_NAT_PROTO])
                return 0;

        return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

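/* Attribute layout parsed above (editor's sketch, derived from the two
 * nla_policy tables):
 *
 *        CTA_NAT (nested)
 *          CTA_NAT_V4_MINIP / CTA_NAT_V4_MAXIP    NLA_U32
 *          CTA_NAT_V6_MINIP / CTA_NAT_V6_MAXIP    struct in6_addr
 *          CTA_NAT_PROTO (nested)
 *            CTA_PROTONAT_PORT_MIN                NLA_U16
 *            CTA_PROTONAT_PORT_MAX                NLA_U16
 *
 * The l3proto's ->nlattr_to_range() fills range->min_addr/max_addr;
 * the l4proto's ->nlattr_to_range() fills range->min_proto/max_proto.
 */
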
/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        struct nf_nat_range range;
        const struct nf_nat_l3proto *l3proto;
        int err;

        /* Should not happen, restricted to creating new conntracks
         * via ctnetlink.
         */
        if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
                return -EEXIST;

        /* Make sure that L3 NAT is there by the time we call
         * nf_nat_setup_info to attach the null binding, otherwise
         * this may oops.
         */
        l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
        if (l3proto == NULL)
                return -EAGAIN;

        /* No NAT information has been passed, allocate the null-binding */
        if (attr == NULL)
                return __nf_nat_alloc_null_binding(ct, manip);

        err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
        if (err < 0)
                return err;

        return nf_nat_setup_info(ct, &range, manip);
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
                          enum nf_nat_manip_type manip,
                          const struct nlattr *attr)
{
        return -EOPNOTSUPP;
}
#endif

static int __net_init nf_nat_net_init(struct net *net)
{
        /* Leave them the same for the moment. */
        net->ct.nat_htable_size = nf_conntrack_htable_size;
        net->ct.nat_bysource = nf_ct_alloc_hashtable(&net->ct.nat_htable_size, 0);
        if (!net->ct.nat_bysource)
                return -ENOMEM;
        return 0;
}

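/* Editor's note: the per-netns bysource table reuses the global
 * nf_conntrack_htable_size, and each netns gets its own allocation
 * here.  Together with the net_hash_mix() salt in hash_by_src() (the
 * change named in the page title), both the table and the hash are
 * namespace-aware:
 *
 *        h    = hash_by_src(net, tuple);          (netns-salted)
 *        head = &net->ct.nat_bysource[h];         (netns-private table)
 */
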
static void __net_exit nf_nat_net_exit(struct net *net)
{
        struct nf_nat_proto_clean clean = {};

        nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
}

static struct pernet_operations nf_nat_net_ops = {
        .init = nf_nat_net_init,
        .exit = nf_nat_net_exit,
};

static struct nf_ct_helper_expectfn follow_master_nat = {
        .name           = "nat-follow-master",
        .expectfn       = nf_nat_follow_master,
};

static int __init nf_nat_init(void)
{
        int ret;

        ret = nf_ct_extend_register(&nat_extend);
        if (ret < 0) {
                printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
                return ret;
        }

        ret = register_pernet_subsys(&nf_nat_net_ops);
        if (ret < 0)
                goto cleanup_extend;

        nf_ct_helper_expectfn_register(&follow_master_nat);

        /* Initialize fake conntrack so that NAT will skip it */
        nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);

        BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
        RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
                         nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
        BUG_ON(nf_nat_decode_session_hook != NULL);
        RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
        return 0;

 cleanup_extend:
        nf_ct_extend_unregister(&nat_extend);
        return ret;
}

static void __exit nf_nat_cleanup(void)
{
        unsigned int i;

        unregister_pernet_subsys(&nf_nat_net_ops);
        nf_ct_extend_unregister(&nat_extend);
        nf_ct_helper_expectfn_unregister(&follow_master_nat);
        RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
        RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
        for (i = 0; i < NFPROTO_NUMPROTO; i++)
                kfree(nf_nat_l4protos[i]);
        synchronize_net();
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);