/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

static DEFINE_SPINLOCK(nf_nat_lock);

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
						__read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
						__read_mostly;

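/* Per-family L3/L4 protocol lookup helpers. The tables above are
 * RCU-protected: callers must hold rcu_read_lock() (or nf_nat_proto_mutex
 * on the registration paths) while the returned pointer is in use.
 */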
inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
	return rcu_dereference(nf_nat_l3protos[family]);
}

inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
	return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);

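/* With CONFIG_XFRM, tell the IPsec (xfrm) layer how a NATed connection
 * rewrites addresses so that policy lookups use the NAT-adjusted flow;
 * __nf_nat_decode_session is registered as nf_nat_decode_session_hook
 * in nf_nat_init() below.
 */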
#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
	rcu_read_lock();
	l3proto = __nf_nat_l3proto_find(family);
	if (l3proto == NULL)
		goto out;

	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	l3proto->decode_session(skb, ct, dir, statusbit, fl);
out:
	rcu_read_unlock();
}

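/* Re-route an skb through the xfrm framework after NAT has rewritten its
 * addresses: the route chosen before the mangling may no longer match an
 * IPsec policy, so decode the flow again and redo the dst lookup.
 */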
int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;
	int err;

	err = xfrm_decode_session(skb, &fl, family);
	if (err < 0)
		return err;

	dst = skb_dst(skb);
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct net *net, u16 zone,
	    const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	/* Original src, to ensure we map it consistently if poss. */
	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
		      tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
	return ((u64)hash * net->ct.nat_htable_size) >> 32;
}

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range.
 */
static int in_range(const struct nf_nat_l3proto *l3proto,
		    const struct nf_nat_l4proto *l4proto,
		    const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !l3proto->in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
	    l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
			      &range->min_proto, &range->max_proto))
		return 1;

	return 0;
}

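/* Does the original-direction tuple of @ct use the same source address
 * and protocol/port as @tuple?
 */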
static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net, u16 zone,
		     const struct nf_nat_l3proto *l3proto,
		     const struct nf_nat_l4proto *l4proto,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(net, zone, tuple);
	const struct nf_conn_nat *nat;
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
		ct = nat->ct;
		if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(l3proto, l4proto, result, range))
				return 1;
		}
	}
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + (((u64)j * dist) >> 32));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	struct net *net = nf_ct_net(ct);
	u16 zone = nf_ct_zone(ct);

	rcu_read_lock();
	l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
	l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
					orig_tuple->dst.protonum);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (ie. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(l3proto, l4proto, orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				goto out;
			}
		} else if (find_appropriate_src(net, zone, l3proto, l4proto,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				goto out;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (l4proto->in_range(tuple, maniptype,
					      &range->min_proto,
					      &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				goto out;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			goto out;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

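/* Return the NAT extension of a conntrack, adding it first if necessary.
 * Extensions can only be added while the conntrack is still unconfirmed.
 */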
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);
	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

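/* Set up a source or destination NAT binding for @ct according to @range.
 * Called by the NAT rule targets and by ctnetlink (see
 * nfnetlink_parse_nat_setup() below); a manip type may only be set up once
 * per conntrack.
 */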
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat;

	/* A NAT helper or nfctnetlink may also set up the binding */
	nat = nf_ct_nat_ext_add(ct);
	if (nat == NULL)
		return NF_ACCEPT;

	NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
		     maniptype == NF_NAT_MANIP_DST);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct))
			nfct_seqadj_ext_add(ct);
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		unsigned int srchash;

		srchash = hash_by_src(net, nf_ct_zone(ct),
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		spin_lock_bh(&nf_nat_lock);
		/* nf_conntrack_alter_reply might re-allocate extension area */
		nat = nfct_nat(ct);
		nat->ct = ct;
		hlist_add_head_rcu(&nat->bysource,
				   &net->ct.nat_bysource[srchash]);
		spin_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

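/* Set up an identity ("null") binding: map the connection onto the address
 * it already uses, leaving only the protocol part free to be changed if
 * that is needed to keep the tuple unique.
 */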
static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_nat_l4proto *l4proto;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		l3proto = __nf_nat_l3proto_find(target.src.l3num);
		l4proto = __nf_nat_l4proto_find(target.src.l3num,
						target.dst.protonum);
		if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

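/* Filter argument passed to nf_ct_iterate_cleanup() when an L3 or L4 NAT
 * protocol is unregistered; a zero field matches any protocol.
 */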
struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nf_nat_proto_remove(ct, data))
		return 1;

	if (!nat || !nat->ct)
		return 0;

	/* This netns is being destroyed, and conntrack has nat null binding.
	 * Remove it from bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete entry from already-freed table.
	 */
	if (!del_timer(&ct->timeout))
		return 1;

	spin_lock_bh(&nf_nat_lock);
	hlist_del_rcu(&nat->bysource);
	ct->status &= ~IPS_NAT_DONE_MASK;
	nat->ct = NULL;
	spin_unlock_bh(&nf_nat_lock);

	add_timer(&ct->timeout);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

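/* Kill the conntracks whose NAT bindings used the protocol that is being
 * unregistered, in every network namespace.
 */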
static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
		.l4proto = l4proto,
	};
	struct net *net;

	rtnl_lock();
	for_each_net(net)
		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
	rtnl_unlock();
}

static void nf_nat_l3proto_clean(u8 l3proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
	};
	struct net *net;

	rtnl_lock();

	for_each_net(net)
		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
	rtnl_unlock();
}

/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	const struct nf_nat_l4proto **l4protos;
	unsigned int i;
	int ret = 0;

	mutex_lock(&nf_nat_proto_mutex);
	if (nf_nat_l4protos[l3proto] == NULL) {
		l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
				   GFP_KERNEL);
		if (l4protos == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		for (i = 0; i < IPPROTO_MAX; i++)
			RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);

		/* Before making proto_array visible to lockless readers,
		 * we must make sure its content is committed to memory.
		 */
		smp_wmb();

		nf_nat_l4protos[l3proto] = l4protos;
	}

	if (rcu_dereference_protected(
			nf_nat_l4protos[l3proto][l4proto->l4proto],
			lockdep_is_held(&nf_nat_proto_mutex)
			) != &nf_nat_l4proto_unknown) {
		ret = -EBUSY;
		goto out;
	}
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
 out:
	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
			 &nf_nat_l4proto_unknown);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);

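/* Register an L3 (address family) NAT protocol; the generic TCP and UDP
 * port manipulation handlers are installed for that family at the same time.
 */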
int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
	int err;

	err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
	if (err < 0)
		return err;

	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
			 &nf_nat_l4proto_tcp);
	RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
			 &nf_nat_l4proto_udp);
	mutex_unlock(&nf_nat_proto_mutex);

	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l3proto_clean(l3proto->l3proto);
	nf_ct_l3proto_module_put(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

	if (nat == NULL || nat->ct == NULL)
		return;

	NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);

	spin_lock_bh(&nf_nat_lock);
	hlist_del_rcu(&nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

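/* Conntrack extension storage may be reallocated; when that happens, make
 * the bysource hash entry point at the NAT extension's new location.
 */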
static void nf_nat_move_storage(void *new, void *old)
{
	struct nf_conn_nat *new_nat = new;
	struct nf_conn_nat *old_nat = old;
	struct nf_conn *ct = old_nat->ct;

	if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
		return;

	spin_lock_bh(&nf_nat_lock);
	hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.move		= nf_nat_move_storage,
	.id		= NF_CT_EXT_NAT,
	.flags		= NF_CT_EXT_F_PREALLOC,
};

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

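/* Translate the nested CTA_PROTONAT_* attributes into the per-protocol
 * (port) part of an nf_nat_range.
 */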
static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_l4proto *l4proto;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
	if (err < 0)
		return err;

	l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto->nlattr_to_range)
		err = l4proto->nlattr_to_range(tb, range);

	return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range,
		    const struct nf_nat_l3proto *l3proto)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
	if (err < 0)
		return err;

	err = l3proto->nlattr_to_range(tb, range);
	if (err < 0)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range range;
	const struct nf_nat_l3proto *l3proto;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* Make sure that L3 NAT is there by the time we call nf_nat_setup_info
	 * to attach the null binding, otherwise this may oops.
	 */
	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
	if (l3proto == NULL)
		return -EAGAIN;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip);

	err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip);
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

817 | static int __net_init nf_nat_net_init(struct net *net) |
818 | { | |
d696c7bd | 819 | /* Leave them the same for the moment. */ |
c7232c99 PM |
820 | net->ct.nat_htable_size = net->ct.htable_size; |
821 | net->ct.nat_bysource = nf_ct_alloc_hashtable(&net->ct.nat_htable_size, 0); | |
822 | if (!net->ct.nat_bysource) | |
0c4c9288 AD |
823 | return -ENOMEM; |
824 | return 0; | |
825 | } | |
826 | ||
0c4c9288 AD |
827 | static void __net_exit nf_nat_net_exit(struct net *net) |
828 | { | |
c7232c99 PM |
829 | struct nf_nat_proto_clean clean = {}; |
830 | ||
945b2b2d | 831 | nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0); |
0c4c9288 | 832 | synchronize_rcu(); |
c7232c99 | 833 | nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); |
0c4c9288 AD |
834 | } |
835 | ||
836 | static struct pernet_operations nf_nat_net_ops = { | |
837 | .init = nf_nat_net_init, | |
838 | .exit = nf_nat_net_exit, | |
839 | }; | |
840 | ||
static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

static int __init nf_nat_init(void)
{
	int ret;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	ret = register_pernet_subsys(&nf_nat_net_ops);
	if (ret < 0)
		goto cleanup_extend;

	nf_ct_helper_expectfn_register(&follow_master_nat);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);

	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
			 nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
	BUG_ON(nf_nat_decode_session_hook != NULL);
	RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
	return 0;

 cleanup_extend:
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}

static void __exit nf_nat_cleanup(void)
{
	unsigned int i;

	unregister_pernet_subsys(&nf_nat_net_ops);
	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
	RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		kfree(nf_nat_l4protos[i]);
	synchronize_net();
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);