0a02a8caf3b3b1821cf0b690d9ffea4608362da3
[deliverable/linux.git] / net / ipv4 / netfilter / nf_nat_rule.c
1 /* (C) 1999-2001 Paul `Rusty' Russell
2 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9 /* Everything about the rules for NAT. */
10 #include <linux/types.h>
11 #include <linux/ip.h>
12 #include <linux/netfilter.h>
13 #include <linux/netfilter_ipv4.h>
14 #include <linux/module.h>
15 #include <linux/kmod.h>
16 #include <linux/skbuff.h>
17 #include <linux/proc_fs.h>
18 #include <net/checksum.h>
19 #include <net/route.h>
20 #include <linux/bitops.h>
21
22 #include <linux/netfilter_ipv4/ip_tables.h>
23 #include <net/netfilter/nf_nat.h>
24 #include <net/netfilter/nf_nat_core.h>
25 #include <net/netfilter/nf_nat_rule.h>
26
27 #define NAT_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
28 (1 << NF_INET_POST_ROUTING) | \
29 (1 << NF_INET_LOCAL_OUT))
30
/* Initial, empty "nat" table: one ACCEPT rule for each hooked chain
 * (PREROUTING, POSTROUTING, OUTPUT) followed by the mandatory
 * terminating ERROR entry.  The struct layout mirrors the flat blob
 * format ipt_register_table() expects. */
static struct
{
	struct ipt_replace repl;
	struct ipt_standard entries[3];
	struct ipt_error term;
} nat_initial_table __net_initdata = {
	.repl = {
		.name = "nat",
		.valid_hooks = NAT_VALID_HOOKS,
		/* 3 standard entries + 1 terminating ERROR entry. */
		.num_entries = 4,
		.size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
		/* Byte offset of each hook's first rule within the blob. */
		.hook_entry = {
			[NF_INET_PRE_ROUTING] = 0,
			[NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
			[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
		},
		/* Underflow (policy) offsets: same as hook_entry since each
		 * chain holds exactly one rule. */
		.underflow = {
			[NF_INET_PRE_ROUTING] = 0,
			[NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
			[NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
		},
	},
	.entries = {
		IPT_STANDARD_INIT(NF_ACCEPT),	/* PRE_ROUTING */
		IPT_STANDARD_INIT(NF_ACCEPT),	/* POST_ROUTING */
		IPT_STANDARD_INIT(NF_ACCEPT),	/* LOCAL_OUT */
	},
	.term = IPT_ERROR_INIT,			/* ERROR */
};
60
/* Table descriptor registered per net namespace in nf_nat_rule_net_init().
 * The "__nat_table" lock name is the lockdep-class naming convention used
 * by the other xt_table definitions. */
static struct xt_table nat_table = {
	.name = "nat",
	.valid_hooks = NAT_VALID_HOOKS,
	.lock = __RW_LOCK_UNLOCKED(__nat_table.lock),
	.me = THIS_MODULE,
	.af = AF_INET,
};
68
69 /* Source NAT */
70 static unsigned int ipt_snat_target(struct sk_buff *skb,
71 const struct net_device *in,
72 const struct net_device *out,
73 unsigned int hooknum,
74 const struct xt_target *target,
75 const void *targinfo)
76 {
77 struct nf_conn *ct;
78 enum ip_conntrack_info ctinfo;
79 const struct nf_nat_multi_range_compat *mr = targinfo;
80
81 NF_CT_ASSERT(hooknum == NF_INET_POST_ROUTING);
82
83 ct = nf_ct_get(skb, &ctinfo);
84
85 /* Connection must be valid and new. */
86 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
87 ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
88 NF_CT_ASSERT(out);
89
90 return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
91 }
92
93 /* Before 2.6.11 we did implicit source NAT if required. Warn about change. */
94 static void warn_if_extra_mangle(__be32 dstip, __be32 srcip)
95 {
96 static int warned = 0;
97 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dstip } } };
98 struct rtable *rt;
99
100 if (ip_route_output_key(&init_net, &rt, &fl) != 0)
101 return;
102
103 if (rt->rt_src != srcip && !warned) {
104 printk("NAT: no longer support implicit source local NAT\n");
105 printk("NAT: packet src %u.%u.%u.%u -> dst %u.%u.%u.%u\n",
106 NIPQUAD(srcip), NIPQUAD(dstip));
107 warned = 1;
108 }
109 ip_rt_put(rt);
110 }
111
112 static unsigned int ipt_dnat_target(struct sk_buff *skb,
113 const struct net_device *in,
114 const struct net_device *out,
115 unsigned int hooknum,
116 const struct xt_target *target,
117 const void *targinfo)
118 {
119 struct nf_conn *ct;
120 enum ip_conntrack_info ctinfo;
121 const struct nf_nat_multi_range_compat *mr = targinfo;
122
123 NF_CT_ASSERT(hooknum == NF_INET_PRE_ROUTING ||
124 hooknum == NF_INET_LOCAL_OUT);
125
126 ct = nf_ct_get(skb, &ctinfo);
127
128 /* Connection must be valid and new. */
129 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
130
131 if (hooknum == NF_INET_LOCAL_OUT &&
132 mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)
133 warn_if_extra_mangle(ip_hdr(skb)->daddr,
134 mr->range[0].min_ip);
135
136 return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_DST);
137 }
138
139 static bool ipt_snat_checkentry(const char *tablename,
140 const void *entry,
141 const struct xt_target *target,
142 void *targinfo,
143 unsigned int hook_mask)
144 {
145 const struct nf_nat_multi_range_compat *mr = targinfo;
146
147 /* Must be a valid range */
148 if (mr->rangesize != 1) {
149 printk("SNAT: multiple ranges no longer supported\n");
150 return false;
151 }
152 return true;
153 }
154
155 static bool ipt_dnat_checkentry(const char *tablename,
156 const void *entry,
157 const struct xt_target *target,
158 void *targinfo,
159 unsigned int hook_mask)
160 {
161 const struct nf_nat_multi_range_compat *mr = targinfo;
162
163 /* Must be a valid range */
164 if (mr->rangesize != 1) {
165 printk("DNAT: multiple ranges no longer supported\n");
166 return false;
167 }
168 return true;
169 }
170
171 unsigned int
172 alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
173 {
174 /* Force range to this IP; let proto decide mapping for
175 per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
176 Use reply in case it's already been mangled (eg local packet).
177 */
178 __be32 ip
179 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
180 ? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip
181 : ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
182 struct nf_nat_range range
183 = { IP_NAT_RANGE_MAP_IPS, ip, ip, { 0 }, { 0 } };
184
185 pr_debug("Allocating NULL binding for %p (%u.%u.%u.%u)\n",
186 ct, NIPQUAD(ip));
187 return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
188 }
189
190 int nf_nat_rule_find(struct sk_buff *skb,
191 unsigned int hooknum,
192 const struct net_device *in,
193 const struct net_device *out,
194 struct nf_conn *ct)
195 {
196 struct net *net = nf_ct_net(ct);
197 int ret;
198
199 ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table);
200
201 if (ret == NF_ACCEPT) {
202 if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
203 /* NUL mapping */
204 ret = alloc_null_binding(ct, hooknum);
205 }
206 return ret;
207 }
208
/* SNAT target registration: restricted to the "nat" table and the
 * POSTROUTING hook; options validated by ipt_snat_checkentry(). */
static struct xt_target ipt_snat_reg __read_mostly = {
	.name = "SNAT",
	.target = ipt_snat_target,
	.targetsize = sizeof(struct nf_nat_multi_range_compat),
	.table = "nat",
	.hooks = 1 << NF_INET_POST_ROUTING,
	.checkentry = ipt_snat_checkentry,
	.family = AF_INET,
};
218
/* DNAT target registration: restricted to the "nat" table and the
 * PREROUTING/OUTPUT hooks; options validated by ipt_dnat_checkentry(). */
static struct xt_target ipt_dnat_reg __read_mostly = {
	.name = "DNAT",
	.target = ipt_dnat_target,
	.targetsize = sizeof(struct nf_nat_multi_range_compat),
	.table = "nat",
	.hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
	.checkentry = ipt_dnat_checkentry,
	.family = AF_INET,
};
228
229 static int __net_init nf_nat_rule_net_init(struct net *net)
230 {
231 net->ipv4.nat_table = ipt_register_table(net, &nat_table,
232 &nat_initial_table.repl);
233 if (IS_ERR(net->ipv4.nat_table))
234 return PTR_ERR(net->ipv4.nat_table);
235 return 0;
236 }
237
238 static void __net_exit nf_nat_rule_net_exit(struct net *net)
239 {
240 ipt_unregister_table(net->ipv4.nat_table);
241 }
242
/* Hook the table setup/teardown into net-namespace lifetime. */
static struct pernet_operations nf_nat_rule_net_ops = {
	.init = nf_nat_rule_net_init,
	.exit = nf_nat_rule_net_exit,
};
247
/* Module init: register the pernet table ops, then the SNAT and DNAT
 * targets.  On any failure, unwind the steps already taken in reverse
 * order via the goto chain and return the error. */
int __init nf_nat_rule_init(void)
{
	int ret;

	ret = register_pernet_subsys(&nf_nat_rule_net_ops);
	if (ret != 0)
		goto out;
	ret = xt_register_target(&ipt_snat_reg);
	if (ret != 0)
		goto unregister_table;

	ret = xt_register_target(&ipt_dnat_reg);
	if (ret != 0)
		goto unregister_snat;

	return ret;

 unregister_snat:
	xt_unregister_target(&ipt_snat_reg);
 unregister_table:
	unregister_pernet_subsys(&nf_nat_rule_net_ops);
 out:
	return ret;
}
272
/* Module exit: undo nf_nat_rule_init() in exact reverse order. */
void nf_nat_rule_cleanup(void)
{
	xt_unregister_target(&ipt_dnat_reg);
	xt_unregister_target(&ipt_snat_reg);
	unregister_pernet_subsys(&nf_nat_rule_net_ops);
}
This page took 0.046852 seconds and 4 git commands to generate.