/* ip_nat_helper.c - generic support functions for NAT helpers
 *
 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/checksum.h>
#include <net/tcp.h>
10#include <linux/module.h>
11#include <linux/kmod.h>
12#include <linux/types.h>
13#include <linux/timer.h>
14#include <linux/skbuff.h>
15#include <linux/tcp.h>
16#include <linux/udp.h>
17#include <net/checksum.h>
18#include <net/tcp.h>
1668e010 19#include <net/route.h>
5b1158e9
JK
20
21#include <linux/netfilter_ipv4.h>
22#include <net/netfilter/nf_conntrack.h>
23#include <net/netfilter/nf_conntrack_helper.h>
13eae15a 24#include <net/netfilter/nf_conntrack_ecache.h>
5b1158e9
JK
25#include <net/netfilter/nf_conntrack_expect.h>
26#include <net/netfilter/nf_nat.h>
27#include <net/netfilter/nf_nat_protocol.h>
28#include <net/netfilter/nf_nat_core.h>
29#include <net/netfilter/nf_nat_helper.h>
30
/* Debug helper: dump one direction's TCP sequence adjustment state.
 * Argument is parenthesized and the trailing semicolon is omitted so the
 * macro behaves like a normal statement at its call sites (no stray
 * double semicolon, safe inside unbraced if/else). */
#define DUMP_OFFSET(x) \
	pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
		 (x)->offset_before, (x)->offset_after, (x)->correction_pos)

/* Serializes all reads/updates of nf_nat_seq state across connections. */
static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
36
37/* Setup TCP sequence correction given this change at this sequence */
38static inline void
39adjust_tcp_sequence(u32 seq,
40 int sizediff,
41 struct nf_conn *ct,
42 enum ip_conntrack_info ctinfo)
43{
76ac8940 44 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
5b1158e9 45 struct nf_conn_nat *nat = nfct_nat(ct);
76ac8940 46 struct nf_nat_seq *this_way = &nat->seq[dir];
5b1158e9 47
76ac8940
HE
48 pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
49 seq, sizediff);
5b1158e9 50
76ac8940 51 pr_debug("adjust_tcp_sequence: Seq_offset before: ");
5b1158e9
JK
52 DUMP_OFFSET(this_way);
53
54 spin_lock_bh(&nf_nat_seqofs_lock);
55
56 /* SYN adjust. If it's uninitialized, or this is after last
57 * correction, record it: we don't handle more than one
58 * adjustment in the window, but do deal with common case of a
59 * retransmit */
60 if (this_way->offset_before == this_way->offset_after ||
61 before(this_way->correction_pos, seq)) {
76ac8940
HE
62 this_way->correction_pos = seq;
63 this_way->offset_before = this_way->offset_after;
64 this_way->offset_after += sizediff;
5b1158e9
JK
65 }
66 spin_unlock_bh(&nf_nat_seqofs_lock);
67
76ac8940 68 pr_debug("adjust_tcp_sequence: Seq_offset after: ");
5b1158e9
JK
69 DUMP_OFFSET(this_way);
70}
71
f9dd09c7
JK
72/* Get the offset value, for conntrack */
73s16 nf_nat_get_offset(const struct nf_conn *ct,
74 enum ip_conntrack_dir dir,
75 u32 seq)
76{
77 struct nf_conn_nat *nat = nfct_nat(ct);
78 struct nf_nat_seq *this_way;
79 s16 offset;
80
81 if (!nat)
82 return 0;
83
84 this_way = &nat->seq[dir];
85 spin_lock_bh(&nf_nat_seqofs_lock);
86 offset = after(seq, this_way->correction_pos)
87 ? this_way->offset_after : this_way->offset_before;
88 spin_unlock_bh(&nf_nat_seqofs_lock);
89
90 return offset;
91}
92EXPORT_SYMBOL_GPL(nf_nat_get_offset);
93
5b1158e9
JK
94/* Frobs data inside this packet, which is linear. */
95static void mangle_contents(struct sk_buff *skb,
96 unsigned int dataoff,
97 unsigned int match_offset,
98 unsigned int match_len,
99 const char *rep_buffer,
100 unsigned int rep_len)
101{
102 unsigned char *data;
103
104 BUG_ON(skb_is_nonlinear(skb));
eddc9ec5 105 data = skb_network_header(skb) + dataoff;
5b1158e9
JK
106
107 /* move post-replacement */
108 memmove(data + match_offset + rep_len,
109 data + match_offset + match_len,
27a884dc
ACM
110 skb->tail - (skb->network_header + dataoff +
111 match_offset + match_len));
5b1158e9
JK
112
113 /* insert data from buffer */
114 memcpy(data + match_offset, rep_buffer, rep_len);
115
116 /* update skb info */
117 if (rep_len > match_len) {
0d53778e
PM
118 pr_debug("nf_nat_mangle_packet: Extending packet by "
119 "%u from %u bytes\n", rep_len - match_len, skb->len);
5b1158e9
JK
120 skb_put(skb, rep_len - match_len);
121 } else {
0d53778e
PM
122 pr_debug("nf_nat_mangle_packet: Shrinking packet from "
123 "%u from %u bytes\n", match_len - rep_len, skb->len);
5b1158e9
JK
124 __skb_trim(skb, skb->len + rep_len - match_len);
125 }
126
127 /* fix IP hdr checksum information */
eddc9ec5
ACM
128 ip_hdr(skb)->tot_len = htons(skb->len);
129 ip_send_check(ip_hdr(skb));
5b1158e9
JK
130}
131
132/* Unusual, but possible case. */
3db05fea 133static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
5b1158e9 134{
3db05fea 135 if (skb->len + extra > 65535)
5b1158e9
JK
136 return 0;
137
3db05fea 138 if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
5b1158e9
JK
139 return 0;
140
5b1158e9
JK
141 return 1;
142}
143
/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * Returns 1 on success, 0 on failure (unwritable skb or packet cannot
 * be enlarged).
 * */
int
nf_nat_mangle_tcp_packet(struct sk_buff *skb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;

	/* Need the whole packet writable: mangling may shift the payload. */
	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Only reallocate when the replacement grows the packet beyond
	 * the available tailroom. */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(skb);

	/* Re-read headers: enlarge_skb() may have moved skb->data. */
	iph = ip_hdr(skb);
	tcph = (void *)iph + iph->ihl*4;

	/* TCP segment length (header + payload) before mangling. */
	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	datalen = skb->len - iph->ihl*4;
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Hand the checksum off to hardware if the device can do
		 * it and the packet is not destined to the local stack. */
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct tcphdr, check);
			/* Seed with the pseudo-header checksum only; the
			 * device fills in the rest. */
			tcph->check = ~tcp_v4_check(datalen,
						    iph->saddr, iph->daddr, 0);
		} else {
			/* Full software recompute over the new segment. */
			tcph->check = 0;
			tcph->check = tcp_v4_check(datalen,
						   iph->saddr, iph->daddr,
						   csum_partial(tcph,
								datalen, 0));
		}
	} else
		/* Already CHECKSUM_PARTIAL: only the length field of the
		 * pseudo-header changed, so patch the difference. */
		inet_proto_csum_replace2(&tcph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	if (rep_len != match_len) {
		/* Payload size changed: record the sequence shift so later
		 * packets (and ACKs) can be corrected, and notify listeners. */
		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
		adjust_tcp_sequence(ntohl(tcph->seq),
				    (int)rep_len - (int)match_len,
				    ct, ctinfo);
		nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
	}
	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_tcp_packet);
215
/* Generic function for mangling variable-length address changes inside
 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
 * command in the Amanda protocol)
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
 * should be fairly easy to do.
 *
 * Returns 1 on success, 0 on failure (short packet, unwritable skb, or
 * packet cannot be enlarged).
 */
int
nf_nat_mangle_udp_packet(struct sk_buff *skb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct udphdr *udph;
	int datalen, oldlen;

	/* UDP helpers might accidentally mangle the wrong packet */
	iph = ip_hdr(skb);
	if (skb->len < iph->ihl*4 + sizeof(*udph) +
			       match_offset + match_len)
		return 0;

	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Grow only when the replacement exceeds available tailroom. */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	/* Re-read headers: enlarge_skb() may have moved skb->data. */
	iph = ip_hdr(skb);
	udph = (void *)iph + iph->ihl*4;

	/* UDP datagram length (header + payload) before mangling. */
	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
			match_offset, match_len, rep_buffer, rep_len);

	/* update the length of the UDP packet */
	datalen = skb->len - iph->ihl*4;
	udph->len = htons(datalen);

	/* fix udp checksum if udp checksum was previously calculated
	 * (a zero UDP checksum means "none" on IPv4, so nothing to fix) */
	if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
		return 1;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Offload to hardware when possible and the packet is not
		 * destined to the local stack. */
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct udphdr, check);
			/* Seed with the pseudo-header checksum only. */
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 datalen, IPPROTO_UDP,
							 0);
		} else {
			/* Full software recompute over the new datagram. */
			udph->check = 0;
			udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
							datalen, IPPROTO_UDP,
							csum_partial(udph,
								     datalen, 0));
			/* 0 means "no checksum" on the wire; a computed
			 * zero must be transmitted as all-ones. */
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
	} else
		/* Only the pseudo-header length changed; patch the delta. */
		inet_proto_csum_replace2(&udph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
296
/* Adjust one found SACK option including checksum correction.
 *
 * Walks the SACK blocks between sackoff and sackend, rewriting each
 * start/end edge by offset_after or offset_before depending on whether
 * the edge lies beyond natseq->correction_pos, and patching the TCP
 * checksum for every field changed.  The offsets are subtracted because
 * SACK edges live in the peer's sequence space (like ack numbers). */
static void
sack_adjust(struct sk_buff *skb,
	    struct tcphdr *tcph,
	    unsigned int sackoff,
	    unsigned int sackend,
	    struct nf_nat_seq *natseq)
{
	while (sackoff < sackend) {
		struct tcp_sack_block_wire *sack;
		__be32 new_start_seq, new_end_seq;

		sack = (void *)skb->data + sackoff;
		/* Un-shift by offset_before first so the comparison against
		 * correction_pos happens in the original sequence space. */
		if (after(ntohl(sack->start_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_start_seq = htonl(ntohl(sack->start_seq)
					      - natseq->offset_after);
		else
			new_start_seq = htonl(ntohl(sack->start_seq)
					      - natseq->offset_before);

		if (after(ntohl(sack->end_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_end_seq = htonl(ntohl(sack->end_seq)
					    - natseq->offset_after);
		else
			new_end_seq = htonl(ntohl(sack->end_seq)
					    - natseq->offset_before);

		pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
			 ntohl(sack->start_seq), new_start_seq,
			 ntohl(sack->end_seq), new_end_seq);

		/* Patch the checksum field-by-field before storing. */
		inet_proto_csum_replace4(&tcph->check, skb,
					 sack->start_seq, new_start_seq, 0);
		inet_proto_csum_replace4(&tcph->check, skb,
					 sack->end_seq, new_end_seq, 0);
		sack->start_seq = new_start_seq;
		sack->end_seq = new_end_seq;
		sackoff += sizeof(*sack);
	}
}
339
/* TCP SACK sequence number adjustment.
 *
 * Scans the TCP option area for SACK options and rewrites their edges
 * via sack_adjust().  Uses the *opposite* direction's adjustment state
 * (nat->seq[!dir]) because SACK blocks acknowledge the peer's sequence
 * space.  Returns 1 on success (or no SACK present), 0 on malformed
 * options or an unwritable skb. */
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff *skb,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	/* Options start right after the fixed TCP header and run to doff. */
	optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
	optend = ip_hdrlen(skb) + tcph->doff * 4;

	if (!skb_make_writable(skb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = skb->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:
			optoff++;
			continue;
		default:
			/* no partial options */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
			/* SACK payload must be a whole number of blocks. */
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(skb, tcph, optoff+2,
					    optoff+op[1], &nat->seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}
384
/* TCP sequence number adjustment. Returns 1 on success, 0 on failure.
 *
 * Adds this direction's offset to the sequence number and subtracts the
 * opposite direction's offset from the ack number (acks refer to the
 * peer's sequence space), fixes the checksum, then corrects any SACK
 * blocks. */
int
nf_nat_seq_adjust(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	s16 seqoff, ackoff;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->seq[dir];
	other_way = &nat->seq[!dir];

	if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
		return 0;

	tcph = (void *)skb->data + ip_hdrlen(skb);
	/* Pick the offset that applies at this point in the stream. */
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		seqoff = this_way->offset_after;
	else
		seqoff = this_way->offset_before;

	/* For the ack, un-shift by offset_before before comparing so the
	 * test runs in the peer's original sequence space. */
	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		ackoff = other_way->offset_after;
	else
		ackoff = other_way->offset_before;

	newseq = htonl(ntohl(tcph->seq) + seqoff);
	newack = htonl(ntohl(tcph->ack_seq) - ackoff);

	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);

	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
		 ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
}
5b1158e9
JK
433
/* Setup NAT on this expected conntrack so it follows master. */
/* If we fail to get a free NAT slot, we'll get dropped on confirm */
void nf_nat_follow_master(struct nf_conn *ct,
			  struct nf_conntrack_expect *exp)
{
	struct nf_nat_range range;

	/* This must be a fresh one. */
	BUG_ON(ct->status & IPS_NAT_DONE_MASK);

	/* Change src to where master sends to: single-address range,
	 * no port restriction for the SRC manip. */
	range.flags = IP_NAT_RANGE_MAP_IPS;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);

	/* For DST manip, map port here to where it's expected. */
	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
	range.min = range.max = exp->saved_proto;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
}
EXPORT_SYMBOL(nf_nat_follow_master);
This page took 0.338549 seconds and 5 git commands to generate.