net/bridge/netfilter/nft_reject_bridge.c (from deliverable/linux.git)
/*
 * Copyright (c) 2014 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>
#include <linux/netfilter_bridge.h>
#include "../br_private.h"

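/* Build the Ethernet header of the reject reply: reuse the original frame's
 * addresses with source and destination swapped and keep the original
 * EtherType. The header is pushed and then pulled again so that nskb->data
 * points at the network header when the packet is handed to the bridge for
 * delivery.
 */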
static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
					struct sk_buff *nskb)
{
	struct ethhdr *eth;

	eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
	skb_reset_mac_header(nskb);
	ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
	ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
	eth->h_proto = eth_hdr(oldskb)->h_proto;
	skb_pull(nskb, ETH_HLEN);
}

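/* Sanity-check the IPv4 header of the packet being rejected: it must be
 * linearly accessible, carry version 4 with a header length of at least
 * 20 bytes, and its total length must be consistent with the skb length.
 * Returns 1 if the header looks valid, 0 otherwise.
 */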
static int nft_reject_iphdr_validate(struct sk_buff *oldskb)
{
	struct iphdr *iph;
	u32 len;

	if (!pskb_may_pull(oldskb, sizeof(struct iphdr)))
		return 0;

	iph = ip_hdr(oldskb);
	if (iph->ihl < 5 || iph->version != 4)
		return 0;

	len = ntohs(iph->tot_len);
	if (oldskb->len < len)
		return 0;
	else if (len < (iph->ihl * 4))
		return 0;

	if (!pskb_may_pull(oldskb, iph->ihl * 4))
		return 0;

	return 1;
}

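/* Send a TCP RST in reply to an IPv4 TCP packet. The reset is built with the
 * helpers from <net/netfilter/ipv4/nf_reject.h> and, instead of being routed,
 * is delivered straight back through the bridge port the original packet
 * arrived on.
 */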
static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	const struct tcphdr *oth;
	struct tcphdr _oth;

	if (!nft_reject_iphdr_validate(oldskb))
		return;

	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   sysctl_ip_default_ttl);
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	niph->ttl = sysctl_ip_default_ttl;
	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}

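/* Send an ICMPv4 destination unreachable message with the given code in reply
 * to an IPv4 packet. Fragments and packets with a bad checksum are ignored;
 * as much of the offending packet as allowed is echoed back in the ICMP
 * payload before the reply is delivered through the ingress bridge port.
 */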
static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
					  u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	void *payload;
	__wsum csum;

	if (!nft_reject_iphdr_validate(oldskb))
		return;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   sysctl_ip_default_ttl);

	skb_reset_transport_header(nskb);
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	memset(icmph, 0, sizeof(*icmph));
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = code;

	payload = skb_put(nskb, len);
	memcpy(payload, skb_network_header(oldskb), len);

	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}

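/* Sanity-check the IPv6 header of the packet being rejected: the fixed header
 * must be linearly accessible, the version field must be 6, and the payload
 * length must not exceed what the skb actually holds. Returns 1 if the header
 * looks valid, 0 otherwise.
 */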
static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb)
{
	struct ipv6hdr *hdr;
	u32 pkt_len;

	if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr)))
		return 0;

	hdr = ipv6_hdr(oldskb);
	if (hdr->version != 6)
		return 0;

	pkt_len = ntohs(hdr->payload_len);
	if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len)
		return 0;

	return 1;
}

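/* Send a TCP RST in reply to an IPv6 TCP packet, using the helpers from
 * <net/netfilter/ipv6/nf_reject.h>. The hop limit is taken from the
 * all-devices IPv6 configuration, and the reply is delivered back through the
 * ingress bridge port rather than being routed.
 */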
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
					    struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	const struct tcphdr *oth;
	struct tcphdr _oth;
	unsigned int otcplen;
	struct ipv6hdr *nip6h;

	if (!nft_reject_ip6hdr_validate(oldskb))
		return;

	oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
				     net->ipv6.devconf_all->hop_limit);
	nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}

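/* Send an ICMPv6 destination unreachable message with the given code in reply
 * to an IPv6 packet, echoing as much of the offending packet as fits within
 * the minimum IPv6 MTU. The reply is delivered through the ingress bridge
 * port.
 */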
static void nft_reject_br_send_v6_unreach(struct net *net,
					  struct sk_buff *oldskb, int hook,
					  u8 code)
{
	struct sk_buff *nskb;
	struct ipv6hdr *nip6h;
	struct icmp6hdr *icmp6h;
	unsigned int len;
	void *payload;

	if (!nft_reject_ip6hdr_validate(oldskb))
		return;

	/* Include "As much of invoking packet as possible without the ICMPv6
	 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
	 */
	len = min_t(unsigned int, 1220, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	/* Room for the reply: IPv6 header, ICMPv6 header, echoed payload and
	 * link-layer header.
	 */
	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
				     net->ipv6.devconf_all->hop_limit);

	skb_reset_transport_header(nskb);
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	memset(icmp6h, 0, sizeof(*icmp6h));
	icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
	icmp6h->icmp6_code = code;

	payload = skb_put(nskb, len);
	memcpy(payload, skb_network_header(oldskb), len);
	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

	icmp6h->icmp6_cksum =
		csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
				nskb->len - sizeof(struct ipv6hdr),
				IPPROTO_ICMPV6,
				csum_partial(icmp6h,
					     nskb->len - sizeof(struct ipv6hdr),
					     0));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}

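/* Expression evaluation: build and send the configured reject reply for IPv4
 * or IPv6 traffic seen on the bridge. Broadcast and multicast frames are
 * never answered, other EtherTypes cannot be rejected explicitly, and in
 * every case the original packet is dropped.
 */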
static void nft_reject_bridge_eval(const struct nft_expr *expr,
				   struct nft_data data[NFT_REG_MAX + 1],
				   const struct nft_pktinfo *pkt)
{
	struct nft_reject *priv = nft_expr_priv(expr);
	struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;

	if (is_broadcast_ether_addr(dest) ||
	    is_multicast_ether_addr(dest))
		goto out;

	switch (eth_hdr(pkt->skb)->h_proto) {
	case htons(ETH_P_IP):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v4_unreach(pkt->skb,
						      pkt->ops->hooknum,
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v4_tcp_reset(pkt->skb,
							pkt->ops->hooknum);
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			nft_reject_br_send_v4_unreach(pkt->skb,
						      pkt->ops->hooknum,
						      nft_reject_icmp_code(priv->icmp_code));
			break;
		}
		break;
	case htons(ETH_P_IPV6):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v6_unreach(net, pkt->skb,
						      pkt->ops->hooknum,
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
							pkt->ops->hooknum);
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			nft_reject_br_send_v6_unreach(net, pkt->skb,
						      pkt->ops->hooknum,
						      nft_reject_icmpv6_code(priv->icmp_code));
			break;
		}
		break;
	default:
		/* No explicit way to reject this protocol, drop it. */
		break;
	}
out:
	data[NFT_REG_VERDICT].verdict = NF_DROP;
}

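/* The bridge reject expression is only supported from base chains attached to
 * the prerouting and input bridge hooks; anywhere else is refused with
 * -EOPNOTSUPP (likely because replies are injected back through the ingress
 * port, which is only well defined at those hooks).
 */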
static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
{
	struct nft_base_chain *basechain;

	if (chain->flags & NFT_BASE_CHAIN) {
		basechain = nft_base_chain(chain);

		switch (basechain->ops[0].hooknum) {
		case NF_BR_PRE_ROUTING:
		case NF_BR_LOCAL_IN:
			break;
		default:
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

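/* Parse the netlink attributes for this expression: the reject type is
 * mandatory, ICMP-based types also need an ICMP code (bounded for the
 * protocol-independent NFT_REJECT_ICMPX_UNREACH variant), and the hook
 * restriction above is enforced at rule creation time as well.
 */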
static int nft_reject_bridge_init(const struct nft_ctx *ctx,
				  const struct nft_expr *expr,
				  const struct nlattr * const tb[])
{
	struct nft_reject *priv = nft_expr_priv(expr);
	int icmp_code, err;

	err = nft_reject_bridge_validate_hooks(ctx->chain);
	if (err < 0)
		return err;

	if (tb[NFTA_REJECT_TYPE] == NULL)
		return -EINVAL;

	priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
	switch (priv->type) {
	case NFT_REJECT_ICMP_UNREACH:
	case NFT_REJECT_ICMPX_UNREACH:
		if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
			return -EINVAL;

		icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
		if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
		    icmp_code > NFT_REJECT_ICMPX_MAX)
			return -EINVAL;

		priv->icmp_code = icmp_code;
		break;
	case NFT_REJECT_TCP_RST:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int nft_reject_bridge_dump(struct sk_buff *skb,
				  const struct nft_expr *expr)
{
	const struct nft_reject *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
		goto nla_put_failure;

	switch (priv->type) {
	case NFT_REJECT_ICMP_UNREACH:
	case NFT_REJECT_ICMPX_UNREACH:
		if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
			goto nla_put_failure;
		break;
	}

	return 0;

nla_put_failure:
	return -1;
}

static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
				      const struct nft_expr *expr,
				      const struct nft_data **data)
{
	return nft_reject_bridge_validate_hooks(ctx->chain);
}

static struct nft_expr_type nft_reject_bridge_type;
static const struct nft_expr_ops nft_reject_bridge_ops = {
	.type		= &nft_reject_bridge_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
	.eval		= nft_reject_bridge_eval,
	.init		= nft_reject_bridge_init,
	.dump		= nft_reject_bridge_dump,
	.validate	= nft_reject_bridge_validate,
};

static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
	.family		= NFPROTO_BRIDGE,
	.name		= "reject",
	.ops		= &nft_reject_bridge_ops,
	.policy		= nft_reject_policy,
	.maxattr	= NFTA_REJECT_MAX,
	.owner		= THIS_MODULE,
};

static int __init nft_reject_bridge_module_init(void)
{
	return nft_register_expr(&nft_reject_bridge_type);
}

static void __exit nft_reject_bridge_module_exit(void)
{
	nft_unregister_expr(&nft_reject_bridge_type);
}

module_init(nft_reject_bridge_module_init);
module_exit(nft_reject_bridge_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
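
/* Example usage (illustrative only; the exact syntax depends on the nftables
 * userspace version in use). With this module loaded, a bridge-family rule
 * along these lines would exercise nft_reject_bridge_eval() above:
 *
 *	nft add table bridge filter
 *	nft add chain bridge filter input { type filter hook input priority 0 \; }
 *	nft add rule bridge filter input tcp dport 25 reject with tcp reset
 */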