/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>

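/* gre_gso_segment() is the GSO callback for GRE-encapsulated packets: it
 * strips the tunnel header, hands the inner packet to the inner protocol's
 * segmentation routine, and then rebuilds the outer headers on each
 * resulting segment, recomputing the optional GRE checksum in software
 * when one was requested.
 */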
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;
	bool need_csum, ufo;

	if (unlikely(skb_shinfo(skb)->gso_type &
		     ~(SKB_GSO_TCPV4 |
		       SKB_GSO_TCPV6 |
		       SKB_GSO_UDP |
		       SKB_GSO_DODGY |
		       SKB_GSO_TCP_ECN |
		       SKB_GSO_GRE |
		       SKB_GSO_GRE_CSUM |
		       SKB_GSO_IPIP |
		       SKB_GSO_SIT)))
		goto out;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

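	/* UFO means the inner payload is UDP being fragmented rather than
	 * TCP being segmented; the checksum feature handling below differs
	 * in that case.
	 */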
	ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one, so strip the existing checksum feature flags on the
	 * grounds that we will be computing the checksum in software.
	 */
	if (ufo) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

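	/* Walk the segment list and rebuild the tunnel encapsulation: push
	 * the saved outer headers back onto each segment and point the
	 * header offsets at the outer packet again.
	 */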
	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__be32 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__be32 *)(greh + 1);

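		/* The GRE checksum field sits immediately after the base
		 * header; zero it first so that the field itself is not
		 * included in the computed checksum.
		 */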
		*pcsum = 0;
		*(__sum16 *)pcsum = gso_make_checksum(skb, 0);
	} while ((skb = skb->next));
out:
	return segs;
}

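/* gre_gro_receive() is the GRO receive callback for IPPROTO_GRE: it parses
 * and validates the GRE header, flags candidate packets that do not belong
 * to the same tunnel as not being part of the same flow, and then hands the
 * inner packet to the offload handler for the encapsulated protocol.
 */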
static struct sk_buff **gre_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

	if (NAPI_GRO_CB(skb)->encap_mark)
		goto out;

	NAPI_GRO_CB(skb)->encap_mark = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and the K (key) and C (csum) flags. Note
	 * that although support for the S (seq#) flag could be added easily
	 * for GRO, it cannot be enabled here because it is problematic for
	 * GSO: a GRO'd packet may end up in the forwarding path and would
	 * then require GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	/* We can only support GRE_CSUM if we can track the location of
	 * the GRE header.  In the case of FOU/GUE we cannot because the
	 * outer UDP header displaces the GRE header, leaving us in a state
	 * of limbo.
	 */
	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
		goto out;

	type = greh->protocol;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

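	/* GRE_HEADER_SECTION is four bytes; the optional checksum and key
	 * fields each extend the header by one section when the
	 * corresponding flag is set.
	 */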
	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out_unlock;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out_unlock;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0,
					     null_compute_pseudo);
	}

	for (p = *head; p; p = p->next) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure that only
		 * packets from the same tunnel are considered for
		 * aggregation.  The criteria for "the same tunnel" include:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2 + 1) != *(__be32 *)(greh + 1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

	skb_gro_pull(skb, grehlen);

	/* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull(). */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = ptype->callbacks.gro_receive(head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

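/* gre_gro_complete() finalizes a merged GRO packet: it marks the skb as
 * GRE-encapsulated, skips past the (possibly key/csum-extended) GRE header,
 * and lets the inner protocol's gro_complete callback fix up its headers.
 */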
static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	unsigned int grehlen = sizeof(*greh);
	int err = -ENOENT;
	__be16 type;

	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

	type = greh->protocol;
	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	rcu_read_unlock();

	skb_set_inner_mac_header(skb, nhoff + grehlen);

	return err;
}

static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};

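/* Register the GRE offload handlers with the IPv4 offload table at boot;
 * from then on the stack dispatches to them for IPPROTO_GRE packets.
 */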
static int __init gre_offload_init(void)
{
	return inet_add_offload(&gre_offload, IPPROTO_GRE);
}
device_initcall(gre_offload_init);