/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
11 #include <linux/kernel.h>
12 #include <linux/socket.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/printk.h>
17 #include <net/protocol.h>
20 #include "ip6_offload.h"
22 static int ipv6_gso_pull_exthdrs(struct sk_buff
*skb
, int proto
)
24 const struct net_offload
*ops
= NULL
;
27 struct ipv6_opt_hdr
*opth
;
30 if (proto
!= NEXTHDR_HOP
) {
31 ops
= rcu_dereference(inet6_offloads
[proto
]);
36 if (!(ops
->flags
& INET6_PROTO_GSO_EXTHDR
))
40 if (unlikely(!pskb_may_pull(skb
, 8)))
43 opth
= (void *)skb
->data
;
44 len
= ipv6_optlen(opth
);
46 if (unlikely(!pskb_may_pull(skb
, len
)))
49 opth
= (void *)skb
->data
;
50 proto
= opth
->nexthdr
;
57 static struct sk_buff
*ipv6_gso_segment(struct sk_buff
*skb
,
58 netdev_features_t features
)
60 struct sk_buff
*segs
= ERR_PTR(-EINVAL
);
61 struct ipv6hdr
*ipv6h
;
62 const struct net_offload
*ops
;
64 struct frag_hdr
*fptr
;
65 unsigned int unfrag_ip6hlen
;
66 unsigned int payload_len
;
72 if (unlikely(skb_shinfo(skb
)->gso_type
&
84 SKB_GSO_UDP_TUNNEL_CSUM
|
85 SKB_GSO_TUNNEL_REMCSUM
|
90 skb_reset_network_header(skb
);
91 nhoff
= skb_network_header(skb
) - skb_mac_header(skb
);
92 if (unlikely(!pskb_may_pull(skb
, sizeof(*ipv6h
))))
95 encap
= SKB_GSO_CB(skb
)->encap_level
> 0;
97 features
&= skb
->dev
->hw_enc_features
;
98 SKB_GSO_CB(skb
)->encap_level
+= sizeof(*ipv6h
);
100 ipv6h
= ipv6_hdr(skb
);
101 __skb_pull(skb
, sizeof(*ipv6h
));
102 segs
= ERR_PTR(-EPROTONOSUPPORT
);
104 proto
= ipv6_gso_pull_exthdrs(skb
, ipv6h
->nexthdr
);
106 if (skb
->encapsulation
&&
107 skb_shinfo(skb
)->gso_type
& (SKB_GSO_SIT
|SKB_GSO_IPIP
))
108 udpfrag
= proto
== IPPROTO_UDP
&& encap
;
110 udpfrag
= proto
== IPPROTO_UDP
&& !skb
->encapsulation
;
112 ops
= rcu_dereference(inet6_offloads
[proto
]);
113 if (likely(ops
&& ops
->callbacks
.gso_segment
)) {
114 skb_reset_transport_header(skb
);
115 segs
= ops
->callbacks
.gso_segment(skb
, features
);
121 for (skb
= segs
; skb
; skb
= skb
->next
) {
122 ipv6h
= (struct ipv6hdr
*)(skb_mac_header(skb
) + nhoff
);
124 payload_len
= skb_shinfo(skb
)->gso_size
+
125 SKB_GSO_CB(skb
)->data_offset
+
126 skb
->head
- (unsigned char *)(ipv6h
+ 1);
128 payload_len
= skb
->len
- nhoff
- sizeof(*ipv6h
);
129 ipv6h
->payload_len
= htons(payload_len
);
130 skb
->network_header
= (u8
*)ipv6h
- skb
->head
;
133 unfrag_ip6hlen
= ip6_find_1stfragopt(skb
, &prevhdr
);
134 fptr
= (struct frag_hdr
*)((u8
*)ipv6h
+ unfrag_ip6hlen
);
135 fptr
->frag_off
= htons(offset
);
137 fptr
->frag_off
|= htons(IP6_MF
);
138 offset
+= (ntohs(ipv6h
->payload_len
) -
139 sizeof(struct frag_hdr
));
142 skb_reset_inner_headers(skb
);
149 /* Return the total length of all the extension hdrs, following the same
150 * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
152 static int ipv6_exthdrs_len(struct ipv6hdr
*iph
,
153 const struct net_offload
**opps
)
155 struct ipv6_opt_hdr
*opth
= (void *)iph
;
156 int len
= 0, proto
, optlen
= sizeof(*iph
);
158 proto
= iph
->nexthdr
;
160 if (proto
!= NEXTHDR_HOP
) {
161 *opps
= rcu_dereference(inet6_offloads
[proto
]);
162 if (unlikely(!(*opps
)))
164 if (!((*opps
)->flags
& INET6_PROTO_GSO_EXTHDR
))
167 opth
= (void *)opth
+ optlen
;
168 optlen
= ipv6_optlen(opth
);
170 proto
= opth
->nexthdr
;
175 static struct sk_buff
**ipv6_gro_receive(struct sk_buff
**head
,
178 const struct net_offload
*ops
;
179 struct sk_buff
**pp
= NULL
;
188 off
= skb_gro_offset(skb
);
189 hlen
= off
+ sizeof(*iph
);
190 iph
= skb_gro_header_fast(skb
, off
);
191 if (skb_gro_header_hard(skb
, hlen
)) {
192 iph
= skb_gro_header_slow(skb
, hlen
, off
);
197 skb_set_network_header(skb
, off
);
198 skb_gro_pull(skb
, sizeof(*iph
));
199 skb_set_transport_header(skb
, skb_gro_offset(skb
));
201 flush
+= ntohs(iph
->payload_len
) != skb_gro_len(skb
);
204 proto
= iph
->nexthdr
;
205 ops
= rcu_dereference(inet6_offloads
[proto
]);
206 if (!ops
|| !ops
->callbacks
.gro_receive
) {
207 __pskb_pull(skb
, skb_gro_offset(skb
));
208 proto
= ipv6_gso_pull_exthdrs(skb
, proto
);
209 skb_gro_pull(skb
, -skb_transport_offset(skb
));
210 skb_reset_transport_header(skb
);
211 __skb_push(skb
, skb_gro_offset(skb
));
213 ops
= rcu_dereference(inet6_offloads
[proto
]);
214 if (!ops
|| !ops
->callbacks
.gro_receive
)
220 NAPI_GRO_CB(skb
)->proto
= proto
;
223 nlen
= skb_network_header_len(skb
);
225 for (p
= *head
; p
; p
= p
->next
) {
226 const struct ipv6hdr
*iph2
;
227 __be32 first_word
; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
229 if (!NAPI_GRO_CB(p
)->same_flow
)
232 iph2
= (struct ipv6hdr
*)(p
->data
+ off
);
233 first_word
= *(__be32
*)iph
^ *(__be32
*)iph2
;
235 /* All fields must match except length and Traffic Class.
236 * XXX skbs on the gro_list have all been parsed and pulled
237 * already so we don't need to compare nlen
238 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
239 * memcmp() alone below is suffcient, right?
241 if ((first_word
& htonl(0xF00FFFFF)) ||
242 memcmp(&iph
->nexthdr
, &iph2
->nexthdr
,
243 nlen
- offsetof(struct ipv6hdr
, nexthdr
))) {
244 NAPI_GRO_CB(p
)->same_flow
= 0;
247 /* flush if Traffic Class fields are different */
248 NAPI_GRO_CB(p
)->flush
|= !!(first_word
& htonl(0x0FF00000));
249 NAPI_GRO_CB(p
)->flush
|= flush
;
251 /* If the previous IP ID value was based on an atomic
252 * datagram we can overwrite the value and ignore it.
254 if (NAPI_GRO_CB(skb
)->is_atomic
)
255 NAPI_GRO_CB(p
)->flush_id
= 0;
258 NAPI_GRO_CB(skb
)->is_atomic
= true;
259 NAPI_GRO_CB(skb
)->flush
|= flush
;
261 skb_gro_postpull_rcsum(skb
, iph
, nlen
);
263 pp
= ops
->callbacks
.gro_receive(head
, skb
);
269 NAPI_GRO_CB(skb
)->flush
|= flush
;
274 static struct sk_buff
**sit_gro_receive(struct sk_buff
**head
,
277 if (NAPI_GRO_CB(skb
)->encap_mark
) {
278 NAPI_GRO_CB(skb
)->flush
= 1;
282 NAPI_GRO_CB(skb
)->encap_mark
= 1;
284 return ipv6_gro_receive(head
, skb
);
287 static int ipv6_gro_complete(struct sk_buff
*skb
, int nhoff
)
289 const struct net_offload
*ops
;
290 struct ipv6hdr
*iph
= (struct ipv6hdr
*)(skb
->data
+ nhoff
);
293 if (skb
->encapsulation
)
294 skb_set_inner_network_header(skb
, nhoff
);
296 iph
->payload_len
= htons(skb
->len
- nhoff
- sizeof(*iph
));
300 nhoff
+= sizeof(*iph
) + ipv6_exthdrs_len(iph
, &ops
);
301 if (WARN_ON(!ops
|| !ops
->callbacks
.gro_complete
))
304 err
= ops
->callbacks
.gro_complete(skb
, nhoff
);
312 static int sit_gro_complete(struct sk_buff
*skb
, int nhoff
)
314 skb
->encapsulation
= 1;
315 skb_shinfo(skb
)->gso_type
|= SKB_GSO_SIT
;
316 return ipv6_gro_complete(skb
, nhoff
);
319 static struct packet_offload ipv6_packet_offload __read_mostly
= {
320 .type
= cpu_to_be16(ETH_P_IPV6
),
322 .gso_segment
= ipv6_gso_segment
,
323 .gro_receive
= ipv6_gro_receive
,
324 .gro_complete
= ipv6_gro_complete
,
328 static const struct net_offload sit_offload
= {
330 .gso_segment
= ipv6_gso_segment
,
331 .gro_receive
= sit_gro_receive
,
332 .gro_complete
= sit_gro_complete
,
336 static int __init
ipv6_offload_init(void)
339 if (tcpv6_offload_init() < 0)
340 pr_crit("%s: Cannot add TCP protocol offload\n", __func__
);
341 if (ipv6_exthdrs_offload_init() < 0)
342 pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__
);
344 dev_add_offload(&ipv6_packet_offload
);
346 inet_add_offload(&sit_offload
, IPPROTO_IPV6
);
351 fs_initcall(ipv6_offload_init
);