/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "vport.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};
static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);
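
/*
 * Recursive actions (recirculation, non-trivial sample) are not run on
 * the kernel stack.  Instead they are queued on the per-CPU FIFO above
 * and drained by the outermost execution, which keeps stack usage
 * bounded however deeply packets recirculate.  exec_actions_level
 * tracks the nesting depth so that only level 0 drains the queue.
 */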
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}
static bool action_fifo_is_empty(struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}
static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
/* Queue a deferred action; returns the queue entry, or NULL if the
 * fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}
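
/*
 * make_writable() guarantees that the first write_len bytes of the
 * packet are in the linear data area and private to this skb: cloned
 * buffers share their data, so they must be copied (copy-on-write)
 * before any header field below is modified in place.
 */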
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
/* remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;
	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_len(skb);

	return 0;
}
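
/*
 * pop_vlan() handles both places a tag can live: a tag held out-of-band
 * in the skb (vlan_tx_tag_present) is simply cleared, while an
 * in-packet tag is stripped with __pop_vlan_tci().  If a second 802.1Q
 * tag (QinQ) remains in the packet afterwards, it is promoted to the
 * out-of-band hw-accel slot.
 */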
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
	return 0;
}
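
/*
 * push_vlan() is the mirror image: an existing out-of-band tag is first
 * written into the packet data (__vlan_put_tag), then the new tag is
 * stored out-of-band.  VLAN_TAG_PRESENT is masked out of the new TCI
 * because it is an in-kernel flag bit, not part of the on-wire tag.
 */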
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
			return -ENOMEM;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid,
			       ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}
static int set_eth_addr(struct sk_buff *skb,
			const struct ovs_key_ethernet *eth_key)
{
	int err;

	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	return 0;
}
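
/*
 * Rewriting an IP address must also fix up any L4 checksum that covers
 * the pseudo-header.  A UDP checksum of zero means "none", so it is
 * only updated when already in use; a recomputed value of zero is
 * stored as CSUM_MANGLED_0 (0xffff), which is equivalent on the wire.
 */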
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
	}
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
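
/*
 * The IPv6 traffic class and flow label straddle byte boundaries in the
 * header: the top 4 TC bits live in ipv6hdr.priority, the low 4 TC bits
 * in the high nibble of flow_lbl[0], and the 20-bit flow label fills
 * the remaining nibble plus flow_lbl[1..2].  The two helpers below
 * merge new values in without disturbing the neighbouring bits.
 */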
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
	nh->priority = tc >> 4;
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
	nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
	nh->flow_lbl[2] = fl & 0x000000FF;
}
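
/*
 * The IPv4 TTL shares a 16-bit checksum word with the protocol field.
 * Because the incremental update only depends on the difference between
 * the old and new words, the unchanged protocol byte can be treated as
 * zero: htons(ttl << 8) builds both words with just the TTL in place.
 */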
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	if (ipv4_key->ipv4_src != nh->saddr)
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

	if (ipv4_key->ipv4_dst != nh->daddr)
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

	if (ipv4_key->ipv4_tos != nh->tos)
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

	if (ipv4_key->ipv4_ttl != nh->ttl)
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

	return 0;
}
static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
	struct ipv6hdr *nh;
	int err;
	__be32 *saddr;
	__be32 *daddr;

	err = make_writable(skb, skb_network_offset(skb) +
			    sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	saddr = (__be32 *)&nh->saddr;
	daddr = (__be32 *)&nh->daddr;

	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
			      ipv6_key->ipv6_src, true);

	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;

		if (ipv6_ext_hdr(nh->nexthdr))
			recalc_csum = ipv6_find_hdr(skb, &offset,
						    NEXTHDR_ROUTING, NULL,
						    &flags) != NEXTHDR_ROUTING;

		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
			      ipv6_key->ipv6_dst, recalc_csum);
	}

	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
	nh->hop_limit = ipv6_key->ipv6_hlimit;

	return 0;
}
/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_hash(skb);
}
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		*port = new_port;
		skb_clear_hash(skb);
	}
}
static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	if (udp_port_key->udp_src != uh->source)
		set_udp_port(skb, &uh->source, udp_port_key->udp_src);

	if (udp_port_key->udp_dst != uh->dest)
		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);

	return 0;
}
static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	if (tcp_port_key->tcp_src != th->source)
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

	if (tcp_port_key->tcp_dst != th->dest)
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

	return 0;
}
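
/*
 * SCTP uses CRC32c, which cannot be updated incrementally, so the
 * checksum is recomputed over the whole packet before and after the
 * port rewrite.  Storing old_csum ^ old_correct_csum ^ new_csum keeps
 * the packet's original checksum error, if any: a packet that arrived
 * with a bad checksum still has an equally bad one after the rewrite.
 */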
static int set_sctp(struct sk_buff *skb,
		    const struct ovs_key_sctp *sctp_port_key)
{
	struct sctphdr *sh;
	int err;
	unsigned int sctphoff = skb_transport_offset(skb);

	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	if (sctp_port_key->sctp_src != sh->source ||
	    sctp_port_key->sctp_dst != sh->dest) {
		__le32 old_correct_csum, new_csum, old_csum;

		old_csum = sh->checksum;
		old_correct_csum = sctp_compute_cksum(skb, sctphoff);

		sh->source = sctp_port_key->sctp_src;
		sh->dest = sctp_port_key->sctp_dst;

		new_csum = sctp_compute_cksum(skb, sctphoff);

		/* Carry any checksum errors through. */
		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

		skb_clear_hash(skb);
	}

	return 0;
}
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport;

	if (unlikely(!skb))
		return -ENOMEM;

	vport = ovs_vport_rcu(dp, out_port);
	if (unlikely(!vport)) {
		kfree_skb(skb);
		return -ENODEV;
	}

	ovs_vport_send(vport, skb);
	return 0;
}
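
/*
 * output_userspace() builds a dp_upcall_info from the nested attributes
 * of an OVS_ACTION_ATTR_USERSPACE action (optional userdata plus the
 * netlink port id of the receiver) and hands the packet to userspace
 * via ovs_dp_upcall().
 */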
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = key;
	upcall.userdata = NULL;
	upcall.portid = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;
		}
	}

	return ovs_dp_upcall(dp, skb, &upcall);
}
static bool last_action(const struct nlattr *a, int rem)
{
	return a->nla_len == rem;
}
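
/*
 * sample() executes the nested action list with the configured
 * probability.  The common case of a single userspace action is run
 * inline; anything else is deferred through the per-CPU FIFO on a
 * clone of the packet.
 */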
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (prandom_u32() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(rem == 0))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   last_action(a, rem)))
		return output_userspace(dp, skb, key, a);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}
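
/*
 * execute_hash() folds the skb flow hash with the caller-supplied basis
 * so that different hash actions applied to the same packet can yield
 * different values; zero is remapped to 0x1 because a zero
 * ovs_flow_hash means "no hash computed".
 */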
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb,
			      const struct nlattr *nested_attr)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_PRIORITY:
		skb->priority = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		skb->mark = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, nla_data(nested_attr));
		break;
	}

	return err;
}
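
/*
 * execute_recirc() re-extracts the flow key from the (possibly
 * modified) packet and queues it for another pass through the flow
 * table under the given recirc_id.  The skb is cloned unless recirc is
 * the last action, in which case the original can be handed over.
 */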
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;
	int err;

	err = ovs_flow_key_update(skb, key);
	if (err)
		return err;

	if (!last_action(a, rem)) {
		/* Recirc action is not the last action
		 * of the action list, need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that. */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, nla_data(a));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (last_action(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}
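
/*
 * process_deferred_actions() drains the per-CPU FIFO filled by sample()
 * and execute_recirc().  Entries carrying an action list re-enter
 * do_execute_actions(); entries without one (recirculation) go back
 * through ovs_dp_process_packet() for a fresh flow lookup.
 */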
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}
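
/*
 * Action execution may nest (deferred entries themselves run actions),
 * so exec_actions_level counts the depth: only the outermost call, at
 * level 0, drains the deferred-action FIFO.
 */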
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_actions *acts;
	int err;

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	this_cpu_inc(exec_actions_level);
	OVS_CB(skb)->egress_tun_info = NULL;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);
	return err;
}
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}
void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}