#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
/*
 * A macvtap queue is the central object of this driver, it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
};
#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_VNET_LE | IFF_MULTI_QUEUE)
static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(q->flags & IFF_VNET_LE, val);
}

static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
	return __cpu_to_virtio16(q->flags & IFF_VNET_LE, val);
}
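
/*
 * Illustrative sketch (not part of the original driver): the two helpers
 * above key virtio's endianness rules off IFF_VNET_LE in q->flags.  With
 * the flag set, vnet header fields are treated as little-endian regardless
 * of host byte order; without it, the legacy native-endian behaviour of
 * __virtio16_to_cpu()/__cpu_to_virtio16() applies.  The helper below is
 * hypothetical and only shows the intended usage.
 */
static inline u16 example_read_gso_size(struct macvtap_queue *q,
					const struct virtio_net_hdr *h)
{
	/* h->gso_size is a __virtio16 supplied by the guest/user side. */
	return macvtap16_to_cpu(q, h->gso_size);
}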
static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct macvtap_queue),
};
/*
 * Variables for dealing with macvtaps device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}
/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}
/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}
static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;

		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}
/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}
/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}
/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
	int i, j = 0;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		qlist[j++] = q;
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
	}
	for (i = 0; i < vlan->numvtaps; i++)
		RCU_INIT_POINTER(vlan->taps[i], NULL);
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
}
static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(dev, skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			skb_queue_tail(&q->sk.sk_receive_queue, skb);
			goto wake_up;
		}

		kfree_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			skb_queue_tail(&q->sk.sk_receive_queue, segs);
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_ALL_CSUM) &&
		    skb_checksum_help(skb))
			goto drop;
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}
static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}
static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}
static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data);
}
static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}
static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}
static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind		= "macvtap",
	.setup		= macvtap_setup,
	.newlink	= macvtap_newlink,
	.dellink	= macvtap_dellink,
};
static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto out;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);
	if (!q)
		goto out;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * so far only KVM virtio_net uses macvtap, enable zero copy between
	 * guest kernel and host kernel when lower device supports zerocopy
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
	if (err)
		sock_put(&q->sk);

out:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}
static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;

	macvtap_put_queue(q);
	return 0;
}
static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
				     struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
				     current->comm);
			gso_type = SKB_GSO_UDP;
			if (skb->protocol == htons(ETH_P_IPV6))
				ipv6_proxy_select_ident(skb);
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start),
					  macvtap16_to_cpu(q, vnet_hdr->csum_offset)))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size);
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
	return 0;
}
static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
				    const struct sk_buff *skb,
				    struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb));
		vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size);
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else
			BUG();
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		if (vlan_tx_tag_present(skb))
			vnet_hdr->csum_start = cpu_to_macvtap16(q,
				skb_checksum_start_offset(skb) + VLAN_HLEN);
		else
			vnet_hdr->csum_start = cpu_to_macvtap16(q,
				skb_checksum_start_offset(skb));
		vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset);
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */
}
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				const struct iovec *iv, unsigned long total_len,
				size_t count, int noblock)
{
	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
					  sizeof(vnet_hdr));
		if (err < 0)
			goto err;
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    macvtap16_to_cpu(q, vnet_hdr.csum_start) +
		    macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			    macvtap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_macvtap16(q,
				macvtap16_to_cpu(q, vnet_hdr.csum_start) +
				macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	err = -EMSGSIZE;
	if (unlikely(count > UIO_MAXIOV))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		copylen = vnet_hdr.hdr_len ?
			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		if (iov_pages(iv, vnet_hdr_len + copylen, count)
		    <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
			linear = good_linear;
		else
			linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
	}

	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
	else {
		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
						   len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr);
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}
static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	ssize_t result = -ENOLINK;
	struct macvtap_queue *q = file->private_data;

	result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
				  file->f_flags & O_NONBLOCK);
	return result;
}
/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				const struct iovec *iv, int len)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int copied, total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;

		vnet_hdr_len = q->vnet_hdr_sz;
		if ((len -= vnet_hdr_len) < 0)
			return -EINVAL;

		macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr);

		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
			return -EFAULT;
	}
	total = copied = vnet_hdr_len;
	total += skb->len;

	if (!vlan_tx_tag_present(skb))
		len = min_t(int, skb->len, len);
	else {
		int copy;
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		len = min_t(int, skb->len + VLAN_HLEN, len);
		total += VLAN_HLEN;

		copy = min_t(int, vlan_offset, len);
		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
		len -= copy;
		copied += copy;
		if (ret || !len)
			goto done;

		copy = min_t(int, sizeof(veth), len);
		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
		len -= copy;
		copied += copy;
		if (ret || !len)
			goto done;
	}

	ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);

done:
	return ret ? ret : total;
}
static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       const struct iovec *iv, unsigned long len,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb = NULL;
	ssize_t ret = 0;

	while (len) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_dequeue(&q->sk.sk_receive_queue);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (skb) {
		ret = macvtap_put_user(q, skb, iv, len);
		kfree_skb(skb);
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);
	return ret;
}
static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len, ret = 0;

	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
out:
	return ret;
}
static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}
static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}
static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}
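
/*
 * Hedged userspace sketch (not part of this driver): how a program on the
 * character-device side might tell the kernel which offloads it accepts,
 * which ends up in set_offload() above via TUNSETOFFLOAD.  tap_fd is
 * assumed to be an open /dev/tapN file descriptor; the flag values come
 * from <linux/if_tun.h>.  Guarded out since it is user-space code.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int example_enable_offloads(int tap_fd)
{
	/* Accept checksummed and TSOv4/TSOv6 frames from the host side. */
	unsigned int offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

	return ioctl(tap_fd, TUNSETOFFLOAD, offloads);
}
#endif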
/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~MACVTAP_FEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = macvtap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(u, up))
			return -EFAULT;

		q->sk.sk_sndbuf = u;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations macvtap_fops = {
	.owner		= THIS_MODULE,
	.open		= macvtap_open,
	.release	= macvtap_release,
	.aio_read	= macvtap_aio_read,
	.aio_write	= macvtap_aio_write,
	.poll		= macvtap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= macvtap_compat_ioctl,
#endif
};
static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);

	return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
				m->msg_flags & MSG_DONTWAIT);
}
static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len,
			   int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	int ret;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = macvtap_do_read(q, m->msg_iov, total_len,
			      flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg	= macvtap_sendmsg,
	.recvmsg	= macvtap_recvmsg,
};
/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;

	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
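
/*
 * Hedged usage sketch (not part of this file): the typical caller is assumed
 * to be something like vhost-net, which looks the socket up from a file it
 * was handed and then transmits through the queue with ordinary socket
 * calls.  example_send_on_tap() is a hypothetical helper, shown only to
 * illustrate the contract described in the comment above.
 */
static inline int example_send_on_tap(struct file *file, struct msghdr *m,
				      size_t len)
{
	struct socket *sock = macvtap_get_socket(file);

	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* sock->ops is macvtap_socket_ops, so this ends up in
	 * macvtap_sendmsg() and from there in macvtap_get_user(). */
	return sock_sendmsg(sock, m, len);
}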
static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan;
	struct device *classdev;
	dev_t devt;
	int err;

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(macvtap_class, &dev->dev, devt,
					 dev, "tap%d", dev->ifindex);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		break;
	case NETDEV_UNREGISTER:
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(macvtap_class, devt);
		macvtap_free_minor(vlan);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call	= macvtap_device_event,
};
static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");
	if (err)
		goto out1;

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
	if (err)
		goto out2;

	macvtap_class = class_create(THIS_MODULE, "macvtap");
	if (IS_ERR(macvtap_class)) {
		err = PTR_ERR(macvtap_class);
		goto out3;
	}

	err = register_netdevice_notifier(&macvtap_notifier_block);
	if (err)
		goto out4;

	err = macvlan_link_register(&macvtap_link_ops);
	if (err)
		goto out5;

	return 0;

out5:
	unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
	class_unregister(macvtap_class);
out3:
	cdev_del(&macvtap_cdev);
out2:
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
	return err;
}
module_init(macvtap_init);
static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);
MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");