/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>
/* Provide an option to disable split event channels at load time as
 * event channels are limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");
/*
 * This is the maximum slots a skb can have. If a guest sends a skb
 * which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8 st);
static void push_tx_responses(struct xenvif_queue *queue);

static inline int tx_work_todo(struct xenvif_queue *queue);

static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
					     u16 id,
					     s8 st,
					     u16 offset,
					     u16 size,
					     u16 flags);
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}
#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);
	return container_of(temp - pending_idx,
			    struct xenvif_queue,
			    pending_tx_info[0]);
}
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}
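/* Note: pending_index() assumes MAX_PENDING_REQS is a power of two, so
 * the bitwise AND above is equivalent to i % MAX_PENDING_REQS without a
 * division. E.g. if MAX_PENDING_REQS were 256, an index of 260 would
 * wrap to 260 & 255 == 4.
 */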
static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
{
	if (vif->gso_mask)
		return DIV_ROUND_UP(vif->dev->gso_max_size, XEN_PAGE_SIZE) + 1;
	else
		return DIV_ROUND_UP(vif->dev->mtu, XEN_PAGE_SIZE);
}
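/* Worst-case slot estimate: a GSO-capable vif must be able to accept a
 * packet of up to gso_max_size bytes, one XEN_PAGE_SIZE buffer per
 * slot, plus one extra slot for the GSO descriptor; otherwise the MTU
 * bounds the packet size.
 */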
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;
	int needed;

	needed = xenvif_rx_ring_slots_needed(queue->vif);

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}
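/* The do/while above closes a race with the frontend: after setting
 * req_event we re-read req_prod, so a request posted between the failed
 * check and the event update is not missed.
 */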
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max)
		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb)
		queue->rx_queue_len -= skb->len;

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}
static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
{
	spin_lock_irq(&queue->rx_queue.lock);

	if (queue->rx_queue_len < queue->rx_queue_max)
		netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irq(&queue->rx_queue.lock);
}
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
	}
}
struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};
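/* copy_prod/copy_cons and meta_prod/meta_cons are producer/consumer
 * cursors into the copy and meta arrays above; copy_off tracks how much
 * of the current frontend Rx buffer has been filled, and copy_gref is
 * that buffer's grant reference.
 */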
static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}
struct gop_frag_copy {
	struct xenvif_queue *queue;
	struct netrx_pending_operations *npo;
	struct xenvif_rx_meta *meta;
	int head;
	int gso_type;

	struct page *page;
};
static void xenvif_setup_copy_gop(unsigned long gfn,
				  unsigned int offset,
				  unsigned int *len,
				  struct gop_frag_copy *info)
{
	struct gnttab_copy *copy_gop;
	struct xen_page_foreign *foreign;
	/* Convenient aliases */
	struct xenvif_queue *queue = info->queue;
	struct netrx_pending_operations *npo = info->npo;
	struct page *page = info->page;

	BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

	if (npo->copy_off == MAX_BUFFER_OFFSET)
		info->meta = get_next_rx_buffer(queue, npo);

	if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
		*len = MAX_BUFFER_OFFSET - npo->copy_off;

	copy_gop = npo->copy + npo->copy_prod++;
	copy_gop->flags = GNTCOPY_dest_gref;
	copy_gop->len = *len;

	foreign = xen_page_foreign(page);
	if (foreign) {
		copy_gop->source.domid = foreign->domid;
		copy_gop->source.u.ref = foreign->gref;
		copy_gop->flags |= GNTCOPY_source_gref;
	} else {
		copy_gop->source.domid = DOMID_SELF;
		copy_gop->source.u.gmfn = gfn;
	}
	copy_gop->source.offset = offset;

	copy_gop->dest.domid = queue->vif->domid;
	copy_gop->dest.offset = npo->copy_off;
	copy_gop->dest.u.ref = npo->copy_gref;

	npo->copy_off += *len;
	info->meta->size += *len;

	/* Leave a gap for the GSO descriptor. */
	if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
		queue->rx.req_cons++;

	info->head = 0; /* There must be something in this buffer now */
}
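/* Source selection above: if the page is foreign (it belongs to
 * another domain and is grant-mapped here), the hypervisor copies
 * grant-to-grant using the stashed domid/gref; otherwise the source is
 * our own frame, addressed by gfn with DOMID_SELF.
 */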
static void xenvif_gop_frag_copy_grant(unsigned long gfn,
				       unsigned offset,
				       unsigned len,
				       void *data)
{
	unsigned int bytes;

	while (len) {
		bytes = len;
		xenvif_setup_copy_gop(gfn, offset, &bytes, data);
		offset += bytes;
		len -= bytes;
	}
}
/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head)
{
	struct gop_frag_copy info = {
		.queue = queue,
		.npo = npo,
		.head = *head,
		.gso_type = XEN_NETIF_GSO_TYPE_NONE,
	};
	unsigned long bytes;

	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	info.meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);

		bytes = PAGE_SIZE - offset;
		if (bytes > size)
			bytes = size;

		info.page = page;
		gnttab_foreach_grant_in_range(page, offset, bytes,
					      xenvif_gop_frag_copy_grant,
					      &info);
		size -= bytes;
		offset = 0;

		/* Next page */
		if (size) {
			BUG_ON(!PageCompound(page));
			page++;
		}
	}

	*head = info.head;
}
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo,
			  struct xenvif_queue *queue)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;
	int gso_type;

	old_meta_prod = npo->meta_prod;

	gso_type = XEN_NETIF_GSO_TYPE_NONE;
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}

	/* Set up a GSO prefix descriptor, if necessary */
	if ((1 << gso_type) & vif->gso_prefix_mask) {
		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if ((1 << gso_type) & vif->gso_mask) {
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
	} else {
		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
		meta->gso_size = 0;
	}

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		xenvif_gop_frag_copy(queue, skb, npo,
				     virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		xenvif_gop_frag_copy(queue, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head);
	}

	return npo->meta_prod - old_meta_prod;
}
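/* Slot accounting example (assuming 4 KiB Rx buffers): a GSO skb whose
 * data spans two Rx buffers consumes two meta slots, plus one
 * additional ring slot for the extra-info descriptor reserved by
 * xenvif_setup_copy_gop() when the frontend negotiated GSO.
 */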
/*
 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}
static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(queue, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}
void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}
static void xenvif_rx_action(struct xenvif_queue *queue)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy  = queue->grant_copy_op,
		.meta  = queue->meta,
	};

	skb_queue_head_init(&rxq);

	while (xenvif_rx_ring_slots_available(queue)
	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
		queue->last_rx_time = jiffies;

		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&queue->rx,
						 queue->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = queue->meta[npo.meta_cons].gso_size;
			resp->id = queue->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}

		queue->stats.tx_bytes += skb->len;
		queue->stats.tx_packets++;

		status = xenvif_check_gop(queue->vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
					status, offset,
					queue->meta[npo.meta_cons].size,
					flags);

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&queue->rx,
						  queue->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(queue, status,
					  queue->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(queue->rx_irq);
}
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&queue->napi);
}
static void tx_add_credit(struct xenvif_queue *queue)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = queue->remaining_credit + queue->credit_bytes;
	if (max_credit < queue->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	queue->remaining_credit = min(max_credit, max_burst);
}
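/* Worked example: with credit_bytes = 10000 and remaining_credit =
 * 2000, max_credit is 12000; if the request at the ring head is 4000
 * bytes, max_burst is max(4000, 10000) = 10000, so remaining_credit
 * becomes min(12000, 10000) = 10000.
 */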
void xenvif_tx_credit_callback(unsigned long data)
{
	struct xenvif_queue *queue = (struct xenvif_queue *)data;
	tx_add_credit(queue);
	xenvif_napi_schedule_or_enable_events(queue);
}
static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = queue->tx.req_cons;
	unsigned long flags;

	do {
		spin_lock_irqsave(&queue->response_lock, flags);
		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
		push_tx_responses(queue);
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&queue->tx, cons++);
	} while (1);
	queue->tx.req_cons = cons;
}
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	/* Disable the vif from queue 0's kthread */
	if (vif->queues)
		xenvif_kick_thread(&vif->queues[0]);
}
static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = queue->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(queue->vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(queue->vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(queue->vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(queue->vif);
			return -E2BIG;
		}

		/* Xen network protocol had implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(queue, first, cons + slots);
		return drop_err;
	}

	return slots;
}
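/* Return value summary: 0 if the first request has no more_data flag,
 * the number of extra slots consumed on success, or a negative errno.
 * On the drop paths the loop still consumes every slot of the packet
 * and xenvif_tx_err() responds to each with XEN_NETIF_RSP_ERROR.
 */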
struct xenvif_tx_cb {
	u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   struct gnttab_map_grant_ref *mop)
{
	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, queue->vif->domid);

	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
}
static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
							struct sk_buff *skb,
							struct xen_netif_tx_request *txp,
							struct gnttab_map_grant_ref *gop,
							unsigned int frag_overflow,
							struct sk_buff *nskb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	int start;
	pending_ring_idx_t index;
	unsigned int nr_slots;

	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, txp++, gop++) {
		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
	}

	if (frag_overflow) {

		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
		     shinfo->nr_frags++, txp++, gop++) {
			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
			xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
		}

		skb_shinfo(skb)->frag_list = nskb;
	}

	return gop;
}
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	/* If this is non-NULL, we are currently checking the frag_list skb, and
	 * this points to the shinfo of the first one
	 */
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
	int i, err;

	/* Check status of header. */
	err = (*gopp_copy)->status;
	if (unlikely(err)) {
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
				   (*gopp_copy)->status,
				   pending_idx,
				   (*gopp_copy)->source.u.ref);
		/* The first frag might still have this slot mapped */
		if (!sharedslot)
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_ERROR);
	}
	(*gopp_copy)++;

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(queue,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err)) {
				xenvif_idx_unmap(queue, pending_idx);
				/* If the mapping of the first frag was OK, but
				 * the header's copy failed, and they are
				 * sharing a slot, send an error
				 */
				if (i == 0 && sharedslot)
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_ERROR);
				else
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_OKAY);
			}
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);
		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: if the header haven't shared a slot with the
		 * first frag, release it as well.
		 */
		if (!sharedslot)
			xenvif_idx_release(queue,
					   XENVIF_TX_CB(skb)->pending_idx,
					   XEN_NETIF_RSP_OKAY);

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(queue, pending_idx);
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* And if we found the error while checking the frag_list, unmap
		 * the first skb's frags
		 */
		if (first_shinfo) {
			for (j = 0; j < first_shinfo->nr_frags; j++) {
				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
				xenvif_idx_unmap(queue, pending_idx);
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			}
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb) && !first_shinfo) {
		first_shinfo = skb_shinfo(skb);
		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	*gopp_map = gop_map;
	return err;
}
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous*/
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(queue, pending_idx);
		else
			callback_param(queue, prev_pending_idx).ctx =
				&callback_param(queue, pending_idx);

		callback_param(queue, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &queue->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(queue->mmap_pages[pending_idx]);
	}
}
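/* The destructor_arg/ctx chaining above links every pending slot of
 * this skb into a list headed by skb_shinfo(skb)->destructor_arg, so
 * xenvif_zerocopy_callback() can walk all slots when the network stack
 * releases the skb.
 */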
static int xenvif_get_extras(struct xenvif_queue *queue,
			     struct xen_netif_extra_info *extras,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = queue->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(queue->vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(queue->vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			queue->tx.req_cons = ++cons;
			netdev_err(queue->vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		queue->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}
static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}
static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		queue->stats.rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = queue->credit_window_start +
		msecs_to_jiffies(queue->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&queue->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		queue->credit_window_start = now;
		tx_add_credit(queue);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > queue->remaining_credit) {
		queue->credit_timeout.data     =
			(unsigned long)queue;
		mod_timer(&queue->credit_timeout,
			  next_credit);
		queue->credit_window_start = next_credit;

		return true;
	}

	return false;
}
/* No locking is required in xenvif_mcast_add/del() as they are
 * only ever invoked from NAPI poll. An RCU list is used because
 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 */

static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Too many multicast addresses\n");
		return -ENOSPC;
	}

	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
	if (!mcast)
		return -ENOMEM;

	ether_addr_copy(mcast->addr, addr);
	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
	vif->fe_mcast_count++;

	return 0;
}

static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			--vif->fe_mcast_count;
			list_del_rcu(&mcast->entry);
			kfree_rcu(mcast, rcu);
			break;
		}
	}
}

bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	rcu_read_lock();
	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
	/* No need for locking or RCU here. NAPI poll and TX queue
	 * are stopped.
	 */
	while (!list_empty(&vif->fe_mcast_addr)) {
		struct xenvif_mcast_addr *mcast;

		mcast = list_first_entry(&vif->fe_mcast_addr,
					 struct xenvif_mcast_addr,
					 entry);
		--vif->fe_mcast_count;
		list_del(&mcast->entry);
		kfree(mcast);
	}
}
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				 int budget,
				 unsigned *copy_ops,
				 unsigned *map_ops)
{
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
	struct sk_buff *skb, *nskb;
	int ret;
	unsigned int frag_overflow;

	while (skb_queue_len(&queue->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (queue->tx.sring->req_prod - queue->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(queue->vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   queue->tx.sring->req_prod, queue->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
		if (!work_to_do)
			break;

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq.size))
			break;

		queue->remaining_credit -= txreq.size;

		work_to_do--;
		queue->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(queue, extras,
						       work_to_do);
			idx = queue->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq,
					 (ret == 0) ?
					 XEN_NETIF_RSP_OKAY :
					 XEN_NETIF_RSP_ERROR);
			push_tx_responses(queue);
			continue;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
			push_tx_responses(queue);
			continue;
		}

		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(queue->vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev,
				   "txreq.offset: %u, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size)
			skb_shinfo(skb)->nr_frags++;
		/* At this point shinfo->nr_frags is in fact the number of
		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
		 */
		frag_overflow = 0;
		nskb = NULL;
		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
			nskb = xenvif_alloc_skb(0);
			if (unlikely(nskb == NULL)) {
				kfree_skb(skb);
				xenvif_tx_err(queue, &txreq, idx);
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Can't allocate the frag_list skb.\n");
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				kfree_skb(nskb);
				break;
			}
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_gfn(skb->data);
		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		queue->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data) & ~XEN_PAGE_MASK;

		queue->tx_copy_ops[*copy_ops].len = data_len;
		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		if (data_len < txreq.size) {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
			       sizeof(txreq));
		}

		queue->pending_cons++;

		gop = xenvif_get_requests(queue, skb, txfrags, gop,
					  frag_overflow, nskb);

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - queue->tx_map_ops;
	return;
}
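/* Each packet is split into two kinds of grant ops above: the first
 * XEN_NETBACK_TX_COPY_LEN bytes are grant-copied into the skb's linear
 * area (tx_copy_ops), and any remainder is grant-mapped read-only as
 * frags (tx_map_ops), avoiding a full copy of large payloads.
 */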
/* Consolidate skb with a frag_list into a brand new one with local pages on
 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);
	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;
			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(frags[j].page.p);
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		frags[i].page.p = page;
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}

	/* Copied all the bits from the frag list -- free it. */
	skb_frag_list_init(skb);
	xenvif_skb_zerocopy_prepare(queue, nskb);
	kfree_skb(nskb);

	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/* Fill the skb with the new (local) frags. */
	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	return 0;
}
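/* The consolidation above is needed because the overflow frags live on
 * a frag_list of grant-mapped (foreign) pages; copying them into
 * freshly allocated local pages lets the foreign grants be unmapped
 * and returned to the guest without waiting on the network stack.
 */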
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
		}

		skb->dev      = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;
		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}
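/* The callback may run in softirq context, so it only enqueues the
 * pending indices on the dealloc ring; the actual grant unmapping is
 * done later by xenvif_dealloc_kthread() via
 * xenvif_tx_dealloc_action().
 */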
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;
		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		BUG_ON(ret);
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}
static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}
static void push_tx_responses(struct xenvif_queue *queue)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
					     u16 id,
					     s8 st,
					     u16 offset,
					     u16 size,
					     u16 flags)
{
	RING_IDX i = queue->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&queue->rx, i);
	resp->offset     = offset;
	resp->flags      = flags;
	resp->id         = id;
	resp->status     = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	queue->rx.rsp_prod_pvt = ++i;

	return resp;
}
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}
static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}
void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}

int xenvif_map_frontend_rings(struct xenvif_queue *queue,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &tx_ring_ref, 1, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &rx_ring_ref, 1, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_rings(queue);
	return err;
}
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled && prod - cons < 1
		&& time_after(jiffies,
			      queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled && prod - cons >= 1;
}

static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return (!skb_queue_empty(&queue->rx_queue)
		&& xenvif_rx_ring_slots_available(queue))
		|| (queue->vif->stall_timeout &&
		    (xenvif_rx_queue_stalled(queue)
		     || xenvif_rx_queue_ready(queue)))
		|| kthread_should_stop()
		|| queue->vif->disabled;
}
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}
/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning).  In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context. Currently this is only set when
		 * netback finds out frontend sends malformed packet,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here, if this thread is
		 * associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains. These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		xenvif_rx_queue_maybe_wake(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}
static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}

int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining*/
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}
#ifdef CONFIG_DEBUG_FS
struct dentry *xen_netback_dbg_root = NULL;
#endif /* CONFIG_DEBUG_FS */

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs if user has not
	 * specified a value.
	 */
	if (xenvif_max_queues == 0)
		xenvif_max_queues = num_online_cpus();

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		pr_warn("Init of debugfs returned %ld!\n",
			PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);
static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
		debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");